Dataset schema:

| column | dtype | range / values |
|---|---|---|
| blob_id | stringlengths | 40 |
| directory_id | stringlengths | 40 |
| path | stringlengths | 3 to 616 |
| content_id | stringlengths | 40 |
| detected_licenses | sequencelengths | 0 to 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 to 115 |
| snapshot_id | stringlengths | 40 |
| revision_id | stringlengths | 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 to 10.2M |
| authors | sequencelengths | 1 |
| author_id | stringlengths | 1 to 132 |
a1d7168df36367a6a9de58b2eef43b6e2a6c0481 | 14a58f0c6d0bcfeeb308a8a8719d0e9e728ee48e | /tests/test_custom.py | 13238835eb34053c78b53794b33cf1e7e3e11830 | [
"MIT"
] | permissive | wesselb/lab | 262da5a30c1b3a78e576014d9b752aae52959774 | 275d041bdd47bbbad1fce5a10bbce0d7beceefdb | refs/heads/master | 2023-06-08T11:04:03.523207 | 2023-05-27T10:15:07 | 2023-05-27T10:15:07 | 127,299,861 | 62 | 6 | MIT | 2023-09-01T09:53:02 | 2018-03-29T14:02:14 | Python | UTF-8 | Python | false | false | 4,677 | py |
import jax
import jax.numpy as jnp
import lab as B
import numpy as np
import pytest
import tensorflow as tf
import torch
from autograd import grad
from fdm import check_sensitivity, gradient
from lab.custom import (
toeplitz_solve,
s_toeplitz_solve,
bvn_cdf,
s_bvn_cdf,
expm,
s_expm,
logm,
s_logm,
)
from lab.tensorflow.custom import as_tf
from lab.torch.custom import as_torch
from plum import isinstance
# noinspection PyUnresolvedReferences
from .util import approx, check_lazy_shapes, check_function, PSD
def test_as_tf(check_lazy_shapes):
assert isinstance(as_tf(B.randn()), B.TFNumeric)
assert isinstance(as_tf((B.randn(),))[0], B.TFNumeric)
def test_as_torch(check_lazy_shapes):
assert isinstance(as_torch(B.randn()), B.TorchNumeric)
assert isinstance(as_torch((B.randn(),))[0], B.TorchNumeric)
def check_grad(f, args, kw_args=None, rtol=1e-8):
"""Check the gradients of a function.
Args:
f (function): Function to check gradients of.
args (tuple): Arguments to check `f` at.
kw_args (tuple, optional): Keyword arguments to check `f` at. Defaults
to no keyword arguments.
rtol (float, optional): Relative tolerance. Defaults to `1e-8`.
"""
# Default to no keyword arguments.
if kw_args is None:
kw_args = {}
# Get the associated function in LAB.
lab_f = getattr(B, f.__name__)
def create_f_i(i, args_):
# Create a function that only varies the `i`th argument.
def f_i(x):
return B.mean(lab_f(*(args_[:i] + (x,) + args_[i + 1 :]), **kw_args))
return f_i
# Walk through the arguments.
for i in range(len(args)):
# Numerically compute gradient.
f_i = create_f_i(i, args)
numerical_grad = gradient(f_i)(args[i])
# Check AutoGrad gradient.
autograd_grad = grad(f_i)(args[i])
approx(numerical_grad, autograd_grad, rtol=rtol)
# Check TensorFlow gradient.
tf_args = tuple([as_tf(arg) for arg in args])
f_i = tf.function(create_f_i(i, tf_args), autograph=False)
with tf.GradientTape() as t:
t.watch(tf_args[i])
tf_grad = t.gradient(f_i(tf_args[i]), tf_args[i]).numpy()
approx(numerical_grad, tf_grad, rtol=rtol)
# Check PyTorch gradient.
torch_args = tuple([as_torch(arg, grad=False) for arg in args])
f_i = torch.jit.trace(create_f_i(i, torch_args), torch_args[i])
arg = torch_args[i].requires_grad_(True)
f_i(arg).backward()
approx(numerical_grad, arg.grad, rtol=rtol)
# Check JAX gradient.
jax_args = tuple([jnp.asarray(arg) for arg in args])
f_i = create_f_i(i, jax_args)
jax_grad = jax.jit(jax.grad(f_i))(jax_args[i])
approx(numerical_grad, jax_grad, rtol=rtol)
def test_toeplitz_solve(check_lazy_shapes):
check_sensitivity(
toeplitz_solve, s_toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3))
)
check_sensitivity(
toeplitz_solve, s_toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3, 4))
)
check_grad(toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3)))
check_grad(toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3, 4)))
def test_bvn_cdf(check_lazy_shapes):
check_sensitivity(bvn_cdf, s_bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
check_grad(bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
# Check that function runs on both `float32`s and `float64`s.
a, b, c = B.rand(3), B.rand(3), B.rand(3)
approx(
B.bvn_cdf(a, b, c),
B.bvn_cdf(B.cast(np.float32, a), B.cast(np.float32, b), B.cast(np.float32, c)),
)
    # Check that, in JAX, the function checks the shapes of the inputs.
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 2), B.rand(jnp.float32, 3), B.rand(jnp.float32, 3)
)
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 3), B.rand(jnp.float32, 2), B.rand(jnp.float32, 3)
)
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 3), B.rand(jnp.float32, 3), B.rand(jnp.float32, 2)
)
def test_expm(check_lazy_shapes):
check_sensitivity(expm, s_expm, (B.randn(3, 3),))
check_grad(expm, (B.randn(3, 3),))
def test_logm_forward(check_lazy_shapes):
# This test can be removed once the gradient is implemented and the below test
# passes.
check_function(B.logm, (PSD(3),))
@pytest.mark.xfail
def test_logm(check_lazy_shapes):
mat = B.eye(3) + 0.1 * B.randn(3, 3)
check_sensitivity(logm, s_logm, (mat,))
check_grad(logm, (mat,))
| [
"[email protected]"
] | |
b6b30a5211de514ccc29a84165549898bf818ab2 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /GIT-USERS/TOM2/cs41long_lambda_mud_server/adv_project/settings.py | 262f1bf5e8db671d85bf94889ff91cb1a1be8b13 | [] | no_license | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,698 | py |
"""
Django settings for adv_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
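# Both values above are read by python-decouple from the process environment
# or from a .env file alongside the project; a minimal illustrative .env
# (hypothetical values, not part of this repo) would be:
#   SECRET_KEY=change-me-in-production
#   DEBUG=True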
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adventure',
'api',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'adv_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adv_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# DATABASES = {}
import dj_database_url
# DATABASES['default'] = dj_database_url.config(default=config('DATABASE_URL'), conn_max_age=600)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# DATABASES['default'] = dj_database_url.config(default=config('DATABASE_URL'))
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
REST_FRAMEWORK = {
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
# ],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL=True
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
import django_heroku
django_heroku.settings(locals())
| [
"[email protected]"
] | |
e92b8e4b15c288eff358cd8ca006e1290b4b5e34 | 36b04c5d9ae181a780d44764dd85bcc784e72101 | /cifar10_resnet/ensemble/saved_models/load_model.py | 5334b291c6071e015285bbce9ab03b85e6023c65 | [] | no_license | chengning-zhang/Ensemble-Method-in-Image-Classification | 9783b6b30f3e174fad1504a44cca57093f8c8730 | a7c4176334f8e7701fe9cae77fc31b3b6ed0704d | refs/heads/master | 2022-12-30T09:07:30.546966 | 2020-10-21T20:03:51 | 2020-10-21T20:03:51 | 112,964,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py |
from keras.models import load_model
from resnet import *
if __name__ == "__main__":
# Training parameters
batch_size = 64
epochs = 2
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
n = 3
# Model version
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1
substract_pixel_mean = True
(x_train, y_train), (x_test, y_test), input_shape = prepare_data_for_resnet(substract_pixel_mean)
filename = "cifar10_resnet_model.01.h5"
model = load_model(filename)
| [
"[email protected]"
] | |
ac96698f9e823501b4e9678648199c29f14f8d32 | 236c6d7f53d61dfbddefa3b6574f181ccef72e60 | /lessons/lesson18/demo/migrations/0001_initial.py | 5c3a99d8634bbc9b10b49ec5bf5f502928440c92 | [] | no_license | maxchv/LearnPython | 94193c9770dc7b788099076316a1dbd6a5112cf4 | 561f74f39644cd6694ef36869beb7ddb6ff006dc | refs/heads/master | 2022-07-07T18:31:02.274159 | 2022-06-15T15:10:40 | 2022-06-15T15:10:40 | 65,457,162 | 1 | 7 | null | 2019-10-23T05:42:30 | 2016-08-11T09:30:53 | Python | UTF-8 | Python | false | false | 677 | py |
# Generated by Django 4.0.4 on 2022-05-11 18:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('description', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
2d73c2e24886dc741ce1d0a7e7c2efb1f6f6cda2 | 0e383ccac5fdf21dc5059502b9aae26412fd6a88 | /shared_lib/readers.py | 3ce6f7460240524ae740f15057482645e5bbafea | [
"MIT"
] | permissive | jimsrc/seatos | 63c8ad99f2b5d4ae5f203cdc8f8e061948f257f4 | e775dba1a2a96ff44b837cf8d85101ccfef302b1 | refs/heads/master | 2021-01-02T08:38:51.349670 | 2017-09-01T01:59:35 | 2017-09-01T01:59:35 | 99,040,968 | 0 | 1 | null | 2017-09-01T01:59:36 | 2017-08-01T20:33:55 | Python | UTF-8 | Python | false | false | 23,271 | py |
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
from scipy.io.netcdf import netcdf_file
import numpy as np
from numpy.linalg import norm
from datetime import datetime, timedelta
from h5py import File as h5
import os, sys, h5py, argparse
#--- shared libs
from shared.ShiftTimes import ShiftCorrection, ShiftDts
import shared.console_colors as ccl
from shared.shared_funcs import nans, My2DArray, selecc_window_ii
import shared.shared_funcs as sf
#+++++++++++++++++++++++++++++++++++++
#---- auxiliary functions for the
#---- data-handlers below
#+++++++++++++++++++++++++++++++++++++
def calc_beta(Temp, Pcc, B):
"""
    We take the OMNI definition of plasma beta, from:
http://omniweb.gsfc.nasa.gov/ftpbrowser/magnetopause/Reference.html
http://pamela.roma2.infn.it/index.php
Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT)
"""
beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2
return beta
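# Quick plausibility check of calc_beta (hypothetical numbers, not taken from
# any dataset here): Tp = 1e5 K, Np = 5 #/cc, B = 5 nT gives
# beta = (4.16e-5 * 1e5 + 5.34) * 5 / 5**2 = 9.5 * 0.2 = 1.9,
# i.e. of order unity, as expected for ordinary solar wind.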
def dates_from_omni(t):
time = []
n = len(t)
for i in range(n):
yyyy = t[i][0]
mm = t[i][1]
dd = t[i][2]
HH = t[i][3]
MM = t[i][4]
SS = t[i][5]
uSS = t[i][6]
time += [datetime(yyyy, mm, dd, HH, MM, SS, uSS)]
return time
def date_to_utc(fecha):
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
sec_utc = (fecha - utc).total_seconds()
return sec_utc
def utc_from_omni(file):
t = np.array(file.variables['time'].data)
dates = dates_from_omni(t)
n = len(dates)
time = np.zeros(n)
for i in range(n):
time[i] = date_to_utc(dates[i])
return time
def read_hsts_data(fname, typic, ch_Eds):
"""
code adapted from ...ch_Eds_smoo2.py
"""
f = h5(fname, 'r')
# initial date
datestr = f['date_ini'].value
yyyy, mm, dd = map(int, datestr.split('-'))
INI_DATE = datetime(yyyy, mm, dd)
# final date
datestr = f['date_end'].value
yyyy, mm, dd = map(int, datestr.split('-'))
END_DATE = datetime(yyyy, mm, dd)
date = INI_DATE
tt, rr = [], []
ntot, nt = 0, 0
while date < END_DATE:
yyyy, mm, dd = date.year, date.month, date.day
path = '%04d/%02d/%02d' % (yyyy, mm, dd)
try:
dummy = f[path] # test if this exists!
except:
date += timedelta(days=1) # next day...
continue
ntanks = f['%s/tanks'%path][...]
cc = ntanks>150.
ncc = cc.nonzero()[0].size
        if ncc>1: # more than one data point has >150 tanks
time = f['%s/t_utc'%path][...] # utc secs
cts, typ = np.zeros(96, dtype=np.float64), 0.0
for i in ch_Eds:
Ed = i*20.+10.
cts += f['%s/cts_temp-corr_%04dMeV'%(path,Ed)][...]
typ += typic[i] # escalar
cts_norm = cts/typ
#aux = np.nanmean(cts_norm[cc])
tt += [ time[cc] ]
rr += [ cts_norm[cc] ]
ntot += 1 # files read ok
nt += ncc # total nmbr ok elements
date += timedelta(days=1) # next day...
#--- converting tt, rr to 1D-numpy.arrays
t, r = nans(nt), nans(nt)
ini, end = 0, 0
for i in range(ntot):
ni = len(tt[i])
t[ini:ini+ni] = tt[i]
r[ini:ini+ni] = rr[i]
ini += ni
f.close()
return t, r
class _read_auger_scals(object):
"""
reads different versions of corrected-scalers
"""
def __init__(self, fname_inp, data_name):
self.fname_inp = fname_inp
self.data_name = data_name
def read(self):
with h5py.File(self.fname_inp,'r') as f:
if 'auger' in f.keys():
return self.read_i()
elif 't_utc' in f.keys():
return self.read_ii()
else:
raise SystemExit('\
---> no reader setup for this version scaler file!\
')
def read_i(self):
"""
read first version of processed
corrected-scalers.
"""
f5 = h5py.File(self.fname_inp, 'r')
t_utc = f5['auger/time_seg_utc'][...].copy() #data_murdo[:,0]
CRs = f5['auger/sc_wAoP_wPres'][...].copy() #data_murdo[:,1]
print " -------> variables leidas!"
VARS = {
'CRs.'+self.data_name : {
'value' : CRs,
'lims' : [-1.0, 1.0],
'label' : 'Auger Scaler rate [%]',
},
}
return t_utc, VARS
def _pair_yyyymm(self, f, kname):
years = map(int, f[kname].keys())
ly, lm = [], []
for year in years:
months = map(int, f[kname+'/%04d'%year].keys())
nm = len(months)
ly += [year]*nm
lm += months
return zip(ly,lm)
def read_ii(self):
"""
read 2nd version of processed correctd-scalers.
We do NOT read the geop-height-corrected scalers, because
seems unphysical (i.e. geop height is not a parameter
for scalers correction!). So just use pressure-corrected ones.
"""
f = h5py.File(self.fname_inp,'r')
years_and_months = self._pair_yyyymm(f, 't_utc')
t_utc = My2DArray((3,), dtype=np.float32)
CRs = My2DArray((3,), dtype=np.float32)
n = 0
for yyyy, mm in years_and_months:
nt = f['t_utc/%04d/%02d'%(yyyy,mm)].size
t_utc[n:n+nt] = f['t_utc/%04d/%02d'%(yyyy,mm)][...]
CRs[n:n+nt] = f['wAoP_wPrs/%04d/%02d'%(yyyy,mm)][...]
n += nt
print " --> Auger scalers leidos!"
VARS = {
'CRs.'+self.data_name : {
'value' : CRs[:n],
'lims' : [-1.0, 1.0],
'label' : 'Auger Scaler rate [%]',
},
}
return t_utc[:n], VARS
def get_all_bartels():
dates = {}
ok2read = False
i = 0
for line in open('./bartels.txt','r').readlines():
if line in ('','\n'): continue
if line.startswith('Post L1 Insertion'): # cut here
ok2read = True
continue
if line.startswith(' *-Seconds'):
ok2read = False
continue
if ok2read:
#print line.split()
mm,dd,yyyy = map(int,line.split()[1].split('/'))
dates[i] = {
'bartel' : int(line.split()[0]), # Bartels rotation number
'date' : datetime(yyyy, mm, dd),
'ACEepoch' : float(line.split()[4]),
}
#print yyyy,mm,dd, dates[i]['ACEepoch']
i += 1
return dates
def deduce_fnms(bartels, ini, end, subdir=''):
fnms = []
n = len(bartels)
for i in range(n-1):
date = bartels[i]['date']
date_next = bartels[i+1]['date']
if date_next>=ini: #and date<end:
bart = bartels[i]['bartel'] # bartel rotation number
fnms += [subdir+'/mag_data_1sec_{bart}.hdf'.format(**locals())]
if date_next>end:
break ## FINISHED!
return fnms
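# e.g. for a rotation entry with bartels[i]['bartel'] == 2405 (hypothetical
# number) this yields '<subdir>/mag_data_1sec_2405.hdf'; rotations are kept
# from the first one whose successor's start date reaches `ini` up to the
# first one whose successor starts past `end`.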
def calc_rmsB(t_inp, B, width=3600., fgap=0.2, res_o=60):
"""
* t
time in seconds (be UTC-sec, GPS-sec, ACEepoch-sec, etc,
doesn't matter).
* B
vector such that, Bx=B[:,0], By=B[:,1], and Bz=B[:,2].
* width:
time size in seconds, of the width on which
we'll calculate the rmsB.
* fgap:
fraction of gaps that we'll tolerate.
* res_o:
output time resolution. Note that processing 1sec data
one by one, y VERY expensive; so an alternative approach
that we are using here, is to process one data point
every 60 points (i.e. with 1min cadence). NOTE: the
`width` must be INTEGER!!
"""
# to convert numpy warnings to errors
#np.seterr(all='raise')
t = t_inp.copy() # don't touch input data!
c1 = t<t[0] + 0.5*width
c2 = t>t[-1] - 0.5*width
# initial/final indexes on which we'll work
ini, end = c1.nonzero()[0][-1], c2.nonzero()[0][0]
# index list
t_indexes = np.arange(ini+1, end, res_o)
# outputs
rmsB = np.zeros(t_indexes.size, dtype=B.dtype)
rmsB_para = np.zeros(t_indexes.size, dtype=B.dtype)
rmsB_perp = np.zeros(t_indexes.size, dtype=B.dtype)
tnew = np.zeros(t_indexes.size, dtype=np.float64)
# half the size of width in number of index units
w2 = int(0.5*width)
for i, i_ in zip(t_indexes, range(t_indexes.size)):
tnew[i_] = t[i]
ts_ = slice(i-w2,i+w2+1) # time slice
ccg = ~np.isnan(B[ts_,0]) # False for gap values
# time indexes having good data, in our `ts_` window
ti = ts_.start + ccg.nonzero()[0] # {numpy.array} one-dimensional
# too many gaps
if (~ccg).nonzero()[0].size > (fgap*2*w2):
rmsB[i_] = np.nan
continue
#NOTE: a.std() is equivalent to np.sqrt(((a - a.mean())**2).sum()/a.size)
Bo = np.mean(B[ti,:], axis=0) # local Bo in the window `width`
dB = B[ti,:] - Bo # deviation of `B` from `Bo`
# parallel component of `dB` on `Bo`
dB_para = np.dot(dB, Bo/norm(Bo))
# perp component is `dB` minus the parallel part
"""
NOTE: np.outer() is the "outer product" of two vectors, so that
dB_para[0]*Bo/norm(Bo) is the parallel component of `dB` in
vector form (recall that `Bo` has a (3,) shape).
Then:
>>> dB[j,:] - np.outer(dB_para, Bo/norm(Bo))[j,:]
is the perpendicular component of `dB` for the time
index `j`.
"""
# rmsB
dB_perp = dB - np.outer(dB_para, Bo/norm(Bo))
ms = (np.square(dB)).sum()
ms /= 1.*ti.size
rmsB[i_] = np.sqrt(ms)
# rmsB (parallel)
ms = np.square(dB_para).sum()/(1.*ti.size)
rmsB_para[i_] = np.sqrt(ms)
# rmsB (perpendicular)
ms = np.square(dB_perp).sum()/(1.*ti.size)
rmsB_perp[i_] = np.sqrt(ms)
return tnew, rmsB, rmsB_para, rmsB_perp
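# Minimal usage sketch for calc_rmsB (synthetic input, not real ACE data):
# one hour of fake 1-sec samples with a steady 5 nT x-component plus
# unit-variance noise, rms evaluated on 600-sec windows at a 60-sec cadence:
#   t = np.arange(3600, dtype=np.float64)
#   B = (np.random.randn(3600, 3) + np.array([5., 0., 0.])).astype(np.float32)
#   tn, rms, rms_para, rms_perp = calc_rmsB(t, B, width=600., res_o=60)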
#+++++++++++++++++++++++++++++++++++++
#----------- data handlers -----------
#+++++++++++++++++++++++++++++++++++++
class _data_ACE(object):
"""
to read the .nc file of ACE data, built from ASCII versions
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
f_sc = netcdf_file(self.fname_inp, 'r')
print " leyendo tiempo..."
t_utc = utc_from_omni(f_sc)
print " Ready."
tb = kws['tb'] # datetimes of borders of all structures
bd = kws['bd'] # borders of the structures we will use
#+++++++++++++++++++++++++++++++++++++++++++
B = f_sc.variables['Bmag'].data.copy()
Vsw = f_sc.variables['Vp'].data.copy()
Temp = f_sc.variables['Tp'].data.copy()
Pcc = f_sc.variables['Np'].data.copy()
rmsB = f_sc.variables['dBrms'].data.copy()
alphar = f_sc.variables['Alpha_ratio'].data.copy()
beta = calc_beta(Temp, Pcc, B)
rmsBoB = rmsB/B
print " -------> variables leidas!"
#------------------------------------ VARIABLES
VARS = {}
# variable, nombre archivo, limite vertical, ylabel
VARS['B.'+data_name] = {
'value' : B,
'lims' : [5., 18.],
'label' : 'B [nT]'
}
VARS['V.'+data_name] = {
'value' : Vsw,
'lims' : [300., 650.],
'label' : 'Vsw [km/s]'
}
VARS['rmsBoB.'+data_name] = {
'value' : rmsBoB,
'lims' : [0.01, 0.2],
'label' : 'rms($\hat B$/|B|) [1]'
}
VARS['rmsB.'+data_name] = {
'value' : rmsB,
'lims' : [0.05, 2.0],
'label' : 'rms($\hat B$) [nT]'
}
VARS['beta.'+data_name] = {
'value' : beta,
'lims' : [0.001, 5.],
'label' : '$\\beta$ [1]'
}
VARS['Pcc.'+data_name] = {
'value' : Pcc,
'lims' : [2, 17.],
'label' : 'proton density [#/cc]'
}
VARS['Temp.'+data_name] = {
'value' : Temp,
'lims' : [1e4, 4e5],
'label' : 'Temp [K]'
}
VARS['AlphaRatio.'+data_name] = {
'value' : alphar,
'lims' : [1e-3, 0.1],
'label' : 'alpha ratio [1]'
}
#self.nvars = len(VARS.keys())
#---------
#self.aux = aux = {}
#aux['SELECC'] = self.SELECC
"""
NOTE: `bd` and `tb` have been shifted if
`self.FITLER['CorrShift']`==True in the
events_mgr() class.
"""
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_BandMuons(object):
"""
for muon band of Auger charge histograms
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        reads the Auger histogram data
"""
f5 = h5(self.fname_inp, 'r')
ch_Eds = (10, 11, 12, 13)
# get the global-average histogram
nEd = 50
typic = np.zeros(nEd, dtype=np.float32)
for i in range(nEd):
Ed = i*20.+10.
typic[i] = f5['mean/corr_%04dMeV'%Ed].value
t_utc, CRs = read_hsts_data(self.fname_inp, typic, ch_Eds)
print " -------> variables leidas!"
VARS = {} #[]
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-1.0, 1.0],
'label' : 'Auger (muon band) [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_BandScals(object):
"""
for muon band of Auger charge histograms
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        reads the Auger histogram data
"""
f5 = h5(self.fname_inp, 'r')
ch_Eds = (3, 4, 5)
# get the global-average histogram
nEd = 50
typic = np.zeros(nEd, dtype=np.float32)
for i in range(nEd):
Ed = i*20.+10.
typic[i] = f5['mean/corr_%04dMeV'%Ed].value
t_utc, CRs = read_hsts_data(self.fname_inp, typic, ch_Eds)
print " -------> variables leidas!"
VARS = {} #[]
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-1.0, 1.0],
'label' : 'Auger (muon band) [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_ACE_o7o6(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
tb = self.tb
nBin = self.nBin
bd = self.bd
day = 86400.
self.f_sc = netcdf_file(self.fname_inp, 'r')
print " leyendo tiempo..."
t_utc = utc_from_omni(self.f_sc)
print " Ready."
#++++++++++++++++++++++++++++++++++++++++++++++++
o7o6 = self.f_sc.variables['O7toO6'].data.copy()
print " -------> variables leidas!"
#----------------------- VARIABLES
self.t_utc = t_utc
self.VARS = VARS = {}
# variable, nombre archivo, limite vertical, ylabel
VARS['o7o6'] = {
'value' : o7o6,
'lims' : [0.0, 1.5],
'label' : 'O7/O6 [1]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_scals(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        we only load the Auger Scalers
"""
opt = {
'fname_inp' : self.fname_inp,
'data_name' : data_name,
}
"""
the class `_read_auger_scals` reads both versions of
scalers (old & new).
"""
sc = _read_auger_scals(**opt)
t_utc, VARS = sc.read()
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_McMurdo(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
fname_inp = self.fname_inp
data_murdo = np.loadtxt(fname_inp)
        t_utc = data_murdo[:,0]
CRs = data_murdo[:,1]
print " -------> variables leidas!"
VARS = {}
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-8.0, 1.0],
'label' : 'mcmurdo rate [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
#--- reader for ACE 1seg MAG data
class _data_ACE1sec(object):
"""
the parameters below are for the processing of deduced
observables, such as "rmsB".
They are used in `self.grab_block()`.
"""
width = 3600. # [sec] time width of rmsB-calculation
fgap = 0.2 # [1] gap-fraction to tolerate
res_o = 60 # [sec] output resolution
def __init__(self, **kws):
self.dir_inp = kws['input']
self.now = None
#@profile
def load(self, **kws):
import cython_wrapper
self.cw = cython_wrapper
# contains: bartels rotation numbers, ACEepochs, adn datetimes.
self.bartels = get_all_bartels() # {dict}
self.nbartels = len(self.bartels)
self.dname = dname = kws['data_name']
VARS = {}
"""
the keys if `VARS` will be used to iterate on the
possible values of `vname` in `self.grab_block()`.
"""
VARS['Bmag.'+dname] = {
'value' : None,
'lims' : [5., 18.],
'label' : 'B [nT]',
}
VARS['rmsB.'+dname] = {
'value' : None,
'lims' : [0.5, 11.],
'label' : 'rms($\hat B$) [nT]',
}
VARS['rmsB_ratio.'+dname] = {
'value' : None,
'lims' : [0.5, 50.],
'label' : '$\delta B^2_{{\perp}} / \delta B^2_{{\parallel}}$'+\
' ($\Delta t:$ {dt:2.1f} hr)'.format(dt=self.width/3600.),
}
return {
# this is the period for available data in our input directory
#'t_utc' : [883180800, 1468713600], # [utc sec]
't_utc' : [sf.date2utc(self.bartels[0]['date']),
sf.date2utc(self.bartels[self.nbartels-1]['date'])], # [utc sec]
'VARS' : VARS,
}
#@profile
def grab_block(self, vname=None, **kws):
# alias
OneDay = timedelta(days=1) # {timedelta}
# time extent of queried data, in terms of the
# size of the structure
nbef, naft = kws['nwndw']
# range of requested data
tini = kws['tini'] - nbef*OneDay # {datetime}
tend = kws['tend'] + naft*OneDay # {datetime}
# if the bounds of the events are out of the
# boundaries of the available data, return error
assert self.bartels[0]['date']<=tini and \
self.bartels[self.nbartels-1]['date']>=tend,\
"""
[-] ERROR:
# no data for this `vname` in
# this time window!!
--- window of available data:
ini: {d_ini}
end: {d_end}
--- window of requested data:
ini: {r_ini}
end: {r_end}
""".format(
r_ini = tini,
r_end = tend,
d_ini = self.bartels[0]['date'],
d_end = self.bartels[self.nbartels-1]['date'],
)
# -- deduce fnm_ls
        subdir = '/media/hdd_extern_hegea/data_ace/mag_data_1sec'
fnm_ls = deduce_fnms(self.bartels, tini, tend, subdir)
for fnm in fnm_ls:
print fnm
assert os.path.isfile(fnm)
# -- deduce ace_ini, ace_end
ace_ini = sf.date2ACEepoch(tini)
ace_end = sf.date2ACEepoch(tend)
m = self.cw.mag_l2(fnm_ls) # cython function
m.indexes_for_period(ace_ini, ace_end)
#NOTE: make `copy()` to avoid memory overlapping? (maybe
# some weird numpy implementation)
t_ace = m.return_var('ACEepoch').copy() # [ACE epoch seconds]
varname = vname.replace('.'+self.dname,'') # remove '.ACE1sec'
if varname.startswith('rmsB') and self.now!=(tini,tend):
"""
only do the rms calculation if we didn't
for this period (tini,tend) already!
"""
# deduced quantity
Bx = m.return_var('Bgse_x').copy()
By = m.return_var('Bgse_y').copy()
Bz = m.return_var('Bgse_z').copy()
cc = Bx<-900. # True for gaps
# fill gaps with NaNs
Bx[cc], By[cc], Bz[cc] = np.nan, np.nan, np.nan
self.t_out, self.rmsB, self.rmsB_para, self.rmsB_perp = calc_rmsB(
t_inp = t_ace,
B = np.array([Bx,By,Bz]).T,
width = self.width,
fgap = self.fgap,
res_o = self.res_o,
)
"""
NOTE: `t_out` is supposed to have a time resolution
of `res_o`. This can be tested by printing:
>>> print np.unique(t_out[1:]-t_out[:-1])
"""
# to avoid doing the calculation for the
# next rms quantity, in this same period (tini,tend).
self.now = (tini, tend)
if varname=='rmsB':
t_out = self.t_out
var = self.rmsB
elif varname=='rmsB_ratio':
t_out = self.t_out
var = np.square(self.rmsB_perp/self.rmsB_para)
else:
var = m.return_var(varname).copy()
t_out = t_ace
#assert len(var)!=1 and var!=-1, ' ## wrong varname!'
if type(var)==int:
assert var!=-1, " ## error: wrong varname "
cc = var<-100.
var[cc] = np.nan # put NaN in flags
t_utc = t_out + 820454400.0 # [utc sec] ACEepoch -> UTC-sec
kws.pop('data') # because its 'data' does not make sense here, and
# therefore we can replace it below.
return selecc_window_ii(
data=[t_utc, var],
**kws
)
#+++++++++++++++++++++++++++++++++++++
#------------ testing --------------
#+++++++++++++++++++++++++++++++++++++
def main():
ini, end = datetime(2005,1,1), datetime(2005,6,1)
bartels = get_all_bartels()
if __name__=='__main__':
main()
#EOF
| [
"[email protected]"
] | |
46465500fcd64b9c80c677b23f3c2d5ec50ef1f0 | b7e0ea14903ac611413e490c741f5b7e4ffb29df | /MySQL数据库命令.py | d4c6c66dc2a7bd74f2e64fba37b1caaab13b8d63 | [] | no_license | pointworld/python | c729d6fc029edf28a3b331adf7de888af0216646 | 06bee716f2615526757a67dcd35be59abc31ff41 | refs/heads/master | 2021-07-13T13:14:55.540038 | 2017-10-09T14:45:50 | 2017-10-09T14:45:50 | 103,842,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,636 | py |
Collection of common programming commands
MySQL database commands
Log in to MySQL:
mysql -h localhost -u root -p
localhost: IP address;
root: user name;
database: database name (optional; if present, it follows -p)
Drop a database:
mysqladmin -u root -pwrf956750621 drop awesome
Initialize a database:
mysql -u root -p<password> < D:\computer_learning\backup\schema.sql
mysql -u root -p
mysql -h localhost -u root -p database_name
List databases:
show databases;
Select a database:
use database_name;
List tables:
show tables;
Show the column attributes of a table:
show columns from table_name;
describe table_name;
Export an entire database:
mysqldump -u user_name -p database_name > path_file_name
e.g.: mysqldump -u root -p test_db > d:/test_db.sql
Export a single table:
mysqldump -u user_name -p database_name table_name > /tmp/file_name
e.g.: mysqldump -u root -p test_db table1 > d:/table1.sql
Export a database structure only:
mysqldump -u user_name -p -d --add-drop-table database_name > file_name
e.g.: mysqldump -u root -p -d --add-drop-table test_db > test_db.sql
Import a database:
source file_name;
or
mysql -u user_name -p database_name < file_name
e.g.:
source /tmp/bbs.sql;
source d:/bbs.sql;
mysql -u root -p bbs < "d:/bbs.sql"
mysql -u root -p bbs < "/tmp/bbs.sql"
Import a text file into a table (same for Excel files):
load data infile "tables.txt" into table table_name;
e.g.:
load data infile "/tmp/bbs.txt" into table bbs;
load data infile "/tmp/bbs.xls" into table bbs;
load data infile "d:/bbs.txt" into table bbs;
load data infile "d:/bbs.xls" into table bbs;
Export a table to a text file (same for Excel files):
select * into outfile "path_file_name" from table_name;
e.g.:
select * into outfile "/tmp/bbs.txt" from bbs;
select * into outfile "/tmp/bbs.xls" from bbs where id=1;
select * into outfile "d:/bbs.txt" from bbs;
select * into outfile "d:/bbs.xls" from bbs where id=1;
Create a database only if it does not already exist:
create database if not exists database_name;
e.g.: create database if not exists bbs
Create a database:
create database database_name;
e.g.: create database bbs;
Drop a database:
drop database database_name;
e.g.: drop database bbs;
Create a table:
mysql> create table <table_name> ( <column 1 name> <col. 1 type> <col. 1 details>,<column 2 name> <col. 2 type> <col. 2 details>, ...);
e.g.: create table (id int not null auto_increment primary key,name char(16) not null default "jack",date_year date not null);
Delete rows from a table:
delete from table_name;
e.g.:
delete from bbs;
delete from bbs where id=2;
Drop a table from a database:
drop table table_name;
e.g.:
drop table test_db;
rm -f database_name/table_name.* (on Linux)
e.g.:
rm -rf bbs/accp.*
Insert data into a table:
insert into table_name set column_name1=value1,column_name2=value2;
e.g.: insert into bbs set name="jack",date_year="1993-10-01";
insert into table_name values (column1,column2,...);
e.g.: insert into bbs ("2","jack","1993-10-02")
insert into table_name (column_name1,column_name2,...) values (value1,value2);
e.g.: insert into bbs (name,data_year) values ("jack","1993-10-01");
Query data from a table:
select * from table_name;
e.g.: select * from bbs where id=1;
Update data in a table:
update table_name set col_name=new_value where id=1;
e.g.: update bbs set name="tom" where name="jack";
Add a column:
alter table table_name add column field_name datatype not null default "1";
e.g.: alter table bbs add column tel char(16) not null;
Add multiple columns (the keyword column may be omitted):
alter table table_name add column filed_name1 datatype,add column filed_name2 datatype;
e.g.: alter table bbs add column tel char(16) not null,add column address text;
Drop a column:
alter table table_name drop field_name;
e.g.: alter table bbs drop tel;
Change a column's data type:
alter table table_name modify id int unsigned; // change the type of column id to int unsigned
alter table table_name change id sid int unsigned; // rename column id to sid and change its type to int unsigned
Change a column's default value:
alter table table_name modify column_name datatype not null default "";
e.g.: alter table test_db modify name char(16) default not null "yourname";
Rename a table:
alter table table_name rename as new_table_name;
e.g.: alter table bbs rename as bbs_table;
rename table old_table_name to new_table_name;
e.g.: rename table test_db to accp;
Copy a table's structure from an existing table:
create table table2 select * from table1 where 1<>1;
e.g.: create table test_db select * from accp where 1<>1;
Query the current time:
select now();
Query the current user:
select user();
Query the database version:
select version();
Create an index:
alter table table1 add index ind_id(id);
create index ind_id on table1(id);
create unique index ind_id on table1(id); // create a unique index
Drop an index:
drop index idx_id on table1;
alter table table1 drop index ind_id;
Concatenate strings or multiple columns (join id with ":" and column name with "="):
select concat(id,':',name,'=') from table;
limit (select rows 10 through 20):
select * from bbs order by id limit 9,10;
(lists the Nth through Mth records of the query result)
Add an administrator account:
grant all on *.* to user@localhost identified by "password";
Create a table only if it does not already exist:
create table if not exists students(……);
Copy a table:
create table table2 select * from table1;
e.g.: create table test_db select * from accp;
Grant a user remote access to MySQL:
grant all privileges on *.* to "root"@"%" identified by "password" with grant option;
or modify the host field of the user table in the mysql database:
use mysql;
select user,host from user;
update user set host="%" where user="user_name";
Show the current status:
show status;
Show currently connected users:
show processlist;
(for the root user this lists all threads; the number of user connections matches the Threads_connected value from show status;)
QQ mailbox authorization password: xgyphxmyyntjbfbg | [
"[email protected]"
] | |
f7d6c4be894a80d920335e88119b8b8f5cb8e7ba | 325bee18d3a8b5de183118d02c480e562f6acba8 | /germany/germany_l/germany/ScriptDir/Move_2_Nas.py | ad52465414998306bcd3649c403736db8a51f842 | [] | no_license | waynecanfly/spiderItem | fc07af6921493fcfc21437c464c6433d247abad3 | 1960efaad0d995e83e8cf85e58e1db029e49fa56 | refs/heads/master | 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 | Python | UTF-8 | Python | false | false | 1,962 | py |
# -*- coding: utf-8 -*-
import pymysql
from ftplib import FTP
import os
class Move2Nas(object):
num = 0
def __init__(self):
self.conn = pymysql.connect(host="10.100.4.99", port=3306, db="opd_common", user="root", passwd="OPDATA", charset="utf8")
self.cursor = self.conn.cursor()
def get_fiscal_year(self, file_name):
"""获取财年"""
sql = "select fiscal_year from non_financial_statement_index where report_id=%s"
self.cursor.execute(sql, file_name.split(".")[0])
result = self.cursor.fetchone()
if result:
return str(result[0])
else:
sql = "select fiscal_year from financial_statement_index where report_id=%s"
self.cursor.execute(sql, file_name.split(".")[0])
result = self.cursor.fetchone()
if result:
return str(result[0])
else:
return "0000"
def ftpconnect(self, host, username, password):
"""建立连接"""
ftp = FTP()
ftp.connect(host, 21)
ftp.login(username, password)
print(ftp.getwelcome())
return ftp
def uploadfile(self, ftp, remotepath, localpath):
"""从本地上传文件到FTP"""
bufsize = 1024
fp = open(localpath, 'rb')
ftp.storbinary('STOR ' + remotepath, fp, bufsize)
ftp.set_debuglevel(0)
fp.close()
def Move2NasMain(self, LocalDir, NasDir):
ftp = self.ftpconnect("10.100.4.102", "admin", "originp123")
dir_list = os.listdir(LocalDir)
for temp in dir_list:
fiscal_year = self.get_fiscal_year(temp)
try:
ftp.mkd(NasDir + fiscal_year)
except:
pass
self.num += 1
self.uploadfile(ftp, NasDir + fiscal_year + "/" + temp, LocalDir + "/" + temp)
print("已上传%s个文件到NAS服务器" % self.num)
ftp.quit()
| [
"[email protected]"
] | |
eb9c6c4846c223dc0bc16732c3ae5abb061b94d9 | e84242b4e00b2afdcda6d9b68292631c3c86d9f1 | /hangar_2019/vinogradov.py | bfe7181d8335edf9fb5ed44ce09f9ddd4b9a056b | [] | no_license | Gleb-bit/astrobox-project | ac12b92255febafd196cf2ba717ecd4aa3771fb5 | de6a74db001a4d4e9456d8946a741164190b32ae | refs/heads/main | 2023-03-18T18:19:32.730946 | 2021-02-27T15:49:07 | 2021-03-03T16:06:31 | 346,435,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py |
from astrobox.core import Drone
ASTEROIDS_DRONES = {}
class VinogradovDrone(Drone):
def on_born(self):
if len(ASTEROIDS_DRONES) == 0:
self._fill_holder()
asteroid = self.choose_asteroid()
self.move_at(asteroid)
def on_stop_at_asteroid(self, asteroid):
if not self.mothership.is_full:
self.load_from(asteroid)
def on_load_complete(self):
asteroid = self.choose_asteroid()
if not self.is_full and asteroid is not None:
self.move_at(asteroid)
else:
self.move_at(self.my_mothership)
def on_stop_at_mothership(self, mothership):
self.unload_to(mothership)
def on_unload_complete(self):
asteroid = self.choose_asteroid()
if asteroid is not None:
self.move_at(asteroid)
def _fill_holder(self):
for asteroid in self.asteroids:
if asteroid.payload > 0:
if asteroid not in ASTEROIDS_DRONES:
ASTEROIDS_DRONES[asteroid] = []
    def choose_asteroid(self):
        # drop exhausted asteroids this drone is assigned to; the dict values
        # are lists of drones, so membership is tested with `in`, and we
        # iterate over a copy because the dict is mutated inside the loop
        for aster, drones in list(ASTEROIDS_DRONES.items()):
            if self in drones and aster.is_empty:
                ASTEROIDS_DRONES.pop(aster)
asteroids_params = [asteroid for asteroid in self.asteroids if not asteroid.is_empty]
asteroids_params.sort(key=lambda ast: self.distance_to(ast)/ast.payload)
if len(asteroids_params) > 0:
for sorted_asteroid in asteroids_params:
asteroid_drones = ASTEROIDS_DRONES[sorted_asteroid]
free_space = [drone.free_space for drone in asteroid_drones if drone != self]
free_space.append(self.free_space)
free_space_sum = sum(free_space)
if sorted_asteroid.payload >= free_space_sum*.8:
ASTEROIDS_DRONES[sorted_asteroid].append(self)
return sorted_asteroid
return asteroids_params[0]
drone_class = VinogradovDrone
| [
"[email protected]"
] | |
89cf0d50fb6e2df3124bee8b77421b3fd186c0fb | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/primes.py | c910b9b14a132ba9a5b1670d224af1c8c2d70824 | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py |
def setup():
primes = []
for num in range(2,100000+1):
is_prime = True
for p in primes:
if num%p == 0:
is_prime = False
break
if is_prime:
primes.append(num)
return primes
primes = setup()
def prime_factors(num):
    # Collect the distinct prime factors, dividing each one out so that a
    # leftover prime larger than sqrt(num) is still reported (e.g. 202 = 2*101).
    ans = []
    for prime in primes:
        if prime * prime > num:
            break
        if num % prime == 0:
            ans.append(prime)
            while num % prime == 0:
                num //= prime
    if num > 1:
        ans.append(num)
    return ans
print(prime_factors(4))
print(prime_factors(6))
print(prime_factors(15))
print(prime_factors(35))
print(prime_factors(3*13*29*43*111)) | [
"[email protected]"
] | |
d374f557f75fbfa71084b996ee9029ef19b9f982 | fa7e0fd1013762eac8d878b70582544a5598600d | /django_3_2_18/users/views.py | 748e20b3538248543b14b3b7547299dacb80c910 | [] | no_license | gy0109/django_2.18 | b8c48005027720ab46e7e050ff139b57437ff5f6 | 0c9971fae249a4f1932b3fc20d29fc7232b244ab | refs/heads/master | 2020-04-24T01:41:05.553801 | 2019-02-21T12:37:36 | 2019-02-21T12:37:36 | 171,608,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py |
import json
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
# Create your views here. (views)
# /user/index/
def index(request):
"""
    The index view.
    :param request: the received request object
    :return: the response object
"""
return HttpResponse('hello world!')
def weather(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
return HttpResponse('ok!')
def weather1(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
return HttpResponse('ok! weather1! ')
def weather2(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
a = request.GET.get('a')
b = request.GET.get('b')
a_list = request.GET.getlist('a')
print(a, b, a_list)
return HttpResponse('ok! weather1! ')
def get_post_params(request):
    # Get form data from the POST request
a = request.POST.get('a')
b = request.POST.get('b')
a_list = request.POST.getlist('a')
print(a, b, a_list)
return HttpResponse('get_post')
def get_body_json(request):
json_str = request.body.decode()
req_data = json.loads(json_str)
print(request.META['CONTENT_TYPE'])
print(request.META['SERVER_NAME'])
print(request.method)
print(request.user)
print(request.encoding)
print(request.path)
    print(request.FILES)
print(req_data['a'])
print(req_data['b'])
return HttpResponse('OK get_body')
# Custom response body
def response_json(request):
json_dict = {'name': 'gy', 'age': 12}
# return HttpResponse('OK', content_type='text/html', status=200)
return JsonResponse(json_dict)
| [
"[email protected]"
] | |
2a79ebc762ce1b21a171102de8c4921db995312e | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2018/DNN/configuration_2018.py | b9aa3c9058017e04aa91fb0713b0804969ea1ed1 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 917 | py |
# example of configuration file
treeName= 'Events'
tag = 'ggH_SF_split2'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts_all.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 35.867
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
# outputDirPlots = '~/www/plotCR'
outputDirPlots = 'plot_'+tag+'_DNN_signal'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"[email protected]"
] | |
57b410d649fbf712a2dc614d4c684ae1b830064c | 46900ffadf08b92f656ff5d0a0b71a7717e4415c | /old_trash/renthop_src/stacking/stacking_no_mngr_medians.py | 0813eec07659754692009389b16c91cbee03e00e | [] | no_license | ubbikk/kaggle | 9765ca9530d139525752b4286c20e971b0a97be7 | cc7ea173ad8215a42108a973abe2cf0095517588 | refs/heads/master | 2021-01-19T10:32:44.615755 | 2017-11-11T19:28:42 | 2017-11-11T19:28:42 | 82,200,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,496 | py |
import json
import os
import traceback
from time import time, sleep
import seaborn as sns
import pandas as pd
from collections import OrderedDict
import sys
from matplotlib import pyplot
from scipy.sparse import coo_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
import numpy as np
import xgboost as xgb
from sklearn.metrics import log_loss
from xgboost import plot_importance
from sklearn.model_selection import train_test_split
from scipy.stats import boxcox
from scipy.spatial import KDTree
import math
from pymongo import MongoClient
TARGET = u'interest_level'
TARGET_VALUES = ['low', 'medium', 'high']
MANAGER_ID = 'manager_id'
BUILDING_ID = 'building_id'
LATITUDE = 'latitude'
LONGITUDE = 'longitude'
PRICE = 'price'
BATHROOMS = 'bathrooms'
BEDROOMS = 'bedrooms'
DESCRIPTION = 'description'
DISPLAY_ADDRESS = 'display_address'
STREET_ADDRESS = 'street_address'
LISTING_ID = 'listing_id'
PRICE_PER_BEDROOM = 'price_per_bedroom'
F_COL = u'features'
CREATED_MONTH = "created_month"
CREATED_DAY = "created_day"
CREATED_MINUTE = 'created_minute'
CREATED_HOUR = 'created_hour'
DAY_OF_WEEK = 'dayOfWeek'
CREATED = 'created'
LABEL = 'lbl'
BED_NORMALIZED = 'bed_norm'
BATH_NORMALIZED = 'bath_norm'
COL = 'normalized_features'
NEI_1 = 'nei1'
NEI_2 = 'nei2'
NEI_3 = 'nei3'
NEI = 'neighbourhood'
BORO = 'boro'
INDEX_COPY = 'index_copy'
FEATURES = [u'bathrooms', u'bedrooms', u'building_id', u'created',
u'description', u'display_address', u'features',
u'latitude', u'listing_id', u'longitude', MANAGER_ID, u'photos',
u'price', u'street_address']
sns.set(color_codes=True)
sns.set(style="whitegrid", color_codes=True)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 5000)
train_file = '../../data/redhoop/train.json'
test_file = '../../data/redhoop/test.json'
train_geo_file = '../../data/redhoop/with_geo/train_geo.json'
test_geo_file = '../../data/redhoop/with_geo/test_geo.json'
rent_file = '../../data/neis_from_renthop_lower.json'
seeds_fp = '../../seeds.json'
splits_big_fp='../../splits_big.json'
splits_small_fp='../../splits_small.json'
magic_file = '../../data/redhoop/listing_image_time.csv'
# train_file = '../data/redhoop/train.json'
# test_file = '../data/redhoop/test.json'
# train_geo_file = '../data/redhoop/with_geo/train_geo.json'
# test_geo_file = '../data/redhoop/with_geo/test_geo.json'
# rent_file = 'with_geo/data/neis_from_renthop_lower.json'
# seeds_fp = '../../seeds.json'
# splits_big_fp='../../splits_big.json'
# splits_small_fp='../../splits_small.json'
#########################################################################################
# Mongo Control
#########################################################################################
SEEDS = json.load(open(seeds_fp))
SPLITS_BIG=json.load(open(splits_big_fp))
SPLITS_SMALL=json.load(open(splits_small_fp))
def getN(mongo_host, name, experiment_max_time):
client = MongoClient(mongo_host, 27017)
db = client[name]
collection = db['splits_control'.format(name)]
res = [x for x in collection.find()]
res.sort(key=lambda s: s['N'])
for con in res:
if (not con['finished']) and (time()-con['time'] > experiment_max_time):
N = con['N']
collection.replace_one({'N': N}, {'N': N, 'time': time(), 'finished': False})
return N
N = len(res)
collection.insert_one({'N': N, 'time': time(), 'finished': False})
return N
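# getN() acts as a crude work-claim queue: each worker grabs the first split
# whose previous owner exceeded experiment_max_time without finishing (or
# appends a brand-new N), and is expected to mark it finished later via
# complete_split_mongo(); stale unfinished splits are handed out again.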
def split_from_N(df, N):
    # cycle through the 5 precomputed index splits loaded above
    N = N % 5
    return df.loc[SPLITS_BIG[N], :], df.loc[SPLITS_SMALL[N], :]
def complete_split_mongo(N, name, mongo_host, probs, test_indexes, losses, importance, f_names):
client = MongoClient(mongo_host, 27017)
db = client[name]
collection = db['probs']
collection.insert_one({'N': N, 'val': probs, 'index':test_indexes})
collection = db['losses']
collection.insert_one({'N': N, 'val': losses})
collection = db['importance']
collection.insert_one({'N': N, 'val': importance})
collection = db['features']
collection.insert_one({'N': N, 'val': f_names})
collection = db['splits_control'.format(name)]
collection.replace_one({'N': N}, {'N': N, 'time': time(), 'finished': True})
def get_probs_from_est(estimator, proba, test_df):
classes = [x for x in estimator.classes_]
res = {}
for cl in classes:
p=proba[:, classes.index(cl)]
res[cl] = [a.item() for a in p]
return res, [x for x in test_df.index.values]
def complete_split_file(ii, l, name):
fp = name + '_results.json'
ii_fp = name + '_importance.json'
with open(fp, 'w+') as f:
json.dump(l, f)
with open(ii_fp, 'w+') as f:
json.dump(ii, f)
#########################################################################################
# Mongo Control
#########################################################################################
#########################################################################################
# Writing Results
#########################################################################################
def write_results(N, name, mongo_host, probs, test_indexes, l_results, ii_importance, f_names):
losses = l_results[len(l_results) - 1]
importance = ii_importance[len(ii_importance) - 1]
retries = 5
while retries >= 0:
try:
complete_split_mongo(N, name, mongo_host, probs, test_indexes, losses, importance, f_names)
break
except:
traceback.print_exc()
retries -= 1
sleep(30)
try:
complete_split_file(ii_importance, l_results, name)
except:
traceback.print_exc()
def out(l, loss, l_1K, loss1K, num, t):
print '\n\n'
print '#{}'.format(num)
if loss1K is not None:
print 'loss1K {}'.format(loss1K)
print 'avg_loss1K {}'.format(np.mean(l_1K))
print get_3s_confidence_for_mean(l_1K)
print
print 'loss {}'.format(loss)
print 'avg_loss {}'.format(np.mean(l))
print get_3s_confidence_for_mean(l)
print 'std {}'.format(np.std(l))
print 'time {}'.format(t)
def get_3s_confidence_for_mean(l):
std = np.std(l) / math.sqrt(len(l))
m = np.mean(l)
start = m - 3 * std
end = m + 3 * std
return '3s_confidence: [{}, {}]'.format(start, end)
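# e.g. (hypothetical numbers) for losses with mean 0.55, std 0.01 and
# len(l) == 100, the standard error of the mean is 0.01/sqrt(100) = 0.001,
# so the reported interval is [0.547, 0.553].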
#########################################################################################
# Writing Results
#########################################################################################
#########################################################################################
# loading data
#########################################################################################
def load_df(file, geo_file):
df = pd.read_json(file)
geo = pd.read_json(geo_file)
df[NEI] = geo[NEI]
df['tmp'] = df[NEI].apply(transform_geo_to_rent)
df[NEI_1] = df['tmp'].apply(lambda s: None if s is None else s[0])
df[NEI_2] = df['tmp'].apply(lambda s: None if s is None else s[1])
df[NEI_3] = df['tmp'].apply(lambda s: None if s is None else s[2])
normalize_bed_bath(df)
return basic_preprocess(df)
def load_train():
df = load_df(train_file, train_geo_file)
df[LABEL] = 'train'
return df
def load_test():
df = load_df(test_file, test_geo_file)
df[LABEL] = 'test'
return df
def load_rent():
m = json.load(open(rent_file))
res = {}
for boro, boro_m in m.iteritems():
for sub_boro, neis in boro_m.iteritems():
for n in neis:
res[n] = [n, sub_boro, boro]
return res
def basic_preprocess(df):
df['num_features'] = df[u'features'].apply(len)
df['num_photos'] = df['photos'].apply(len)
df['word_num_in_descr'] = df['description'].apply(lambda x: len(x.split(' ')))
df["created"] = pd.to_datetime(df["created"])
# df["created_year"] = df["created"].dt.year
df[CREATED_MONTH] = df["created"].dt.month
df[CREATED_DAY] = df["created"].dt.day
df[CREATED_HOUR] = df["created"].dt.hour
df[CREATED_MINUTE] = df["created"].dt.minute
df[DAY_OF_WEEK] = df['created'].dt.dayofweek
bc_price, tmp = boxcox(df['price'])
df['bc_price'] = bc_price
df[INDEX_COPY] = df.index.values
return df
def fix_index(df):
df.index = df[INDEX_COPY]
#########################################################################################
# loading data
#########################################################################################
#########################################################################################
# Creating Neis
#########################################################################################
def normalize_bed_bath(df):
df['bed_bath']=df[[BEDROOMS, BATHROOMS]].apply(lambda s: (s[BEDROOMS], s[BATHROOMS]), axis=1)
def norm(s):
bed=s[0]
bath=s[1]
if bed==0:
if bath>=1.5:
return [0,2.0]
elif bed==1:
if bath>=2.5:
return [1,2.0]
elif bed==2:
if bath>=3.0:
return [2,3.0]
elif bed==3:
if bath>=4.0:
return [3,4.0]
elif bed==4:
if bath==0:
return [4,1]
elif bath>=4.5:
return [4,4.5]
elif bed>=5:
if bath <=1.5:
return [5,1.5]
elif bath <=2.5:
return [5,2.5]
elif bath <=3.5:
return [5,3]
else:
return [5,4]
return [bed, bath]
df['bed_bath']=df['bed_bath'].apply(norm)
df[BED_NORMALIZED]=df['bed_bath'].apply(lambda s:s[0])
df[BATH_NORMALIZED]=df['bed_bath'].apply(lambda s:s[1])
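# e.g. a (bed=1, bath=3.0) listing is clipped to [1, 2.0], and anything with
# 5+ bedrooms is bucketed into one of four coarse bathroom classes
# (1.5, 2.5, 3 or 4), which keeps rare bed/bath combinations from producing
# near-empty categories.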
EXACT_MAP = {
'gramercy': 'gramercy park',
'clinton': "hell's kitchen",
'turtle bay': 'midtown east',
'tudor city': 'midtown east',
'sutton place': 'midtown east',
'hamilton heights': 'west harlem',
'bedford stuyvesant': 'bedford-stuyvesant',
'hunters point': 'long island city',
'battery park': 'battery park city',
'manhattanville': 'west harlem',
'carnegie hill': 'upper east side',
'stuyvesant town': 'stuyvesant town - peter cooper village',
'downtown': 'downtown brooklyn',
'morningside heights': 'west harlem',
'spuyten duyvil': 'riverdale',
'prospect lefferts gardens': 'flatbush',
'greenwood': 'greenwood heights',
'fort hamilton': 'bay ridge',
'high bridge': 'highbridge',
'columbia street waterfront district': 'carroll gardens',
'ocean parkway': 'midwood',
'north riverdale': 'riverdale',
'astoria heights': 'astoria',
'tremont': 'mount hope',
'homecrest': 'sheepshead bay',
'new utrecht': 'borough park',
'fieldston': 'riverdale',
'georgetown': 'upper east side',
'tottenville': 'washington heights',
'hillcrest': 'kew gardens hills',
'oakland gardens': 'forest hills',
'pomonok': 'washington heights',
'wingate': 'east flatbush',
'fordham': 'fordham manor',
'forest hills gardens': 'forest hills',
'columbus circle': "hell's kitchen"
}
SPECIAL = {
'midtown': ('midtown', 'midtown manhattan', 'manhattan'),
'harlem': ('harlem', 'upper manhattan', 'manhattan')
}
ONLY_SECOND = {
'castle hill': ('2nd', 'east bronx', 'bronx'),
'throggs neck': ('2nd', 'east bronx', 'bronx'),
'soundview': ('2nd', 'east bronx', 'bronx'),
'port morris': ('2nd', 'east bronx', 'bronx'),
}
ONLY_THIRD = {
'queens village': ('3rd', '3rd', 'queens'),
'laurelton': ('3rd', '3rd', 'queens')
}
def transform_geo_to_rent(s):
if s is None:
return s
s = s.lower()
rent = load_rent()
if s in rent:
return rent[s]
if s in EXACT_MAP:
return rent[EXACT_MAP[s]]
if s in SPECIAL:
return SPECIAL[s]
return ('not_mapped_yet', 'not_mapped_yet', 'not_mapped_yet')
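# e.g. transform_geo_to_rent('Clinton') resolves through EXACT_MAP to
# rent["hell's kitchen"], i.e. a (nei, sub-boro, boro) triple, while any
# string absent from all maps falls through to the
# ('not_mapped_yet', 'not_mapped_yet', 'not_mapped_yet') triple; note that
# ONLY_SECOND and ONLY_THIRD are defined but never consulted here.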
#########################################################################################
# Creating Neis
#########################################################################################
#########################################################################################
# MNGR HCC
#########################################################################################
def hcc_encode(train_df, test_df, variable, binary_target, k=5, f=1, g=1, r_k=0.01, folds=5):
"""
See "A Preprocessing Scheme for High-Cardinality Categorical Attributes in
Classification and Prediction Problems" by Daniele Micci-Barreca
"""
prior_prob = train_df[binary_target].mean()
hcc_name = "_".join(["hcc", variable, binary_target])
seed = int(time())
print 'seed hcc {}'.format(seed)
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)
for big_ind, small_ind in skf.split(np.zeros(len(train_df)), train_df['interest_level']):
big = train_df.iloc[big_ind]
small = train_df.iloc[small_ind]
grouped = big.groupby(variable)[binary_target].agg({"size": "size", "mean": "mean"})
grouped["lambda"] = 1 / (g + np.exp((k - grouped["size"]) / f))
grouped[hcc_name] = grouped["lambda"] * grouped["mean"] + (1 - grouped["lambda"]) * prior_prob
if hcc_name in small.columns:
del small[hcc_name]
small = pd.merge(small, grouped[[hcc_name]], left_on=variable, right_index=True, how='left')
small.loc[small[hcc_name].isnull(), hcc_name] = prior_prob
small[hcc_name] = small[hcc_name] * np.random.uniform(1 - r_k, 1 + r_k, len(small))
train_df.loc[small.index, hcc_name] = small[hcc_name]
grouped = train_df.groupby(variable)[binary_target].agg({"size": "size", "mean": "mean"})
grouped["lambda"] = 1 / (g + np.exp((k - grouped["size"]) / f))
grouped[hcc_name] = grouped["lambda"] * grouped["mean"] + (1 - grouped["lambda"]) * prior_prob
test_df = pd.merge(test_df, grouped[[hcc_name]], left_on=variable, right_index=True, how='left')
test_df.loc[test_df[hcc_name].isnull(), hcc_name] = prior_prob
return train_df, test_df, hcc_name
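# Illustrative sketch (not part of the original pipeline): the shrinkage
# hcc_encode applies per category. A category observed `size` times gets weight
# lam = 1 / (g + exp((k - size) / f)) and is encoded as
# lam * category_mean + (1 - lam) * prior, so rare categories stay near the
# global prior. The numbers below are made up.
def _hcc_shrinkage_demo(size, cat_mean, prior, k=5, f=1, g=1):
    lam = 1.0 / (g + np.exp((k - size) / float(f)))
    return lam * cat_mean + (1 - lam) * prior
# _hcc_shrinkage_demo(2, 0.9, 0.3)  -> ~0.33 (barely moves off the prior)
# _hcc_shrinkage_demo(50, 0.9, 0.3) -> ~0.90 (keeps its own mean)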
def process_mngr_categ_preprocessing(train_df, test_df):
col = MANAGER_ID
new_cols = []
for df in [train_df, test_df]:
df['target_high'] = df[TARGET].apply(lambda s: 1 if s == 'high' else 0)
df['target_medium'] = df[TARGET].apply(lambda s: 1 if s == 'medium' else 0)
for binary_col in ['target_high', 'target_medium']:
train_df, test_df, new_col = hcc_encode(train_df, test_df, col, binary_col)
new_cols.append(new_col)
return train_df, test_df, new_cols
#########################################################################################
# MNGR HCC
#########################################################################################
#########################################################################################
# MNGR NUM
#########################################################################################
def process_manager_num(train_df, test_df):
mngr_num_col = 'manager_num'
df = train_df.groupby(MANAGER_ID)[MANAGER_ID].count()
# df[df<=1]=-1
df = df.apply(float)
df = df.to_frame(mngr_num_col)
train_df = pd.merge(train_df, df, left_on=MANAGER_ID, right_index=True)
test_df = pd.merge(test_df, df, left_on=MANAGER_ID, right_index=True, how='left')
return train_df, test_df, [mngr_num_col]
#########################################################################################
# MNGR NUM
#########################################################################################
#########################################################################################
# BID HCC
#########################################################################################
def process_bid_categ_preprocessing(train_df, test_df):
col = BUILDING_ID
new_cols = []
for df in [train_df, test_df]:
df['target_high'] = df[TARGET].apply(lambda s: 1 if s == 'high' else 0)
df['target_medium'] = df[TARGET].apply(lambda s: 1 if s == 'medium' else 0)
for binary_col in ['target_high', 'target_medium']:
train_df, test_df, new_col = hcc_encode(train_df, test_df, col, binary_col)
new_cols.append(new_col)
return train_df, test_df, new_cols
#########################################################################################
# BID HCC
#########################################################################################
#########################################################################################
# BID NUM
#########################################################################################
def process_bid_num(train_df, test_df):
bid_num_col = 'bid_num'
df = train_df.groupby(BUILDING_ID)[BUILDING_ID].count()
# df[df<=1]=-1
df = df.apply(float)
df = df.to_frame(bid_num_col)
train_df = pd.merge(train_df, df, left_on=BUILDING_ID, right_index=True)
test_df = pd.merge(test_df, df, left_on=BUILDING_ID, right_index=True, how='left')
return train_df, test_df, [bid_num_col]
#########################################################################################
# BID NUM
#########################################################################################
#########################################################################################
# Listing id
#########################################################################################
def process_listing_id(train_df, test_df):
return train_df, test_df, [LISTING_ID]
#########################################################################################
# Listing id
#########################################################################################
#########################################################################################
# NEI 123
#########################################################################################
def dummy_col(col_name, val):
return '{}_{}'.format(col_name, val)
def get_dummy_cols(col_name, col_values):
return ['{}_{}'.format(col_name, val) for val in col_values]
def process_nei123(train_df, test_df):
df = pd.concat([train_df, test_df])
normalize_bed_bath(df)
sz = float(len(df))
# neis_cols = [NEI_1, NEI_2, NEI_3]
new_cols = []
for col in [NEI_1, NEI_2]:
new_col = 'freq_of_{}'.format(col)
df[new_col] = df.groupby(col)[PRICE].transform('count')
df[new_col] = df[new_col] / sz
new_cols.append(new_col)
beds_vals = [0, 1, 2, 3]
for col in [NEI_1, NEI_2, NEI_3]:
for bed in beds_vals:
new_col = 'freq_of_{}, with bed={}'.format(col, bed)
df[new_col] = df.groupby([col, BED_NORMALIZED])[PRICE].transform('count')
df[new_col] = df[new_col] / sz
new_cols.append(new_col)
for col in [NEI_1, NEI_2]:
new_col = 'median_ratio_of_{}'.format(col)
df['tmp'] = df.groupby([col, BEDROOMS])[PRICE].transform('median')
df[new_col] = df[PRICE] - df['tmp']
df[new_col] = df[new_col] / df['tmp']
new_cols.append(new_col)
for col in [NEI_1, NEI_2, NEI_3]:
vals = set(df[col])
if None in vals:
vals.remove(None)
df = pd.get_dummies(df, columns=[col])
dummies = get_dummy_cols(col, vals)
new_cols += dummies
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# NEI 123
#########################################################################################
#########################################################################################
# MNGR AVG PRICE
#########################################################################################
def process_mngr_avg_median_price(train_df, test_df):
df = pd.concat([train_df, test_df])
bed_bath_median = 'bed_bath_median'
df[bed_bath_median] = df.groupby([BED_NORMALIZED, BATH_NORMALIZED])[PRICE].transform('median')
bed_bath_diff = 'bed_bath_diff'
df[bed_bath_diff]=df[PRICE]-df[bed_bath_median]
    bed_bath_ratio = 'bed_bath_ratio'
    df[bed_bath_ratio] = df[bed_bath_diff] / df[bed_bath_median]
group_by = df.groupby(MANAGER_ID)[bed_bath_diff]
df['gr_by_mngr_bed_bath_diff_median']= group_by.transform('median')
df['gr_by_mngr_bed_bath_diff_quantile_0.25']= group_by.transform('quantile', 0.25)
df['gr_by_mngr_bed_bath_diff_quantile_0.75']= group_by.transform('quantile', 0.75)
df['gr_by_mngr_bed_bath_diff_mean']= group_by.transform('mean')
    group_by = df.groupby(MANAGER_ID)[bed_bath_ratio]
df['gr_by_mngr_bed_bath_ratio_median']= group_by.transform('median')
df['gr_by_mngr_bed_bath_ratio_quantile_0.25']= group_by.transform('quantile', 0.25)
df['gr_by_mngr_bed_bath_ratio_quantile_0.75']= group_by.transform('quantile', 0.75)
df['gr_by_mngr_bed_bath_ratio_mean']= group_by.transform('mean')
new_cols= ['bed_bath_diff','bed_bath_ratio','bed_bath_median',
'gr_by_mngr_bed_bath_diff_median','gr_by_mngr_bed_bath_diff_mean',
'gr_by_mngr_bed_bath_diff_quantile_0.25','gr_by_mngr_bed_bath_diff_quantile_0.75',
'gr_by_mngr_bed_bath_ratio_median', 'gr_by_mngr_bed_bath_ratio_mean' ,
'gr_by_mngr_bed_bath_ratio_quantile_0.25', 'gr_by_mngr_bed_bath_ratio_quantile_0.75'
]
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# MNGR AVG PRICE
#########################################################################################
#########################################################################################
# OTHER MEDIANS
#########################################################################################
def process_other_mngr_medians(train_df, test_df):
features = ['num_features', 'num_photos', 'word_num_in_descr', BED_NORMALIZED, BATH_NORMALIZED]
df = pd.concat([train_df, test_df])
new_cols = []
for f in features:
col = 'get_by_mngr_{}_mean'.format(f)
df[col] = df.groupby(MANAGER_ID)[f].transform('mean')
new_cols.append(col)
if f in [BATH_NORMALIZED, BED_NORMALIZED]:
continue
col = 'get_by_mngr_{}_median'.format(f)
new_cols.append(col)
df[col] = df.groupby(MANAGER_ID)[f].transform('median')
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# OTHER MEDIANS
#########################################################################################
#########################################################################################
# OTHER MEDIANS NEW
#########################################################################################
def get_main_value(s):
n = int(0.66*len(s))
vals = {k:0 for k in set(s)}
for x in s:
vals[x]+=1
for k,v in vals.iteritems():
if v>=n:
return k
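# Quick illustration of get_main_value (made-up hour lists): it returns the
# value that covers at least int(0.66 * len(s)) entries, else None.
#   get_main_value([9, 9, 9, 14])  -> 9
#   get_main_value([9, 14, 11, 8]) -> None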
def process_other_mngr_medians_new(train_df, test_df):
df = pd.concat([train_df, test_df])
total_minutes_col='total_minutes'
    df[total_minutes_col] = df[CREATED_MINUTE] + 60 * df[CREATED_HOUR]  # minutes since midnight
features = [PRICE, LATITUDE, LONGITUDE, total_minutes_col]
new_cols = []
for f in features:
col = 'get_by_mngr_{}_mean'.format(f)
df[col] = df.groupby(MANAGER_ID)[f].transform('mean')
new_cols.append(col)
col = 'get_by_mngr_{}_median'.format(f)
new_cols.append(col)
df[col] = df.groupby(MANAGER_ID)[f].transform('median')
main_hour='main_hour'
bl = df.groupby(MANAGER_ID)[CREATED_HOUR].apply(get_main_value).to_frame(main_hour)
df = pd.merge(df, bl, left_on=MANAGER_ID, right_index=True)
new_cols.append(main_hour)
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# OTHER MEDIANS NEW
#########################################################################################
#########################################################################################
# FEATURES
#########################################################################################
GROUPING_MAP = OrderedDict(
[('elevator', {'vals': ['elevator'], 'type': 'in'}),
('hardwood floors', {'vals': ['hardwood'], 'type': 'in'}),
('cats allowed', {'vals': ['cats'], 'type': 'in'}),
('dogs allowed', {'vals': ['dogs'], 'type': 'in'}),
('doorman', {'vals': ['doorman', 'concierge'], 'type': 'in'}),
('dishwasher', {'vals': ['dishwasher'], 'type': 'in'}),
('laundry in building', {'vals': ['laundry'], 'type': 'in'}),
('no fee', {'vals': ['no fee', 'no broker fee', 'no realtor fee'], 'type': 'in'}),
('reduced fee', {'vals': ['reduced fee', 'reduced-fee', 'reducedfee'], 'type': 'in'}),
('fitness center', {'vals': ['fitness'], 'type': 'in'}),
('pre-war', {'vals': ['pre-war', 'prewar'], 'type': 'in'}),
('roof deck', {'vals': ['roof'], 'type': 'in'}),
('outdoor space',
{'vals': ['outdoor space', 'outdoor-space', 'outdoor areas', 'outdoor entertainment'], 'type': 'in'}),
('common outdoor space',
{'vals': ['common outdoor', 'publicoutdoor', 'public-outdoor', 'common-outdoor'], 'type': 'in'}),
('private outdoor space', {'vals': ['private outdoor', 'private-outdoor', 'privateoutdoor'], 'type': 'in'}),
('dining room', {'vals': ['dining'], 'type': 'in'}),
('high speed internet', {'vals': ['internet'], 'type': 'in'}),
('balcony', {'vals': ['balcony'], 'type': 'in'}),
('swimming pool', {'vals': ['swimming', 'pool'], 'type': 'in'}),
('new construction', {'vals': ['new construction'], 'type': 'in'}),
('terrace', {'vals': ['terrace'], 'type': 'in'}),
('exclusive', {'vals': ['exclusive'], 'type': 'equal'}),
('loft', {'vals': ['loft'], 'type': 'in'}),
('garden/patio', {'vals': ['garden'], 'type': 'in'}),
('wheelchair access', {'vals': ['wheelchair'], 'type': 'in'}),
('fireplace', {'vals': ['fireplace'], 'type': 'in'}),
('simplex', {'vals': ['simplex'], 'type': 'in'}),
('lowrise', {'vals': ['lowrise', 'low-rise'], 'type': 'in'}),
('garage', {'vals': ['garage'], 'type': 'in'}),
('furnished', {'vals': ['furnished'], 'type': 'equal'}),
('multi-level', {'vals': ['multi-level', 'multi level', 'multilevel'], 'type': 'in'}),
('high ceilings', {'vals': ['high ceilings', 'highceilings', 'high-ceilings'], 'type': 'in'}),
('parking space', {'vals': ['parking'], 'type': 'in'}),
('live in super', {'vals': ['super'], 'vals2': ['live', 'site'], 'type': 'two'}),
('renovated', {'vals': ['renovated'], 'type': 'in'}),
('green building', {'vals': ['green building'], 'type': 'in'}),
('storage', {'vals': ['storage'], 'type': 'in'}),
('washer', {'vals': ['washer'], 'type': 'in'}),
('stainless steel appliances', {'vals': ['stainless'], 'type': 'in'})])
def normalize_df(df):
df[COL] = df[F_COL].apply(lambda l: [x.lower() for x in l])
def lambda_in(in_arr):
def is_in(l):
for f in l:
for t in in_arr:
if t in f:
return 1
return 0
return is_in
def lambda_equal(val):
def is_equal(l):
for f in l:
if f.strip() == val:
return 1
return 0
return is_equal
def lambda_two_arr(arr1, arr2):
def is_in(l):
for f in l:
for x in arr1:
for y in arr2:
if x in f and y in f:
return 1
return 0
return is_in
def process_features(df):
normalize_df(df)
new_cols = []
for col, m in GROUPING_MAP.iteritems():
new_cols.append(col)
tp = m['type']
if tp == 'in':
df[col] = df[COL].apply(lambda_in(m['vals']))
elif tp == 'equal':
df[col] = df[COL].apply(lambda_equal(m['vals'][0]))
elif tp == 'two':
df[col] = df[COL].apply(lambda_two_arr(m['vals'], m['vals2']))
else:
            raise Exception('unknown grouping type: {}'.format(tp))
return df, new_cols
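# Matcher semantics above, illustrated with made-up feature strings:
def _feature_match_demo():
    assert lambda_in(['doorman', 'concierge'])(['24-hour doorman', 'gym']) == 1   # substring anywhere
    assert lambda_equal('exclusive')([' exclusive ']) == 1                        # exact match after strip
    assert lambda_two_arr(['super'], ['live', 'site'])(['live-in super']) == 1    # both substrings in one entry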
#########################################################################################
# FEATURES
#########################################################################################
####################################################
#MAGIC
#######################################################
def process_magic(train_df, test_df):
image_date = pd.read_csv(magic_file)
image_date.loc[80240,"time_stamp"] = 1478129766
# image_date.loc[image_date['Listing_Id']==7119094, "time_stamp"] = 1478129766
image_date["img_date"] = pd.to_datetime(image_date["time_stamp"], unit="s")
image_date["img_days_passed"] = (image_date["img_date"].max() - image_date["img_date"]).astype(
"timedelta64[D]").astype(int)
image_date["img_date_month"] = image_date["img_date"].dt.month
image_date["img_date_week"] = image_date["img_date"].dt.week
image_date["img_date_day"] = image_date["img_date"].dt.day
image_date["img_date_dayofweek"] = image_date["img_date"].dt.dayofweek
image_date["img_date_dayofyear"] = image_date["img_date"].dt.dayofyear
image_date["img_date_hour"] = image_date["img_date"].dt.hour
image_date["img_date_minute"] = image_date["img_date"].dt.minute
image_date["img_date_second"] = image_date["img_date"].dt.second
image_date["img_date_monthBeginMidEnd"] = image_date["img_date_day"].apply(
lambda x: 1 if x < 10 else 2 if x < 20 else 3)
df = pd.concat([train_df, test_df])
df = pd.merge(df, image_date, left_on=LISTING_ID, right_on='Listing_Id')
new_cols = ["img_days_passed","img_date_month","img_date_week",
"img_date_day","img_date_dayofweek","img_date_dayofyear",
"img_date_hour", "img_date_monthBeginMidEnd",
"img_date_minute", "img_date_second"]#+["img_date", "time_stamp"]
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
####################################################
#MAGIC
#######################################################
def shuffle_df(df):
return df.iloc[np.random.permutation(len(df))]
def get_loss_at1K(estimator):
results_on_test = estimator.evals_result()['validation_1']['mlogloss']
return results_on_test[999]
def loss_with_per_tree_stats(train_df, test_df, new_cols):
features, test_df, train_df = process_split(train_df, test_df, new_cols)
train_target, test_target = train_df[TARGET].values, test_df[TARGET].values
del train_df[TARGET]
del test_df[TARGET]
train_df = train_df[features]
test_df = test_df[features]
train_arr, test_arr = train_df.values, test_df.values
print features
seed = int(time())
print 'XGB seed {}'.format(seed)
estimator = xgb.XGBClassifier(n_estimators=1000,
                                  objective='multi:softprob',
subsample=0.8,
colsample_bytree=0.8,
seed=seed)
eval_set = [(train_arr, train_target), (test_arr, test_target)]
estimator.fit(train_arr, train_target, eval_set=eval_set, eval_metric='mlogloss', verbose=False)
proba = estimator.predict_proba(test_arr)
loss = log_loss(test_target, proba)
loss1K = get_loss_at1K(estimator)
return loss, loss1K, xgboost_per_tree_results(estimator), \
estimator.feature_importances_, get_probs_from_est(estimator, proba, test_df), features
def process_split(train_df, test_df, new_cols):
features = []
features += new_cols
train_df, test_df, new_cols = process_mngr_categ_preprocessing(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_bid_categ_preprocessing(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
return features, test_df, train_df
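# Note: process_split rebuilds both HCC encodings inside every train/test
# split, so the target-derived columns are fit only on that split's training
# rows and cannot leak the evaluation fold's labels.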
def process_all_name(train_df, test_df):
features = ['bathrooms', 'bedrooms', 'latitude',
'longitude', 'price',
'num_features', 'num_photos', 'word_num_in_descr',
"created_month", "created_day",
CREATED_HOUR, CREATED_MINUTE, DAY_OF_WEEK]
train_df, test_df, new_cols = process_manager_num(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_bid_num(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_listing_id(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_nei123(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
    train_df, new_cols = process_features(train_df)  # feature dummies are added to train_df only; evaluation below re-splits train_df
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features+=new_cols
# train_df, test_df, new_cols = process_mngr_avg_median_price(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
#
#
# train_df, test_df, new_cols = process_other_mngr_medians(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
#
#
# train_df, test_df, new_cols = process_other_mngr_medians_new(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
train_df, test_df, new_cols = process_magic(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
return train_df, test_df, features
def xgboost_per_tree_results(estimator):
results_on_test = estimator.evals_result()['validation_1']['mlogloss']
results_on_train = estimator.evals_result()['validation_0']['mlogloss']
return {
'train': results_on_train,
'test': results_on_test
}
def do_test_xgboost(name, mongo_host, experiment_max_time=15*60):
all_losses = []
l_results_per_tree = []
losses_at_1K = []
train_df = load_train()
test_df = load_test()
train_df, test_df, features = process_all_name(train_df, test_df)
fix_index(train_df)
fix_index(test_df)
ii_importance = []
for counter in range(15):
cur_time = time()
N = getN(mongo_host, name, experiment_max_time)
train, test = split_from_N(train_df.copy(), N)
loss, loss1K, losses_per_tree, importance, probs_data, f_names = \
loss_with_per_tree_stats(train, test, features)
probs, test_indexes = probs_data
ii_importance.append(importance.tolist())
cur_time = time() - cur_time
all_losses.append(loss)
losses_at_1K.append(loss1K)
l_results_per_tree.append(losses_per_tree)
out(all_losses, loss, losses_at_1K, loss1K, counter, cur_time)
write_results(N, name, mongo_host, probs,test_indexes, l_results_per_tree, ii_importance, f_names)
do_test_xgboost('stacking_no_mngr_medians', sys.argv[1])
# ===== File: res/packages/scripts/scripts/client/tutorial/control/quests/__init__.py (repo: webiumsk/WOT-0.9.20.0, no license) =====
# 2017.08.29 21:51:51 Střední Evropa (letní čas)
# Embedded file name: scripts/client/tutorial/control/quests/__init__.py
from tutorial.control.lobby.context import LobbyBonusesRequester
from tutorial.control.quests import queries
from tutorial.data.effects import EFFECT_TYPE
from tutorial.control import ControlsFactory
from tutorial.control import context as core_ctx
from tutorial.control import functional as core_func
from tutorial.control.chains import functional as chains_func
from tutorial.control.lobby import functional as lobby_func
from tutorial.control.quests import functional as quests_func
class QuestsControlsFactory(ControlsFactory):
def __init__(self):
effects = {EFFECT_TYPE.ACTIVATE: core_func.FunctionalActivateEffect,
EFFECT_TYPE.DEACTIVATE: core_func.FunctionalDeactivateEffect,
EFFECT_TYPE.GLOBAL_ACTIVATE: core_func.FunctionalGlobalActivateEffect,
EFFECT_TYPE.GLOBAL_DEACTIVATE: core_func.FunctionalGlobalDeactivateEffect,
EFFECT_TYPE.SET_GUI_ITEM_CRITERIA: core_func.FunctionalSetGuiItemCriteria,
EFFECT_TYPE.SET_ACTION: core_func.FunctionalSetAction,
EFFECT_TYPE.REMOVE_ACTION: core_func.FunctionalRemoveAction,
EFFECT_TYPE.REFUSE_TRAINING: core_func.FunctionalRefuseTrainingEffect,
EFFECT_TYPE.REQUEST_BONUS: core_func.FunctionalRequestBonusEffect,
EFFECT_TYPE.NEXT_CHAPTER: core_func.FunctionalNextChapterEffect,
EFFECT_TYPE.CLEAR_SCENE: core_func.FunctionalClearScene,
EFFECT_TYPE.GO_SCENE: core_func.GoToSceneEffect,
EFFECT_TYPE.SHOW_HINT: chains_func.FunctionalShowHint,
EFFECT_TYPE.CLOSE_HINT: chains_func.FunctionalCloseHint,
EFFECT_TYPE.SHOW_WINDOW: quests_func.ShowSharedWindowEffect,
EFFECT_TYPE.SELECT_VEHICLE_IN_HANGAR: quests_func.SelectVehicleInHangar,
EFFECT_TYPE.SAVE_TUTORIAL_SETTING: quests_func.SaveTutorialSettingEffect,
EFFECT_TYPE.SAVE_ACCOUNT_SETTING: quests_func.SaveAccountSettingEffect,
EFFECT_TYPE.RUN_TRIGGER: quests_func.QuestsFunctionalRunTriggerEffect,
EFFECT_TYPE.SHOW_UNLOCKED_CHAPTER: chains_func.FunctionalShowUnlockedChapter,
EFFECT_TYPE.SHOW_AWARD_WINDOW: chains_func.FunctionalShowAwardWindow,
EFFECT_TYPE.ENTER_QUEUE: chains_func.FunctionalSwitchToRandom}
_queries = {'awardWindow': queries.AwardWindowContentQuery}
ControlsFactory.__init__(self, effects, _queries)
def createBonuses(self, completed):
return LobbyBonusesRequester(completed)
def createSoundPlayer(self):
return core_ctx.NoSound()
def createFuncScene(self, sceneModel):
return core_func.FunctionalScene(sceneModel)
def createFuncInfo(self):
return lobby_func.FunctionalLobbyChapterInfo()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\tutorial\control\quests\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:51:51 Střední Evropa (letní čas)
cd009ea532016e6d794b44635f9cf787d176f987 | d374478ba42d027e730e2b9d378b0a08de9c23b5 | /4. Building your Deep Neural Network/linear_backward.py | a7d5789fc96744d2b4c971623e11c58a98dfa9a2 | [] | no_license | kuangzijian/Neural-Networks-and-Deep-Learning | 8ffe46e7b99611c033f54d553a897313b36ea22b | 781d62679497e9dfa6e6556d2b49a6366c6f945f | refs/heads/master | 2023-08-08T07:32:13.280785 | 2021-05-05T16:44:49 | 2021-05-05T16:44:49 | 217,354,065 | 0 | 0 | null | 2023-07-22T19:42:15 | 2019-10-24T17:20:55 | Python | UTF-8 | Python | false | false | 990 | py | # GRADED FUNCTION: linear_backward
import numpy as np
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
    dW = dZ.dot(A_prev.T) / m  # divide the array by m so an integer m cannot truncate under Python 2
db = np.sum(dZ, axis=1, keepdims=True) / m
dA_prev = np.dot(W.T, dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
    return dA_prev, dW, db
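if __name__ == "__main__":
    # Minimal shape check on random data (illustration only).
    np.random.seed(1)
    A_prev = np.random.randn(3, 4)  # layer l-1 activations, batch of 4
    W = np.random.randn(2, 3)       # layer l has 2 units
    b = np.random.randn(2, 1)
    dZ = np.random.randn(2, 4)
    dA_prev, dW, db = linear_backward(dZ, (A_prev, W, b))
    print(dA_prev.shape, dW.shape, db.shape)  # (3, 4) (2, 3) (2, 1)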
# ===== File: Users/M/martharotter/wikipediavisualiser.py (repo: BerilBBJ/scraperwiki-scraper-vault, no license) =====
import scraperwiki
scraperwiki.sqlite.attach("wikipedia_paper_scraper_until_jan_20")
data = scraperwiki.sqlite.select(
'''* from wikipedia_paper_scraper_until_jan_20.swdata
order by id desc limit 10'''
)
print "<table>"
print "<tr><th>ID</th><th>Tweet</th><th>User</th>"
for d in data:
    print "<tr>"
    print "<td>", d["id"], "</td>"
    print "<td>", d["text"], "</td>"
    print "<td>", d["from_user"], "</td>"
    print "</tr>"
print "</table>"
# ===== File: website/app.py (repo: pythonguru101/CrpytoCurrentMarket, no license) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import os
import commands
# import subprocess
import base64
import csv
import re
import json
import libdb_mysql_web
import decimal
from operator import itemgetter
from flask import Flask, render_template, request, jsonify, redirect, url_for, send_file
from datetime import datetime
app = Flask(__name__)
db = libdb_mysql_web.libdb_mysql()
##########################################################################
### MAIN WEB PAGE FUNCTIONS
def chart(coin_name):
price_list = []
item = []
condition = "ticker='"+coin_name+"'"
capitalizations = db.generic_select([condition], "capitalization")
for i in capitalizations:
item.append(i['secs'])
item.append(i['last'])
price_list.append(item)
item = []
return price_list
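# chart() returns [secs, last] pairs straight from the capitalization table,
# ready for a time-series plot; illustrative shape:
# [[1580000000, 9321.5], [1580000300, 9330.1], ...]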
@app.route('/coinmarketcap.html', methods=['GET'])
def get_cmc():
price_in_usd = 0
btc = 0
eth = 0
price_in_btc = 0
price_in_eth = 0
market_cap = 0
volume24 = 0
secs_list = []
selected_coin_prefix = 'CM-'
coin_sel = 'CM-BTC'
capitalizations = db.generic_select(["last_flag=1"], "capitalization")
for item in capitalizations:
secs_list.append(item['secs'])
latest_secs = max(secs_list)
for item in capitalizations:
if item['secs'] == latest_secs and "CM-" in item['ticker']:
selected_coin_prefix = 'CM-'
if item['secs'] == latest_secs and "MK-" in item['ticker']:
selected_coin_prefix = 'MK-'
if request.args.get('coin'):
coin_sel = selected_coin_prefix + str(request.args.get('coin'))
price = chart(coin_sel)
for item in capitalizations:
if item['secs'] == latest_secs and item['ticker'] == coin_sel:
market_cap = item['market_cap']
market_cap = format(market_cap, '.1f')
volume24 = item['volume']
if item['secs'] == latest_secs and item['ticker'] == selected_coin_prefix + "BTC":
btc = item['last']
if item['secs'] == latest_secs and item['ticker'] == selected_coin_prefix + "ETH":
eth = item['last']
day_range = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 24 * 60 * 60 \
and item['ticker'] == coin_sel:
day_range.append(item['last'])
for item in capitalizations:
if item['secs'] == latest_secs and item['ticker'] == coin_sel:
price_in_usd = item['last']
if btc != 0:
price_in_btc = '%.12f' % (price_in_usd / btc)
if eth != 0:
price_in_eth = '%.12f' % (price_in_usd / eth)
range_1d = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_1d.append(item['last'])
range_7d = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_7d.append(item['last'])
range_52w = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 52 * 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_52w.append(item['last'])
vol_list_52w = []
average_vol_52w = 0
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 52 * 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
vol_list_52w.append(item['volume'])
if len(vol_list_52w) != 0:
average_vol_52w = sum(vol_list_52w) / len(vol_list_52w)
whole_range = []
for item in capitalizations:
if item['ticker'] == coin_sel:
whole_range.append(item['last'])
secs_7d_last = 0
basic_secs_7d = max(secs_list) - 7 * 24 * 60 * 60
secs_7d = min(secs_list, key=lambda x: abs(x - basic_secs_7d))
for item in capitalizations:
if item['secs'] == secs_7d and item['ticker'] == coin_sel:
secs_7d_last = item['last']
secs_1m_last = 0
basic_secs_1m = max(secs_list) - 30 * 24 * 60 * 60
secs_1m = min(secs_list, key=lambda x: abs(x - basic_secs_1m))
for item in capitalizations:
if item['secs'] == secs_1m and item['ticker'] == coin_sel:
secs_1m_last = item['last']
secs_6m_last = 0
basic_secs_6m = max(secs_list) - 6 * 30 * 24 * 60 * 60
secs_6m = min(secs_list, key=lambda x: abs(x - basic_secs_6m))
for item in capitalizations:
if item['secs'] == secs_6m and item['ticker'] == coin_sel:
secs_6m_last = item['last']
ticker_list = []
for item in capitalizations:
if item['secs'] == latest_secs:
item['market_cap'] = format(item['market_cap'], '.1f')
item['ticker'] = item['ticker'][3:]
ticker_list.append(item)
alldata = {'market_cap': market_cap,
'day_range_max': max(day_range),
'day_range_min': min(day_range),
'volume24': volume24,
'circulating_supply': '-',
'price_in_usd': price_in_usd,
'price_in_btc': price_in_btc,
'price_in_eth': price_in_eth,
'range_max_1d': max(range_1d),
'range_min_1d': min(range_1d),
'range_max_7d': max(range_7d),
'range_min_7d': min(range_7d),
'range_max_52w': max(range_52w),
'range_min_52w': min(range_52w),
'average_vol_52w': average_vol_52w,
'all_time_high': max(whole_range),
               'percent_from_ath': (max(whole_range) - price_in_usd) / max(whole_range) * 100,
'cap_in_btc': float(market_cap) / float(btc),
'pro_7d': (secs_7d_last - price_in_usd) / price_in_usd * 100,
'pro_1m': (secs_1m_last - price_in_usd) / price_in_usd * 100,
'pro_6m': (secs_6m_last - price_in_usd) / price_in_usd * 100,
'ticker_list': ticker_list,
'coin_sel': coin_sel[3:],
'updated': datetime.fromtimestamp(latest_secs).strftime("%d/%m/%Y %H:%M:%S"),
'price_list': price
}
return render_template("coinmarketcap.html", data=alldata)
@app.route('/download.html', methods=['POST'])
def download():
values = {}
ticker_list = []
csv_list = []
option = None
controller = request.form.get('controller')
tmpsecs = time.strptime(request.form.get('dini') + " 00:00:00", "%Y-%m-%d %H:%M:%S")
ini_secs = int(time.mktime(tmpsecs))
tmpsecs = time.strptime(request.form.get('dend') + " 23:59:59", "%Y-%m-%d %H:%M:%S")
end_secs = int(time.mktime(tmpsecs))
if request.form.get('option').lower() == "prices":
option = "last"
if request.form.get('option').lower() == "dollars":
option = "dollars"
if request.form.get('option').lower() == "volume":
option = "volume"
# -- get involved tickers --
values['time'] = []
params = [
"controller='" + controller + "'",
"active=1",
"ORDER BY localticker"
]
tickers = db.generic_select(params, "tickers")
ticker_list.append('time')
for i in tickers:
ticker_list.append(i['remoteticker'])
for i in tickers:
data_table = "prices"
if controller == "COINMARKETCAP":
data_table = "capitalization"
params = [
"SELECT secs, " + option,
"ticker='" + i['localticker'] + "'",
"secs>=" + str(ini_secs),
"secs<=" + str(end_secs),
"ORDER BY secs"
]
data = db.generic_select(params, data_table)
if len(data) > 0:
if values['time'] == []:
for n in data:
values['time'].append(n['secs'])
values[i['remoteticker']] = []
for n in data:
if option == "last":
values[i['remoteticker']].append(n['last'])
if option == "dollars":
values[i['remoteticker']].append(n['dollars'])
if option == "volume":
values[i['remoteticker']].append(n['dayvol'])
# -- fill data for CSV --
for i in range(0, len(values['time'])):
tmp = {}
for n in values.keys():
if n == "time":
tmpdate = time.localtime(values[n][i])
tmp[n] = time.strftime("%Y-%m-%d %H:%M:%S", tmpdate)
for n in values.keys():
if n <> "time":
try:
tmp[n] = ('%16.8f' % values[n][i]).strip()
except:
tmp[n] = ('%16.8f' % values[n][-1]).strip()
csv_list.append(tmp)
# -- write to CSV file on /tmp --
if option == "last":
option = "prices"
dini = re.sub("-", "", request.form.get('dini'))
dend = re.sub("-", "", request.form.get('dend'))
csv_file = controller + "_" + dini + "_" + dend + "_" + option + ".csv"
fp = open("/tmp/" + csv_file, 'wb')
writer = csv.DictWriter(fp, fieldnames=ticker_list, extrasaction='ignore', delimiter=',',
quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for i in csv_list:
writer.writerow(i)
fp.close()
return send_file("/tmp/" + csv_file, mimetype="text/csv", attachment_filename=csv_file,
as_attachment=True)
@app.route('/csv-download.html', methods=['GET'])
def csv_download():
controllers = []
# -- get data --
controllers = db.generic_select([], "controllers")
today = time.strftime("%Y-%m-%d", time.localtime())
alldata = {'controllers': controllers, 'date': today}
return render_template("csv-download.html", data=alldata)
@app.route('/save-bot-config.html', methods=['POST'])
def savebotconfig():
bot_config = {
'volume': 0.00,
}
# -- process arguments --
if request.method == 'POST':
volume = request.form['volume']
if volume != "":
bot_config['volume'] = float(volume)
params1 = bot_config
params2 = ["id=1"]
db.generic_update(params1, params2, "bot_config")
# -- reinitialize allstream --
db.path = os.getcwd() + "/"
db.stopper = db.path + "../sd-allstream.py"
print("Stopping allstream...")
os.system(db.stopper + " &")
time.sleep(1)
return redirect(url_for('configbot'))
@app.route('/config-bot.html', methods=['GET'])
def configbot():
alldata = {}
tmp = db.generic_select([], "bot_config")
bot_config = tmp[0]
bot_config['vol10'] = ""
if bot_config['volume'] == 10:
bot_config['vol10'] = " checked"
bot_config['vol25'] = ""
if bot_config['volume'] == 25:
bot_config['vol25'] = " checked"
bot_config['vol50'] = ""
if bot_config['volume'] == 50:
bot_config['vol50'] = " checked"
bot_config['vol75'] = ""
if bot_config['volume'] == 75:
bot_config['vol75'] = " checked"
bot_config['vol100'] = ""
if bot_config['volume'] == 100:
bot_config['vol100'] = " checked"
alldata = {'bot_config': bot_config}
return render_template("config-bot.html", data=alldata)
@app.route('/csv-operations.html', methods=['GET'])
def csv_operations():
args_filter = 1
argums = {
'start_date': "",
'end_date': "",
'ticker': "",
'op_type': "",
'status_type': "",
}
# -- get arguments --
argums['start_date'] = request.args.get('start_date')
argums['end_date'] = request.args.get('end_date')
argums['ticker'] = request.args.get('ticker')
if argums['ticker'] is not None:
argums['ticker'] = "BI-" + request.args.get('ticker')
argums['op_type'] = request.args.get('op_type')
argums['status_type'] = request.args.get('status_type')
if argums['start_date'] is None \
and argums['end_date'] is None \
and argums['ticker'] is None \
and argums['op_type'] is None \
and argums['status_type'] is None:
args_filter = 0
# -- get data --
params = []
params.append("ORDER BY timestamp DESC")
operations = db.generic_select(params, "operations")
    if len(operations) != 0:
for i in range(0, len(operations)):
operations[i]['price1'] = ('%16.8f' % operations[i]['price1']).strip()
operations[i]['price2'] = ('%16.8f' % operations[i]['price2']).strip()
# -- make csv file --
# return render_template("csv-operations.html", data=alldata)
csv_data = []
csv_file = "operations.csv"
titles = ['Time', 'Ticker', 'Operation', 'Price', 'Status']
status = ['Success', 'Failed', 'No Funds']
for i in operations:
txt_status = status[i['end_status']]
tmp = {
'Time': str(i['pdate']) + " " + str(i['ptime']),
'Ticker': i['ticker'],
'Operation': i['operation'],
'Price': i['price'],
'Status': txt_status
}
csv_data.append(tmp)
fp = open(csv_file, 'wb')
writer = csv.DictWriter(fp, fieldnames=titles, extrasaction='ignore', delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for i in csv_data:
writer.writerow(i)
fp.close()
return send_file(csv_file, mimetype="text/csv", attachment_filename=csv_file, as_attachment=True)
@app.route('/operation-review.html', methods=['GET'])
def op_review():
op_id = request.args.get('id')
params = [
"op_id=" + str(op_id),
"ORDER BY status"
]
alldata = db.generic_select(params, "op_tracking")
return render_template("operation-review.html", data=alldata)
@app.route('/operations.html', methods=['GET'])
def operations():
alldata = []
args_filter = 1
argums = {
'start_date': "",
'end_date': "",
'ticker': "",
'op_type': "",
'status_type': "",
}
# -- get arguments --
tmp = request.args.get('page')
curr_page = 1
if tmp != "" \
and tmp is not None:
curr_page = int(tmp)
if curr_page < 1:
curr_page = 1
argums['start_date'] = request.args.get('start_date')
argums['end_date'] = request.args.get('end_date')
argums['ticker'] = request.args.get('ticker')
if argums['ticker'] is not None:
argums['ticker'] = "BI-" + request.args.get('ticker')
argums['op_type'] = request.args.get('op_type')
argums['status_type'] = request.args.get('status_type')
if argums['start_date'] is None \
and argums['end_date'] is None \
and argums['ticker'] is None \
and argums['op_type'] is None \
and argums['status_type'] is None:
args_filter = 0
# -- get all filter data --
params = ["SELECT DISTINCT(ticker)"]
tickers = db.generic_select(params, "operations")
show_tickers = [{'ticker': 'All', 'selected': ""}]
for i in tickers:
if argums['ticker'] == i['ticker']:
tmp = {'ticker': i['ticker'][3:], 'selected': " selected"}
else:
tmp = {'ticker': i['ticker'][3:], 'selected': ""}
show_tickers.append(tmp)
op_types = []
for i in ['All', 'Buy', 'Sell']:
if argums['op_type'] == i:
tmp = {'op_type': i, 'selected': " selected"}
else:
tmp = {'op_type': i, 'selected': ""}
op_types.append(tmp)
status_types = []
for i in ['All', 'Success', 'Failed']:
if argums['status_type'] == i:
tmp = {'status_type': i, 'selected': " selected"}
else:
tmp = {'status_type': i, 'selected': ""}
status_types.append(tmp)
# -- make filter query --
params = []
if argums['start_date'] is not None \
and argums['start_date'] != "":
start_date = time.strftime("%Y-%m-%d", time.strptime(argums['start_date'], "%d-%b-%Y"))
params.append("pdate>='" + start_date + "'")
if argums['end_date'] is not None \
and argums['end_date'] != "":
end_date = time.strftime("%Y-%m-%d", time.strptime(argums['end_date'], "%d-%b-%Y"))
params.append("pdate<='" + end_date + "'")
if argums['ticker'] is not None \
and argums['ticker'] != "BI-":
params.append("ticker='" + argums['ticker'] + "'")
if argums['op_type'] is not None:
if argums['op_type'] == "Buy":
params.append("operation='B'")
if argums['op_type'] == "Sell":
params.append("operation='S'")
if argums['status_type'] is not None:
if argums['status_type'] == "Success":
params.append("end_status=0")
if argums['status_type'] == "Failed":
params.append("end_status=1")
if argums['status_type'] == "No Funds":
params.append("end_status=2")
params.append("ORDER BY timestamp DESC")
operations = db.generic_select(params, "operations")
# -- correct date arguments --
if argums['start_date'] is None:
argums['start_date'] = ""
if argums['end_date'] is None:
argums['end_date'] = ""
# -- compose operations for page --
if len(operations):
for i in range(0, len(operations)):
operations[i]['price'] = ('%16.8f' % operations[i]['price']).strip()
operations[i]['status'] = "Success"
if operations[i]['end_status'] == 1:
operations[i]['status'] = "Failed"
if operations[i]['end_status'] == 2:
operations[i]['status'] = "No Funds"
# -- compose paginator --
pages_url = "/operations.html"
if request.query_string != "":
end = request.query_string.find("&page=")
pages_url += "?" + request.query_string[0:end]
print("PAGES_URL:")
print(pages_url)
page_list = db.paginator(operations, curr_page, 25, pages_url)
alldata = {'operations': page_list['rows'], 'pages': page_list['pages'], 'argums': argums, 'tickers': show_tickers,
'op_types': op_types, 'status_types': status_types, 'args_filter': args_filter}
return render_template("operations.html", data=alldata)
@app.route('/stop.html')
def stop_bot():
# if not session.get('logged_in'):
# return redirect(url_for('login'))
comando = "ps auwx | grep -i 'python' | grep -i 'botcommand' | grep -v 'grep' | awk {'print $2'}"
result = commands.getoutput(comando).split("\n")
# result = subprocess.getoutput(comando).split("\n")
for i in result:
if len(i) > 1:
comando = "kill -9 " + str(i)
os.system(comando)
return redirect(url_for('bot_control'))
@app.route('/run.html')
def run_bot():
# if not session.get('logged_in'):
# return redirect(url_for('login'))
# -- start bot --
comando = "cd ../ && " + db.path + "botcommand.py &"
print("COMANDO:")
print(comando)
os.system(comando)
return redirect(url_for('bot_control'))
@app.route('/bot-control.html')
def bot_control():
log_text = ""
running_state = 0
stopping_state = 0
log_file = db.path + "bot_log.log"
# -- get current state --
comando = "ps auwx | grep -i 'python' | grep -i 'botcommand' | grep -v 'vi ' | grep -v 'grep'"
lines = commands.getoutput(comando).split("\n")
# lines = subprocess.getoutput(comando).split("\n")
if len(lines) > 0:
if len(lines[0]) > 1:
running_state = 1
if os.path.isfile(db.path + "stop_bot.ctl"):
stopping_state = 1
print("RUNNING:")
print(running_state)
print("STOPPING:")
print(stopping_state)
print("---------------------------")
# -- if bot not running prepare or create log file for reading --
if running_state == 0:
if not os.path.isfile(log_file):
fp = open(log_file, "w")
fp.write("")
fp.close()
# -- if bot is running, get log file and check stopping --
# if running_state == 1:
fp = open(log_file, "r")
log_text = fp.read().split("\n")
fp.close()
alldata = {'running_state': running_state, 'log_text': log_text, 'path': db.path, 'stopping_state': stopping_state}
return render_template("bot-control.html", data=alldata)
@app.route('/save-ticker.html', methods=['GET'])
def save_ticker():
active = 0
active_selected = request.args.getlist('active')
check_selected = bool(active_selected)
if check_selected == True:
active = 1
# -- update data --
params1 = {
'active': active,
'name': request.args.get('name'),
'localticker': request.args.get('localticker'),
'remoteticker': request.args.get('remoteticker'),
'controller': request.args.get('controller')
}
params2 = ["id=" + str(request.args.get('id'))]
db.generic_update(params1, params2, "tickers_tmp")
return redirect(url_for('setup'))
@app.route('/edit-ticker.html', methods=['GET'])
def edit_ticker():
ticker_id = request.args.get('ticker')
controllers = []
# -- get arguments --
controller = request.args.get('controller')
if controller is None:
controller = ""
# -- get data --
params = ["id=" + str(ticker_id)]
ticker = db.generic_select(params, "tickers_tmp")[0]
if ticker['active'] == 1:
ticker['active'] = " checked"
tmp = db.generic_select([], "controllers")
# -- add default for controller list --
for i in tmp:
if i['controller'] == ticker['controller']:
controllers.append({'controller': i['controller'], 'selected': " selected"})
else:
controllers.append({'controller': i['controller'], 'selected': ""})
alldata = {'controllers': controllers, 'ticker': ticker}
return render_template("edit-ticker.html", data=alldata)
@app.route('/get-controller-ids.html', methods=['GET'])
def get_controller_ids():
ticker_ids = []
controller = ""
controller = request.args.get('controller')
search = request.args.get('search')
ticker_ids = db.get_ticker_id_search(controller, search)
alldata = {'tickers': ticker_ids}
return jsonify(alldata)
@app.route('/update-controller.html', methods=['GET'])
def update_controller():
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
arg_active = str(request.args.get('active'))
referer = ""
if controller == "None":
controller = ""
if search == "None":
search = ""
db.update_controller_active(controller, search, arg_active)
return redirect(url_for('setup', controller=controller, search=search))
@app.route('/update-list.html')
def update_list():
ticker_id = ""
active = 0
ticker_id = request.args.get('ticker')
arg_active = int(request.args.get('active'))
params1 = {'active': arg_active}
params2 = ["id=" + str(ticker_id)]
db.generic_update(params1, params2, "tickers_tmp")
return redirect(url_for('setup'))
@app.route('/apply.html', methods=['GET'])
def apply_changes():
referer = None
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
if controller == "None":
controller = ""
if search == "None":
search = ""
return_function = "setup"
db.generic_delete([], "tickers")
db.fill_tickers_tmp("tickers_tmp", "tickers")
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
if referer == "config-exchanges.html":
return_function = "setup_exchanges"
if referer == "config-times.html":
return_function = "setup_times"
if referer == "config-fees.html":
return_function = "setup_fees"
if referer == "config-keys.html":
return_function = "setup_keys"
# -- reinitialize allstream --
db.path = os.getcwd() + "/../"
db.launcher = db.path + "allstream.py"
# db.histo_launcher = db.path+"allhistorical.py"
db.stopper = db.path + "sd-allstream.py"
print("Stopping allstream and allhistorical...")
os.system(db.stopper + " &")
time.sleep(1)
# print("Relaunching allstream...")
# os.system(db.launcher+" &")
# print("Relaunching allhistorical...")
# os.system(db.histo_launcher+" &")
return redirect(url_for(return_function, message='saved', controller=controller, search=search))
@app.route('/save-keys.html', methods=['GET'])
def save_keys():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
key = str(request.args.get('key'))
secret = str(request.args.get('secret'))
passphrase = str(request.args.get('passphrase'))
params1 = {
'api_key': key,
'api_secret': secret,
'passphrase': passphrase
}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_keys', message='saved'))
@app.route('/save-fees.html', methods=['GET'])
def save_fees():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
fee = str(request.args.get('fee'))
params1 = {'fee': fee}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_fees', message='saved'))
@app.route('/save-times.html', methods=['GET'])
def save_times():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
timepace = str(request.args.get('timepace'))
params1 = {'timepace': timepace}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_times', message='saved'))
@app.route('/config-keys.html')
def setup_keys():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-keys.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-keys.html", data=alldata)
@app.route('/config-fees.html')
def setup_fees():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-fees.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-fees.html", data=alldata)
@app.route('/config-times.html')
def setup_times():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-times.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-times.html", data=alldata)
@app.route('/config-tickers.html', methods=['GET'])
def setup():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
if controller == "None":
controller = ""
if search == "None":
search = ""
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
# -- delete tickers_tmp, refill, get controllers and tickers --
if referer.find("config-tickers.html") < 0 \
and referer.find("edit-ticker.html") < 0:
db.generic_delete([], "tickers_tmp")
db.fill_tickers_tmp("tickers", "tickers_tmp")
# -- get controllers --
controllers = db.generic_select([], "controllers")
if controller == "":
controller = controllers[0]['controller']
# -- if there is a search argument go to search --
# if search != "":
# return redirect(url_for('search', controller=controller, search=search))
# -- load data --
for i in range(0, len(controllers)):
if controllers[i]['controller'] == controller:
controllers[i]['selected'] = " selected"
else:
controllers[i]['selected'] = ""
# -- get tickers --
tickers = db.get_search_tickers(-1, "tickers_tmp", controller, search)
orig_tickers = db.get_search_tickers(-1, "tickers", controller, search)
# -- conditions for modified --
if referer.find("save-ticker.html") == 0 \
or tickers != orig_tickers:
modified = 1
alldata = {'search': search, 'controllers': controllers, 'controller': controller, 'modified': modified,
'finished': finished, 'settings': {'total': 0, 'selected': 0, 'allsel': ""}, 'tickers': []}
# -- create data representation for page --
numtickers = 0
seltickers = 0
for i in range(0, len(tickers)):
tickers[i]['checked'] = ""
if tickers[i]['active'] == 1:
tickers[i]['checked'] = " checked"
seltickers += 1
numtickers += 1
alldata['tickers'] = tickers
alldata['settings']['total'] = numtickers
alldata['settings']['selected'] = seltickers
if alldata['settings']['total'] == alldata['settings']['selected']:
alldata['settings']['allsel'] = " checked"
return render_template("config-tickers.html", data=alldata)
@app.route('/config-exchanges.html')
def setup_exchanges():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
# -- delete tickers_tmp, refill, get controllers and tickers --
if referer == "index.html" or referer == "":
db.generic_delete([], "tickers_tmp")
db.fill_tickers_tmp("tickers", "tickers_tmp")
controllers = db.generic_select([], "controllers")
params = ["ORDER BY controller, remoteticker"]
tickers = db.generic_select(params, "tickers_tmp")
orig_tickers = db.generic_select(params, "tickers")
# -- conditions for modified --
if referer == "save-ticker.html" \
or tickers != orig_tickers:
modified = 1
# -- create data representation for page --
for i in range(0, len(controllers)):
active = ""
activated = "0"
for n in tickers:
if n['controller'] == controllers[i]['controller']:
if str(n['active']) == "1":
active = " checked"
activated = "1"
alldata.append(
{'controller': controllers[i]['controller'], 'active': active, 'activated': activated, 'modified': modified,
'finished': finished})
return render_template("config-exchanges.html", data=alldata)
@app.route('/view-log.html')
def view_log():
alldata = {}
# -- get arguments --
secs = str(request.args.get('secs'))
# -- get listing --
params = ["secs=" + secs]
listing = db.generic_select(params, "opportunities")
# -- date hour --
tmpdate = time.localtime(listing[0]['secs'])
log_date = time.strftime("%d-%m-%Y %H:%M:%S", tmpdate)
for i in range(0, len(listing)):
listing[i]['operation1'] = "B"
listing[i]['operation2'] = "S"
if listing[i]['op_type'] == "S-B":
listing[i]['operation1'] = "S"
listing[i]['operation2'] = "B"
listing[i]['ticker1'] = listing[i]['ticker1'][3:]
listing[i]['ticker2'] = listing[i]['ticker2'][3:]
listing[i]['price1'] = '%16.8f' % listing[i]['price1']
listing[i]['price2'] = '%16.8f' % listing[i]['price2']
listing[i]['pot_profit'] = '%16.8f' % listing[i]['pot_profit']
listing = sorted(listing, key=itemgetter("pot_profit"))
listing.reverse()
alldata = {'datehour': log_date, 'log': listing}
return render_template("view-log.html", data=alldata)
@app.route('/index.html')
@app.route('/screener.html')
@app.route('/')
def screener():
data = []
prices = []
lastsecs = int(time.time())
controller = None
# -- get controller --
controller_sel = str(request.args.get('controller'))
# -- get controllers and tickers --
controllers = db.generic_select([], "controllers")
if controller_sel == "None":
controller = controllers[0]
for i in range(0, len(controllers)):
if controllers[i]['controller'] == controller_sel:
controller = controllers[i]
controllers[i]['selected'] = " selected"
else:
controllers[i]['selected'] = ""
params = [
"controller='" + controller['controller'] + "'",
"active=1",
"ORDER BY localticker"
]
tickers = db.generic_select(params, "tickers")
params = [
"last_flag=1",
"ticker LIKE '" + controller['preintticker'] + "%'"
]
prices_values = db.generic_select(params, "prices")
# -- create data representation for page --
for i in prices_values:
for n in tickers:
if i['ticker'] == n['localticker']:
name = n['name']
# -- add to list --
secsdiff = lastsecs - i['secs']
if secsdiff < 3600:
updated = "Recently"
else:
hours = int(secsdiff / 3600)
updated = "More than " + str(hours) + " hours"
last = i['last']
volume = i['dayvol']
price = {
'name': name,
'localticker': i['ticker'],
'ticker': i['ticker'][3:],
'last': format(last, 'f'),
'volume': volume,
'updated': updated
}
prices.append(price)
data.append({'controller': controller, 'prices': prices})
# -- create last date and group all data for template --
tmpfecha = time.localtime(lastsecs)
fecha = time.strftime("%Y-%m-%d %H:%M:%S", tmpfecha)
alldata = {'last_updated': fecha, 'controllers': controllers, 'controller': controller['controller'], 'data': data}
return render_template("screener.html", data=alldata)
#############################################################################
## MAIN
# db = libdb_mysql.libdb_mysql()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| [
"[email protected]"
] | |
c564cb8a4f8fb15ca5244ece24f0664747b45e2e | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/513FindBottomLeftTreeValue.py | b9b3b6ac780a028c6dda69b1deef818d7aa4d7fd | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,254 | py | # coding=utf-8
'''
Created on 2017-02-15
@author: Administrator
'''
from collections import deque
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def LevelTraverseFromRoot(root):
ans = []
if root:
from collections import deque
current_level = 0
q = deque()
q.append((root, 0))
temp = []
while q:
node, level = q.popleft()
if level > current_level:
ans.append(temp)
temp = []
current_level += 1
temp.append(node.val)
if node.left:
q.append((node.left, current_level + 1))
if node.right:
q.append((node.right, current_level + 1))
ans.append(temp)
return ans
return LevelTraverseFromRoot(root)[-1][0]
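# A minimal usage sketch (tree values are illustrative):
#       1
#      / \
#     2   3
#    /
#   4
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left = TreeNode(4)
    print(Solution().findBottomLeftValue(root))  # expected: 4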
| [
"[email protected]"
] | |
e0f12a86fb901108bf9f2a170c3607667e2e6e21 | 22215eae0bf31c1021cf927155d310ba57adf0fb | /automaton/state_machine.py | 913aa3461a9dcbe5fe1166eb378e5cace0ed6ba7 | [
"MIT"
] | permissive | pombredanne/automaton-5 | 37e466f26d55e3ed34c8780e63b0b9ce1cac66dc | 3f5b6dc4521bc8ee284f732daa7883e49b3433e2 | refs/heads/master | 2020-12-31T03:42:52.956371 | 2013-12-28T01:02:37 | 2013-12-28T01:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # -*- coding: utf-8 -*-
def to_state_machine(node):
state_machine = {}
nodes = set([node])
visited_nodes = set()
accepting_states = set()
while True:
start_node = nodes.pop()
for edge in node.alphabet():
end_node = start_node.derive(edge).reduce()
if end_node.accepts_lambda:
accepting_states.add(end_node)
if end_node and end_node not in visited_nodes:
visited_nodes.add(end_node)
nodes.add(end_node)
state_machine[(start_node, edge)] = end_node
if not nodes:
break
return node, state_machine, accepting_states
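# A minimal usage sketch, assuming a regex-derivative node type that exposes
# alphabet(), derive(), reduce() and accepts_lambda as used above; accepts()
# is a helper name introduced here purely for illustration:
def accepts(node, word):
    start, transitions, accepting = to_state_machine(node)
    state = start
    for symbol in word:
        state = transitions.get((state, symbol))
        if not state:  # a falsy node plays the role of a dead state here
            return False
    return state in accepting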
| [
"[email protected]"
] | |
50e0d7ba43a2d2eddaf378c06555d32d6b5d604f | a28e1e659e4dd82be5e253443b0c7a808cdcee92 | /SortAlgorithm/QuickSort.py | ff67210431d9014a59585e9e5f3016511d39ca00 | [] | no_license | LeBron-Jian/BasicAlgorithmPractice | b2af112e8f1299fe17cf456111276fce874586cb | 51943e2c2c4ec70c7c1d5b53c9fdf0a719428d7a | refs/heads/master | 2023-06-07T19:12:16.362428 | 2023-05-27T06:58:12 | 2023-05-27T06:58:12 | 217,682,743 | 13 | 14 | null | 2020-09-12T01:50:35 | 2019-10-26T08:59:04 | Python | UTF-8 | Python | false | false | 4,569 | py | # -*- coding: utf-8 -*-
'''
Quick sort
Put a chosen element (the pivot) into its final position ("partitioning"):
every element to its left is smaller and every element to its right is
larger; partitioning around each element in turn sorts the whole list.
On average, quick sort runs in O(nlogn).
Quick sort has a worst case: when partitioning never splits the list into
two parts, the complexity degrades to O(n**2).
To avoid running into this worst case, do not always take the first element
as the pivot; pick a random element from the list instead.
'''
# Partitioning function: put the pivot into its final position
def partition(data, left, right):  # left and right point at the two end elements
    # Assign the leftmost element to tmp; data[left] is now a free slot
    tmp = data[left]
    # Continue as long as the two pointers have not met
    while left < right:  # keep going while the pointers do not coincide
        # Skip right-side elements that are >= tmp (no move needed)
        while left < right and data[right] >= tmp:
            right -= 1  # the right pointer takes one step to the left
        # data[right] is smaller than tmp: move it into the free slot on the left
        data[left] = data[right]
        print('left:', li)
        # Skip left-side elements that are <= tmp (no move needed)
        while left < right and data[left] <= tmp:
            left += 1  # the left pointer takes one step to the right
        # data[left] is larger than tmp: move it into the free slot on the right
        data[right] = data[left]
        print('right:', li)
    # Finally, put the value taken out at the start into the position where
    # the two pointers met
    data[left] = tmp
    return left  # return that position
# With the partitioning function written, the sort is a recursive call to it
def quick_sort(data, left, right):
    if left < right:
        # Put the pivot element in place and record its position
        mid = partition(data, left, right)
        # Sort the elements to its left
        quick_sort(data, left, mid - 1)
        # Sort the elements to its right
        quick_sort(data, mid + 1, right)
return data
li = [5, 7, 4, 6, 3, 1, 2, 9, 8]
print('start:', li)
quick_sort(li, 0, len(li) - 1)
print('end:', li)
'''
start: [5, 7, 4, 6, 3, 1, 2, 9, 8]
left: [2, 7, 4, 6, 3, 1, 2, 9, 8]
right: [2, 7, 4, 6, 3, 1, 7, 9, 8]
left: [2, 1, 4, 6, 3, 1, 7, 9, 8]
right: [2, 1, 4, 6, 3, 6, 7, 9, 8]
left: [2, 1, 4, 3, 3, 6, 7, 9, 8]
right: [2, 1, 4, 3, 3, 6, 7, 9, 8]
left: [1, 1, 4, 3, 5, 6, 7, 9, 8]
right: [1, 1, 4, 3, 5, 6, 7, 9, 8]
left: [1, 2, 3, 3, 5, 6, 7, 9, 8]
right: [1, 2, 3, 3, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 9, 8]
right: [1, 2, 3, 4, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 9, 8]
right: [1, 2, 3, 4, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 8, 8]
right: [1, 2, 3, 4, 5, 6, 7, 8, 8]
end: [1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
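# As the docstring above notes, always taking the first element as the pivot
# degrades to O(n**2) on already-sorted input. A minimal sketch of the fix,
# built on the partition() defined above; random_quick_sort is a name
# introduced here for illustration:
import random
def random_quick_sort(data, left, right):
    if left < right:
        pivot_index = random.randint(left, right)
        # swap a random element to the front so partition() uses it as the pivot
        data[left], data[pivot_index] = data[pivot_index], data[left]
        mid = partition(data, left, right)
        random_quick_sort(data, left, mid - 1)
        random_quick_sort(data, mid + 1, right)
    return data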
# ***************** Method 2 **********************
def quick_sort1(array, left, right):
    if left >= right:
        return
    low = left
    high = right
    key = array[low]  # the first value, used as the pivot
    while low < high:  # as long as the two pointers have not met
        while low < high and array[high] > key:  # skip right-side values greater than key
            high -= 1
        # Now swap key (array[low]) with array[high], which is not greater than key
        array[low] = array[high]
        array[high] = key
        # Think about why this is <= rather than <
        while low < high and array[low] <= key:  # look for a left-side value greater than key
            low += 1
        # Found a left-side value greater than key: swap array[high] (which
        # should hold key by now) with that larger array[low]
        array[high] = array[low]
        array[low] = key
    # Apply the same procedure to the left partition (recursing into quick_sort1)
    quick_sort1(array, left, low-1)
    # Apply the same procedure to the right partition
    quick_sort1(array, low+1, right)
# li = [5, 7, 4, 6, 3, 1, 2, 9, 8]
# print('start:', li)
# quick_sort1(li, 0, len(li) - 1)
# print('end:', li)
def quick_sort(data):  # note: this rebinds the quick_sort name defined above
    if len(data) >= 2:  # recursion entry and exit condition
        mid = data[len(data) // 2]  # choose the pivot; the first or last element also works
        left, right = [], []  # lists for the elements on either side of the pivot
        data.remove(mid)  # remove the pivot from the original list
for num in data:
if num >= mid:
right.append(num)
else:
left.append(num)
return quick_sort(left) + [mid] + quick_sort(right)
else:
return data
li = [3, 2, 4, 5, 6, 7, 1]
print(quick_sort(li))
# [1, 2, 3, 4, 5, 6, 7]
| [
"[email protected]"
] | |
1d59a9449e1faa81ca0cc15a68fdd61b3aed9c02 | 29ad60d0f4e4207aaf0374f811c9728b16942da2 | /Report/files/switchoff.py | 9ebd115adc375c7740070dff463253315afc67ba | [] | no_license | LCAV/AcousticRobot | c97e03bc06c59650556832794aca38cfe2d873a5 | 9f33434f64cb882897b1e0e3b8ad01642e91148a | refs/heads/master | 2021-01-10T16:46:51.989150 | 2017-10-11T08:26:58 | 2017-10-11T08:26:58 | 43,871,589 | 2 | 5 | null | 2017-03-06T16:15:09 | 2015-10-08T07:58:42 | HTML | UTF-8 | Python | false | false | 831 | py | import RPi.GPIO as GPIO
import os
import time
#set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setup(10,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#function called on pin interrupt
def button_triggered(channel):
counter = 0
counter_on = 0
while (counter <= 6):
time.sleep(1)
counter+=1
if (GPIO.input(10)):
counter_on+=1
if (counter_on >= 3):
break
if (counter_on >= 3):
print("switchoff.py: Raspberry shutting down now")
os.system("sudo halt")
elif (counter_on < 3):
print("switchoff.py: Rapsberry is going to reboot now")
os.system("sudo reboot")
#setup pin interrupt
GPIO.add_event_detect(10,GPIO.RISING,callback=button_triggered,bouncetime=300)
#wait forever
try:
    while True:
        time.sleep(0.001)
except KeyboardInterrupt:
    pass
GPIO.cleanup()  # release the pins on exit
| [
"[email protected]"
] | |
5823a62afd3d08698685ab28c56917e64b1a3011 | 21818228cb62d31b9685de44deb27cfd90430573 | /ccxt/async/bter.py | 9945a46c62c3d907e8cc92124daada92fbb822ac | [] | no_license | mico/cryptoArbitrage | d9d5d2f89e3fccc0b84d9c13b771edef0f2b00a1 | ea9ef03e79f302b36948746c77e4acbb3d6f01b7 | refs/heads/master | 2021-03-22T00:17:30.448593 | 2018-05-28T05:08:21 | 2018-05-28T05:08:21 | 108,232,310 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 9,931 | py | # -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bter (Exchange):
def describe(self):
return self.deep_extend(super(bter, self).describe(), {
'id': 'bter',
'name': 'Bter',
'countries': ['VG', 'CN'], # British Virgin Islands, China
'version': '2',
'hasCORS': False,
'hasFetchTickers': True,
'hasWithdraw': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980479-cfa3188c-6387-11e7-8191-93fc4184ba5c.jpg',
'api': {
'public': 'https://data.bter.com/api',
'private': 'https://api.bter.com/api',
},
'www': 'https://bter.com',
'doc': 'https://bter.com/api2',
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
})
async def fetch_markets(self):
response = await self.publicGetMarketinfo()
markets = response['pairs']
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
base, quote = id.split('_')
base = base.upper()
quote = quote.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': details['decimal_places'],
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': None,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high24hr']),
'low': float(ticker['low24hr']),
'bid': float(ticker['highestBid']),
'ask': float(ticker['lowestAsk']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': float(ticker['percentChange']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['quoteVolume']),
'quoteVolume': float(ticker['baseVolume']),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = self.parse8601(trade['date'])
return {
'id': trade['tradeID'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['rate'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
await self.load_markets()
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
order = {
'currencyPair': self.market_id(symbol),
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['orderNumber'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostCancelOrder({'orderNumber': id})
async def withdraw(self, currency, amount, address, params={}):
await self.load_markets()
response = await self.privatePostWithdraw(self.extend({
'currency': currency.lower(),
'amount': amount,
'address': address, # Address must exist in you AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
if response['result'] != 'true':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
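# A hedged sketch of the signing scheme implemented in sign() above: private
# calls carry an HMAC-SHA512 of the urlencoded body, keyed by the API secret,
# in the 'Sign' header (the key and secret below are placeholders):
if __name__ == "__main__":
    import hmac
    from urllib.parse import urlencode
    secret = b'YOUR_SECRET'
    body = urlencode({'nonce': 1500000000000, 'currencyPair': 'btc_usdt'})
    signature = hmac.new(secret, body.encode(), hashlib.sha512).hexdigest()
    print({'Key': 'YOUR_API_KEY', 'Sign': signature,
           'Content-Type': 'application/x-www-form-urlencoded'})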
| [
"[email protected]"
] | |
7f2852b0f4be4f781576595cef92b728b46f471b | c2ce7155a393e1056b5fdc4d3f9b9a89046e9285 | /scripts/pipeline.py | 3d981873f2ffb78575182feb447d1effe251678a | [
"MIT"
] | permissive | blyucs/aw_nas | 9c068dab1bd84a35e58a4c426f7c852a67b93882 | 8a32196ce342b8ad9e3885895735d1286e25beba | refs/heads/master | 2023-08-19T11:00:00.526229 | 2021-08-21T05:16:13 | 2021-08-21T05:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,413 | py | from __future__ import print_function
import os
import re
import random
import shutil
import logging
import argparse
import subprocess
import yaml
import numpy as np
DERIVE_N = 10
def _get_genotype_substr(genotypes):
return re.search(r".+?Genotype\((.+)\)", genotypes).group(1)
def _get_perf(log, type_="cnn"):
if type_ == "cnn":
out = subprocess.check_output("grep -Eo 'valid_acc [0-9.]+' {}".format(log) + \
" | tail -n 1 | awk '{print $NF}'", shell=True)
logging.info(out)
acc = float(out)
return acc
raise NotImplementedError("unknown type: {}".format(type_))
def call_search(cfg, gpu, seed, train_dir, vis_dir, save_every):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
logging.info(("awnas search {cfg} --gpu {gpu} --seed {seed} --save-every {save_every} "
"--train-dir {train_dir} --vis-dir {vis_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, vis_dir=vis_dir, save_every=save_every))
subprocess.check_call(("awnas search {cfg} --gpu {gpu} --seed {seed} --save-every {save_every} "
"--train-dir {train_dir} --vis-dir {vis_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, vis_dir=vis_dir, save_every=save_every),
shell=True)
# derive
def call_derive(cfg, gpu, seed, load, out_file, n):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
logging.info(("awnas derive {cfg} --load {load} --gpu {gpu} --seed {seed}"
" --test -n {n} -o {out_file}")\
.format(cfg=cfg, load=load, gpu=gpu, seed=seed,
out_file=out_file, n=n))
subprocess.check_call(("awnas derive {cfg} --load {load} --gpu {gpu} --seed {seed}"
" --test -n {n} -o {out_file}")\
.format(cfg=cfg, load=load, gpu=gpu, seed=seed,
out_file=out_file, n=n),
shell=True)
# train
def call_train(cfg, gpu, seed, train_dir, save_every):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
save_str = "" if save_every is None else "--save-every {}".format(save_every)
logging.info(("awnas train {cfg} --gpus {gpu} --seed {seed} {save_str} "
"--train-dir {train_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, save_str=save_str))
subprocess.check_call(("awnas train {cfg} --gpus {gpu} --seed {seed} {save_str} "
"--train-dir {train_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, save_str=save_str),
shell=True)
def make_surrogate_cfgs(derive_out_file, template_file, sur_dir):
with open(template_file, "r") as f:
        cfg_template = yaml.safe_load(f)
    with open(derive_out_file, "r") as f:
        genotypes_list = yaml.safe_load(f)
for ind, genotypes in enumerate(genotypes_list):
sur_fname = os.path.join(sur_dir, "{}.yaml".format(ind))
genotypes = _get_genotype_substr(genotypes)
cfg_template["final_model_cfg"]["genotypes"] = genotypes
with open(sur_fname, "w") as of:
yaml.safe_dump(cfg_template, of)
def get_sur_perfs(sur_dir):
final_perfs = []
for ind in range(DERIVE_N):
surrogate_dir = os.path.join(sur_dir, str(ind))
log = os.path.join(surrogate_dir, "train.log")
final_perfs.append(_get_perf(log, type_=args.type))
return final_perfs
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", required=True)
parser.add_argument("--exp-name", required=True, type=str)
parser.add_argument("--type", default="cnn", choices=["cnn", "rnn"], type=str, help="(default: %(default)s)")
parser.add_argument("--base-dir", default=os.path.abspath(os.path.expanduser("~/awnas/results")),
type=str, help="results will be saved to `base_dir`/`exp_name` (default: %(default)s)")
parser.add_argument("--seed", type=int, help="the default seeds of all tasks, "
"if not specified explicitly.")
parser.add_argument("--search-cfg", required=True, type=str)
parser.add_argument("--search-memory", default=6000, type=int)
parser.add_argument("--search-util", default=30, type=int)
parser.add_argument("--search-seed", default=None, type=int)
parser.add_argument("--search-save-every", default=20, type=int)
parser.add_argument("--derive-memory", default=3000, type=int)
parser.add_argument("--derive-util", default=0, type=int)
parser.add_argument("--derive-seed", default=123, type=int)
parser.add_argument("--train-surrogate-cfg", required=True, type=str, help="train surrogate config file")
parser.add_argument("--train-surrogate-memory", default=6000, type=int)
parser.add_argument("--train-surrogate-util", default=0, type=int)
parser.add_argument("--train-surrogate-seed", default=None, type=int)
parser.add_argument("--train-final-cfg", required=True, type=str, help="train final config file")
parser.add_argument("--train-final-memory", default=10000, type=int)
parser.add_argument("--train-final-util", default=70, type=int)
parser.add_argument("--train-final-seed", default=None, type=int)
args = parser.parse_args()
args.search_cfg = os.path.abspath(args.search_cfg)
args.train_surrogate_cfg = os.path.abspath(args.train_surrogate_cfg)
args.train_final_cfg = os.path.abspath(args.train_final_cfg)
gpu = args.gpu
exp_name = args.exp_name
# result dirs
result_dir = os.path.join(args.base_dir, exp_name)
search_dir = os.path.join(result_dir, "search")
sur_dir = os.path.join(result_dir, "train_surrogate")
final_dir = os.path.join(result_dir, "train_final")
if not os.path.exists(result_dir):
os.makedirs(os.path.join(result_dir))
os.makedirs(search_dir)
os.makedirs(sur_dir)
os.makedirs(final_dir)
search_cfg = os.path.join(result_dir, "search.yaml")
train_surrogate_template = os.path.join(result_dir, "train_surrogate.template")
train_final_template = os.path.join(result_dir, "train_final.template")
shutil.copy(args.search_cfg, search_cfg)
shutil.copy(args.train_surrogate_cfg, train_surrogate_template)
shutil.copy(args.train_final_cfg, train_final_template)
# # search
vis_dir = os.path.join(result_dir, "vis")
call_search(search_cfg, gpu, args.search_seed, search_dir, vis_dir, args.search_save_every)
# derive
max_epoch = max([int(n) for n in os.listdir(search_dir) if n.isdigit()])
final_checkpoint = os.path.join(search_dir, str(max_epoch))
derive_out_file = os.path.join(search_dir, "derive.yaml")
call_derive(search_cfg, gpu, args.derive_seed, final_checkpoint, derive_out_file, DERIVE_N)
# make surrogate cfgs
make_surrogate_cfgs(derive_out_file, train_surrogate_template, sur_dir)
# train surrogate
for index in range(DERIVE_N):
sur_fname = os.path.join(sur_dir, "{}.yaml".format(index))
train_sur_dir = os.path.join(sur_dir, str(index))
call_train(sur_fname, gpu, args.train_surrogate_seed, train_sur_dir, save_every=None)
# choose best
sur_perfs = get_sur_perfs(sur_dir)
best_ind = np.argmax(sur_perfs)
with open(derive_out_file, "r") as f:
    genotypes_list = yaml.safe_load(f)
best_geno = _get_genotype_substr(genotypes_list[best_ind])
with open(os.path.join(sur_dir, "sur_res.txt"), "w") as of:
of.write("\n".join(["{} {}".format(ind, perf)
for ind, perf in
sorted(list(enumerate(sur_perfs)), key=lambda item: -item[1])]))
# dump configuration of final train
with open(train_final_template, "r") as f:
    base_cfg = yaml.safe_load(f)
base_cfg["final_model_cfg"]["genotypes"] = best_geno
train_final_cfg = os.path.join(final_dir, "train.yaml")
with open(train_final_cfg, "w") as of:
yaml.safe_dump(base_cfg, of)
# train final
total_epochs = base_cfg["final_trainer_cfg"]["epochs"]
train_final_dir = os.path.join(final_dir, "train")
call_train(train_final_cfg, gpu, args.train_final_seed, train_final_dir, save_every=total_epochs // 4)
log = os.path.join(train_final_dir, "train.log")
final_valid_perf = _get_perf(log, type_=args.type)
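# Example invocation (hedged; the YAML file names are illustrative -- any
# awnas-format search/surrogate/final configs will do):
#
#   python pipeline.py --gpu 0 --exp-name enas_cifar \
#       --search-cfg search.yaml \
#       --train-surrogate-cfg train_surrogate.yaml \
#       --train-final-cfg train_final.yaml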
| [
"[email protected]"
] | |
eb91d4a5ee4783d3343c61260473eedbd4c4dd18 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Geron17Hands/C_PartII/E_Chapter13/E_Exercises/index.py | 31dbc036a2281c1cf4c61b4da842159f9b8249e8 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,826 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
#
# TensorFlow Convolution Operations
# TensorFlow also offers a few other kinds of convolutional layers:
#
# • conv1d() creates a convolutional layer for 1D inputs. This is useful, for example,
# in natural language processing, where a sentence may be represented as a 1D
# array of words, and the receptive field covers a few neighboring words.
# • conv3d() creates a convolutional layer for 3D inputs, such as 3D PET scan.
# • atrous_conv2d() creates an atrous convolutional layer (“à trous” is French for
# “with holes”). This is equivalent to using a regular convolutional layer with a fil‐
# ter dilated by inserting rows and columns of zeros (i.e., holes). For example, a 1 ×
# 3 filter equal to [[1,2,3]] may be dilated with a dilation rate of 4, resulting in a
# dilated filter [[1, 0, 0, 0, 2, 0, 0, 0, 3]]. This allows the convolutional
# layer to have a larger receptive field at no computational price and using no extra
# parameters.
# • conv2d_transpose() creates a transpose convolutional layer, sometimes called a
# deconvolutional layer,15 which upsamples an image. It does so by inserting zeros
# between the inputs, so you can think of this as a regular convolutional layer using
# a fractional stride. Upsampling is useful, for example, in image segmentation: in a
# typical CNN, feature maps get smaller and smaller as you progress through the
# network, so if you want to output an image of the same size as the input, you
# need an upsampling layer.
# • depthwise_conv2d() creates a depthwise convolutional layer that applies every fil‐
# ter to every individual input channel independently. Thus, if there are fn filters
# and fn′ input channels, then this will output fn × fn′ feature maps.
# • separable_conv2d() creates a separable convolutional layer that first acts like a
# depthwise convolutional layer, then applies a 1 × 1 convolutional layer to the
# resulting feature maps. This makes it possible to apply filters to arbitrary sets of
# inputs channels.
#
#
#
# Exercises
# 1. What are the advantages of a CNN over a fully connected DNN for image classi‐
# fication?
# 2. Consider a CNN composed of three convolutional layers, each with 3 × 3 kernels,
# a stride of 2, and SAME padding. The lowest layer outputs 100 feature maps, the
#
#
# 15 This name is quite misleading since this layer does not perform a deconvolution, which is a well-defined
# mathematical operation (the inverse of a convolution).
#
#
#
# 376 | Chapter 13: Convolutional Neural Networks
#
# Download from finelybook www.finelybook.com
# middle one outputs 200, and the top one outputs 400. The input images are RGB
# images of 200 × 300 pixels. What is the total number of parameters in the CNN?
# If we are using 32-bit floats, at least how much RAM will this network require
# when making a prediction for a single instance? What about when training on a
# mini-batch of 50 images?
# 3. If your GPU runs out of memory while training a CNN, what are five things you
# could try to solve the problem?
# 4. Why would you want to add a max pooling layer rather than a convolutional
# layer with the same stride?
# 5. When would you want to add a local response normalization layer?
# 6. Can you name the main innovations in AlexNet, compared to LeNet-5? What
# about the main innovations in GoogLeNet and ResNet?
# 7. Build your own CNN and try to achieve the highest possible accuracy on MNIST.
# 8. Classifying large images using Inception v3.
# a. Download some images of various animals. Load them in Python, for example
# using the matplotlib.image.mpimg.imread() function. Resize and/or crop
# them to 299 × 299 pixels, and ensure that they have just three channels (RGB),
# with no transparency channel.
# b. Download the latest pretrained Inception v3 model: the checkpoint is avail‐
# able at https://goo.gl/nxSQvl.
# c. Create the Inception v3 model by calling the inception_v3() function, as
# shown below. This must be done within an argument scope created by the
# inception_v3_arg_scope() function. Also, you must set is_training=False
# and num_classes=1001 like so:
# from tensorflow.contrib.slim.nets import inception
# import tensorflow.contrib.slim as slim
#
# X = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
# with slim.arg_scope(inception.inception_v3_arg_scope()):
# logits, end_points = inception.inception_v3(
# X, num_classes=1001, is_training=False)
# predictions = end_points["Predictions"]
# saver = tf.train.Saver()
# d. Open a session and use the Saver to restore the pretrained model checkpoint
# you downloaded earlier.
# e. Run the model to classify the images you prepared. Display the top five pre‐
# dictions for each image, along with the estimated probability (the list of class
# names is available at https://goo.gl/brXRtZ). How accurate is the model?
# 9. Transfer learning for large image classification.
#
#
# Exercises | 377
#
# Download from finelybook www.finelybook.com
# a. Create a training set containing at least 100 images per class. For example, you
# could classify your own pictures based on the location (beach, mountain, city,
# etc.), or alternatively you can just use an existing dataset, such as the flowers
# dataset or MIT’s places dataset (requires registration, and it is huge).
# b. Write a preprocessing step that will resize and crop the image to 299 × 299,
# with some randomness for data augmentation.
# c. Using the pretrained Inception v3 model from the previous exercise, freeze all
# layers up to the bottleneck layer (i.e., the last layer before the output layer),
# and replace the output layer with the appropriate number of outputs for your
# new classification task (e.g., the flowers dataset has five mutually exclusive
# classes so the output layer must have five neurons and use the softmax activa‐
# tion function).
# d. Split your dataset into a training set and a test set. Train the model on the
# training set and evaluate it on the test set.
# 10. Go through TensorFlow’s DeepDream tutorial. It is a fun way to familiarize your‐
# self with various ways of visualizing the patterns learned by a CNN, and to gener‐
# ate art using Deep Learning.
#
# Solutions to these exercises are available in Appendix A.
#
#
#
#
# 378 | Chapter 13: Convolutional Neural Networks
#
# Download from finelybook www.finelybook.com
#
#
# CHAPTER 14
# Recurrent Neural Networks
#
#
#
#
# The batter hits the ball. You immediately start running, anticipating the ball’s trajec‐
# tory. You track it and adapt your movements, and finally catch it (under a thunder of
# applause). Predicting the future is what you do all the time, whether you are finishing
# a friend’s sentence or anticipating the smell of coffee at breakfast. In this chapter, we
# are going to discuss recurrent neural networks (RNN), a class of nets that can predict
# the future (well, up to a point, of course). They can analyze time series data such as
# stock prices, and tell you when to buy or sell. In autonomous driving systems, they
# can anticipate car trajectories and help avoid accidents. More generally, they can work
# on sequences of arbitrary lengths, rather than on fixed-sized inputs like all the nets we
# have discussed so far. For example, they can take sentences, documents, or audio
# samples as input, making them extremely useful for natural language processing
# (NLP) systems such as automatic translation, speech-to-text, or sentiment analysis
# (e.g., reading movie reviews and extracting the rater’s feeling about the movie).
# Moreover, RNNs’ ability to anticipate also makes them capable of surprising creativ‐
# ity. You can ask them to predict which are the most likely next notes in a melody, then
# randomly pick one of these notes and play it. Then ask the net for the next most likely
# notes, play it, and repeat the process again and again. Before you know it, your net
# will compose a melody such as the one produced by Google’s Magenta project. Simi‐
# larly, RNNs can generate sentences, image captions, and much more. The result is not
# exactly Shakespeare or Mozart yet, but who knows what they will produce a few years
# from now?
# In this chapter, we will look at the fundamental concepts underlying RNNs, the main
# problem they face (namely, vanishing/exploding gradients, discussed in Chapter 11),
# and the solutions widely used to fight it: LSTM and GRU cells. Along the way, as
# always, we will show how to implement RNNs using TensorFlow. Finally, we will take
# a look at the architecture of a machine translation system.
#
#
# 379
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Exercises",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Exercises(HierNode):
def __init__(self):
super().__init__("Exercises")
self.add(Content(), "content")
# eof
| [
"[email protected]"
] | |
51e3944307e72804a1265a356f3d0a5f606b613f | c49590eb7f01df37c8ec5fef00d0ffc7250fa321 | /openapi_client/models/existing_normal_order.py | a13bb994a8a5c32393770f610b14317804a73b72 | [] | no_license | harshad5498/ks-orderapi-python | 373a4b85a56ff97e2367eebd076f67f972e92f51 | 237da6fc3297c02e85f0fff1a34857aaa4c1d295 | refs/heads/master | 2022-12-09T19:55:21.938764 | 2020-09-03T05:22:51 | 2020-09-03T05:22:51 | 293,533,651 | 0 | 0 | null | 2020-09-07T13:19:25 | 2020-09-07T13:19:24 | null | UTF-8 | Python | false | false | 7,092 | py | # coding: utf-8
"""
KS Trade API's
The version of the OpenAPI document: 1.0
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ExistingNormalOrder(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'orderId': 'str',
'quantity': 'int',
'price': 'float',
'disclosedQuantity': 'int',
'triggerPrice': 'float'
}
attribute_map = {
'orderId': 'orderId',
'quantity': 'quantity',
'price': 'price',
'disclosedQuantity': 'disclosedQuantity',
'triggerPrice': 'triggerPrice'
}
def __init__(self, orderId=None, quantity=None, price=None, disclosedQuantity=None, triggerPrice=None, local_vars_configuration=None): # noqa: E501
"""ExistingNormalOrder - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._orderId = None
self._quantity = None
self._price = None
self._disclosedQuantity = None
self._triggerPrice = None
self.discriminator = None
self.orderId = orderId
if quantity is not None:
self.quantity = quantity
if price is not None:
self.price = price
if disclosedQuantity is not None:
self.disclosedQuantity = disclosedQuantity
if triggerPrice is not None:
self.triggerPrice = triggerPrice
@property
def orderId(self):
"""Gets the orderId of this ExistingNormalOrder. # noqa: E501
Order ID of the order to be modified # noqa: E501
:return: The orderId of this ExistingNormalOrder. # noqa: E501
:rtype: str
"""
return self._orderId
@orderId.setter
def orderId(self, orderId):
"""Sets the orderId of this ExistingNormalOrder.
Order ID of the order to be modified # noqa: E501
:param orderId: The orderId of this ExistingNormalOrder. # noqa: E501
:type orderId: str
"""
if self.local_vars_configuration.client_side_validation and orderId is None: # noqa: E501
raise ValueError("Invalid value for `orderId`, must not be `None`") # noqa: E501
self._orderId = orderId
@property
def quantity(self):
"""Gets the quantity of this ExistingNormalOrder. # noqa: E501
Order quantity - specified in same unit as quoted in market depth # noqa: E501
:return: The quantity of this ExistingNormalOrder. # noqa: E501
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this ExistingNormalOrder.
Order quantity - specified in same unit as quoted in market depth # noqa: E501
:param quantity: The quantity of this ExistingNormalOrder. # noqa: E501
:type quantity: int
"""
self._quantity = quantity
@property
def price(self):
"""Gets the price of this ExistingNormalOrder. # noqa: E501
Order Price, non zero positive for limit order and zero for market order # noqa: E501
:return: The price of this ExistingNormalOrder. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this ExistingNormalOrder.
Order Price, non zero positive for limit order and zero for market order # noqa: E501
:param price: The price of this ExistingNormalOrder. # noqa: E501
:type price: float
"""
self._price = price
@property
def disclosedQuantity(self):
"""Gets the disclosedQuantity of this ExistingNormalOrder. # noqa: E501
Quantity to be disclosed in order # noqa: E501
:return: The disclosedQuantity of this ExistingNormalOrder. # noqa: E501
:rtype: int
"""
return self._disclosedQuantity
@disclosedQuantity.setter
def disclosedQuantity(self, disclosedQuantity):
"""Sets the disclosedQuantity of this ExistingNormalOrder.
Quantity to be disclosed in order # noqa: E501
:param disclosedQuantity: The disclosedQuantity of this ExistingNormalOrder. # noqa: E501
:type disclosedQuantity: int
"""
self._disclosedQuantity = disclosedQuantity
@property
def triggerPrice(self):
"""Gets the triggerPrice of this ExistingNormalOrder. # noqa: E501
Trigger price, required for stoploss or supermultiple order # noqa: E501
:return: The triggerPrice of this ExistingNormalOrder. # noqa: E501
:rtype: float
"""
return self._triggerPrice
@triggerPrice.setter
def triggerPrice(self, triggerPrice):
"""Sets the triggerPrice of this ExistingNormalOrder.
Trigger price, required for stoploss or supermultiple order # noqa: E501
:param triggerPrice: The triggerPrice of this ExistingNormalOrder. # noqa: E501
:type triggerPrice: float
"""
self._triggerPrice = triggerPrice
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExistingNormalOrder):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ExistingNormalOrder):
return True
return self.to_dict() != other.to_dict()
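# A minimal usage sketch (field values are illustrative only): build a
# modify-order payload and serialize it for the API call.
if __name__ == "__main__":
    order = ExistingNormalOrder(orderId="1100774000", quantity=10,
                                price=250.5, triggerPrice=0.0)
    print(order.to_dict())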
| [
"[email protected]"
] | |
ce3f3e77b734c979ff3d49bc82b04f891d0df5bd | 4d1cca31a3aae847bd6ee2dc12eca3971b263fc4 | /src/flua/Compiler/Output/python/PythonClass.py | a2ffea6ea236df51bc99c98170c3846e3c94f63c | [] | no_license | akyoto/flua | 4cc27202c326a6eedd088c5bb88c644905e7be64 | e09d50e0d50fc4f4faa1b0ee482756eaef4e60ec | refs/heads/master | 2021-06-06T10:55:32.795005 | 2016-12-04T00:17:20 | 2016-12-04T00:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | ####################################################################
# Header
####################################################################
# Target: Python Code
# Author: Eduard Urbach
####################################################################
# License
####################################################################
# (C) 2012 Eduard Urbach
#
# This file is part of Blitzprog.
#
# Blitzprog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blitzprog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blitzprog. If not, see <http://www.gnu.org/licenses/>.
####################################################################
# Imports
####################################################################
from flua.Compiler.Output import *
from flua.Compiler.Output.BaseClass import *
from flua.Compiler.Output.python.PythonClassImplementation import *
####################################################################
# Classes
####################################################################
class PythonClass(BaseClass):
def __init__(self, name, node, cppFile):
super().__init__(name, node, cppFile)
def createClassImplementation(self, templateValues):
return PythonClassImplementation(self, templateValues)
| [
"[email protected]"
] | |
080327cbd21766ac54d21ecf1f08d7336c162d80 | cb57a9ea4622b94207d12ea90eab9dd5b13e9e29 | /lc/python/289_game_of_life.py | 7db24705a3c6af0951103e9edb5e780f16317398 | [] | no_license | boknowswiki/mytraning | b59585e1e255a7a47c2b28bf2e591aef4af2f09a | 5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a | refs/heads/master | 2023-08-16T03:28:51.881848 | 2023-08-10T04:28:54 | 2023-08-10T04:28:54 | 124,834,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | #!/usr/bin/python -t
#time O(m*n) space O(1)
#0,2 are "dead", and "dead->live"
#1,3 are "live", and "live->dead"
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: None Do not return anything, modify board in-place instead.
"""
m = len(board)
n = len(board[0])
for i in range(m):
for j in range(n):
if board[i][j] == 0 or board[i][j] == 2:
if self.nnb(board, i, j) == 3:
board[i][j] = 2
else:
if self.nnb(board, i, j) < 2 or self.nnb(board, i, j) > 3:
board[i][j] = 3
for i in range(m):
for j in range(n):
if board[i][j] == 2:
board[i][j] = 1
if board[i][j] == 3:
board[i][j] = 0
def nnb(self, board, i, j):
m,n = len(board), len(board[0])
count = 0
if i-1 >= 0 and j-1 >= 0: count += board[i-1][j-1]%2
if i-1 >= 0: count += board[i-1][j]%2
if i-1 >= 0 and j+1 < n: count += board[i-1][j+1]%2
if j-1 >= 0: count += board[i][j-1]%2
if j+1 < n: count += board[i][j+1]%2
if i+1 < m and j-1 >= 0: count += board[i+1][j-1]%2
if i+1 < m: count += board[i+1][j]%2
if i+1 < m and j+1 < n: count += board[i+1][j+1]%2
return count
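# A minimal usage sketch: the classic "blinker" turns from a horizontal bar
# into a vertical one after a single step.
if __name__ == "__main__":
    grid = [[0, 0, 0],
            [1, 1, 1],
            [0, 0, 0]]
    Solution().gameOfLife(grid)
    print(grid)  # expected: [[0, 1, 0], [0, 1, 0], [0, 1, 0]]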
| [
"[email protected]"
] | |
b4c66f8a260da4fe83eb670aeb5e4b6544e3ef5b | 00b6699ea1302149ab2b9fd57e115656f7a26e7d | /models/transformer_encoder.py | 605c2d2f1ea94fdd68595454e41d279e6400e3ec | [] | no_license | gauravaror/catastrophic_forgetting | 97ac8e1c999db4f36d01ae19a0fb307f8109eb8b | 60e53f61c45f6ce24a28bf8454c8078559bb9e6f | refs/heads/master | 2021-06-30T21:07:27.448889 | 2020-10-05T09:37:36 | 2020-10-05T09:37:36 | 174,500,380 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,573 | py | import math
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from models.utils import Hardsigmoid, BernoulliST
from models.kv_memory import KeyValueMemory
# It's actually TransformerEncoder custom with PositionalEncoder but we use
# name: TransformerRepresentation to avoid confusion with TransformerEncoder Representation.
class TransformerRepresentation(nn.Module):
def __init__(self, emb_dim, nhead, nhid, nlayers, args, dropout=0.5,
use_memory=False, mem_size=None, mem_context_size=None,
inv_temp=None, use_binary=False):
super(TransformerRepresentation, self).__init__()
self.model_type = 'Transformer'
self.emb_dim = emb_dim
self.inv_temp = inv_temp
self.args = args
self.no_positional = self.args.no_positional
self.memory = KeyValueMemory(use_memory=use_memory,
emb_dim=self.emb_dim,
mem_size=mem_size,
mem_context_size=mem_context_size,
inv_temp=self.inv_temp,
use_binary=use_binary)
self.src_mask = None
self.transposed = True
self.pos_encoder = PositionalEncoding(emb_dim, dropout, transposed=self.transposed)
encoder_layers = TransformerEncoderLayer(self.memory.get_input_size(),
nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.pooler = nn.Linear(self.emb_dim, self.emb_dim)
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def add_target_pad(self):
self.memory.add_target_pad()
def get_output_dim(self):
## Transformer input size is same as output
return self.memory.get_input_size()
def forward(self, src, mask):
src = src.transpose(0,1)
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
src = src * math.sqrt(self.emb_dim)
src = self.pos_encoder(src) if not self.no_positional else src
src_input = self.memory(src)
output = self.transformer_encoder(src_input, self.src_mask)
output = output.transpose(0,1)
return torch.mean(output, dim=1)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000, transposed=False):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.transposed = transposed
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
if self.transposed:
pe = pe.transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
        added_pe = self.pe[:x.size(0), :, :] if self.transposed else self.pe[:, :x.size(1), :]  # slice (not index) along the sequence dimension
x = x + added_pe
return self.dropout(x)
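# A hedged shape-check sketch (sizes are illustrative; assumes KeyValueMemory
# acts as a pass-through when use_memory=False, so get_input_size() == emb_dim):
if __name__ == "__main__":
    from types import SimpleNamespace
    args = SimpleNamespace(no_positional=False)
    model = TransformerRepresentation(emb_dim=32, nhead=4, nhid=64,
                                      nlayers=2, args=args, dropout=0.1)
    src = torch.randn(8, 20, 32)   # (batch, seq_len, emb_dim)
    out = model(src, mask=None)    # the causal mask is rebuilt internally
    print(out.shape)               # expected: torch.Size([8, 32])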
| [
"[email protected]"
] | |
bf5ff811dd36959cbb56b862856ef8a46fcdaabe | a7e5aa55139641ca49d27c8b0c275c25f8cc0c54 | /src/main/python/modules/window_statusbar/gui/bar.py | 209b4a51917b5adec045657e13011b29c5680617 | [] | no_license | AlexWoroschilow/AOD-Reader | 5a5fa4ea8184ea2df2301870ccd67717eab307f1 | 6e643958a4fae62128f036821030b8ea9f937d07 | refs/heads/master | 2022-02-24T05:48:48.549468 | 2019-09-20T23:42:03 | 2019-09-20T23:42:03 | 197,986,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # -*- coding: utf-8 -*-
# Copyright 2015 Alex Woroschilow ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from PyQt5 import QtWidgets
from PyQt5 import QtCore
class StatusbarWidget(QtWidgets.QStatusBar):
def __init__(self):
super(StatusbarWidget, self).__init__()
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.status = QtWidgets.QLabel()
self.status.setAlignment(QtCore.Qt.AlignCenter)
self.addWidget(self.status)
self.progress = QtWidgets.QProgressBar()
self.progress.hide()
def text(self, text):
self.status.setText(text)
def start(self, progress):
if self.status is not None:
self.status.hide()
self.removeWidget(self.status)
if self.progress is not None:
self.progress.setValue(progress)
self.addWidget(self.progress, 1)
self.progress.show()
def setProgress(self, progress):
if self.progress is not None:
self.progress.setValue(progress)
def stop(self, progress):
if self.progress is not None:
self.progress.setValue(progress)
self.progress.hide()
self.removeWidget(self.progress)
if self.status is not None:
self.addWidget(self.status, 1)
self.status.show()
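# A minimal usage sketch: drive the bar through its label / progress phases
# inside a throwaway application.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    bar = StatusbarWidget()
    bar.text("Ready")
    bar.start(0)         # swap the label out for the progress bar
    bar.setProgress(50)
    bar.stop(100)        # swap the progress bar back for the label
    bar.show()
    sys.exit(app.exec_())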
| [
"[email protected]"
] | |
df5280e7916d11004414fad03e054bf80f089274 | 730154818de1d81d0bca2546ca1a3ab029945860 | /test/cli/test_commands.py | 7fcb502ab2a37d9db734c94829a20e64fdfc5b1d | [
"MIT"
] | permissive | mnalband/schemathesis | b2b4cba8505dfc0f7779ac7fb10abbf42c566076 | 42b351589fae3f407a1de248750bc82d6b5796d1 | refs/heads/master | 2020-08-21T12:30:58.006890 | 2019-10-16T08:59:36 | 2019-10-18T21:28:32 | 216,160,468 | 0 | 0 | MIT | 2019-10-19T06:31:13 | 2019-10-19T06:31:13 | null | UTF-8 | Python | false | false | 9,661 | py | import pytest
from _pytest.main import ExitCode
from hypothesis import HealthCheck, Phase, Verbosity
from requests.auth import HTTPDigestAuth
from schemathesis.runner import DEFAULT_CHECKS
def test_commands_help(cli):
result = cli.run_subprocess()
assert result.ret == ExitCode.OK
assert result.stdout.get_lines_after("Commands:") == [" run Perform schemathesis test."]
result_help = cli.run_subprocess("--help")
result_h = cli.run_subprocess("-h")
assert result.stdout.lines == result_h.stdout.lines == result_help.stdout.lines
def test_commands_version(cli):
result = cli.run_subprocess("--version")
assert result.ret == ExitCode.OK
assert "version" in result.stdout.lines[0]
@pytest.mark.parametrize(
"args, error",
(
(("run",), 'Error: Missing argument "SCHEMA".'),
(("run", "not-url"), "Error: Invalid SCHEMA, must be a valid URL."),
(
("run", "http://127.0.0.1", "--auth=123"),
'Error: Invalid value for "--auth" / "-a": Should be in KEY:VALUE format. Got: 123',
),
(
("run", "http://127.0.0.1", "--auth-type=random"),
'Error: Invalid value for "--auth-type" / "-A": invalid choice: random. (choose from basic, digest)',
),
(
("run", "http://127.0.0.1", "--header=123"),
'Error: Invalid value for "--header" / "-H": Should be in KEY:VALUE format. Got: 123',
),
(
("run", "http://127.0.0.1", "--header=:"),
'Error: Invalid value for "--header" / "-H": Header name should not be empty',
),
(
("run", "http://127.0.0.1", "--hypothesis-phases=explicit,first,second"),
'Error: Invalid value for "--hypothesis-phases": invalid choice(s): first, second. '
"Choose from explicit, reuse, generate, shrink",
),
),
)
def test_commands_run_errors(cli, args, error):
# When invalid arguments are passed to CLI
result = cli.run_subprocess(*args)
# Then an appropriate error should be displayed
assert result.ret == ExitCode.INTERRUPTED
assert result.stderr.lines[-1] == error
def test_commands_run_help(cli):
result_help = cli.run_subprocess("run", "--help")
assert result_help.ret == ExitCode.OK
assert result_help.stdout.lines == [
"Usage: schemathesis run [OPTIONS] SCHEMA",
"",
" Perform schemathesis test against an API specified by SCHEMA.",
"",
" SCHEMA must be a valid URL pointing to an Open API / Swagger",
" specification.",
"",
"Options:",
" -c, --checks [not_a_server_error]",
" List of checks to run.",
" -a, --auth TEXT Server user and password. Example:",
" USER:PASSWORD",
" -A, --auth-type [basic|digest] The authentication mechanism to be used.",
" Defaults to 'basic'.",
" -H, --header TEXT Custom header in a that will be used in all",
r" requests to the server. Example:",
r" Authorization: Bearer\ 123",
r" -E, --endpoint TEXT Filter schemathesis test by endpoint",
r" pattern. Example: users/\d+",
" -M, --method TEXT Filter schemathesis test by HTTP method.",
" -b, --base-url TEXT Base URL address of the API.",
" --hypothesis-deadline INTEGER Duration in milliseconds that each",
" individual example with a test is not",
" allowed to exceed.",
" --hypothesis-derandomize Use Hypothesis's deterministic mode.",
" --hypothesis-max-examples INTEGER",
" Maximum number of generated examples per",
" each method/endpoint combination.",
" --hypothesis-phases [explicit|reuse|generate|shrink]",
" Control which phases should be run.",
" --hypothesis-report-multiple-bugs BOOLEAN",
" Raise only the exception with the smallest",
" minimal example.",
" --hypothesis-suppress-health-check [data_too_large|filter_too_much|too_slow|return_value|"
"hung_test|large_base_example|not_a_test_method]",
" Comma-separated list of health checks to",
" disable.",
" --hypothesis-verbosity [quiet|normal|verbose|debug]",
" Verbosity level of Hypothesis messages",
" -h, --help Show this message and exit.",
]
SCHEMA_URI = "https://example.com/swagger.json"
@pytest.mark.parametrize(
"args, expected",
(
([SCHEMA_URI], {"checks": DEFAULT_CHECKS}),
([SCHEMA_URI, "--auth=test:test"], {"checks": DEFAULT_CHECKS, "api_options": {"auth": ("test", "test")}}),
(
[SCHEMA_URI, "--auth=test:test", "--auth-type=digest"],
{"checks": DEFAULT_CHECKS, "api_options": {"auth": HTTPDigestAuth("test", "test")}},
),
(
[SCHEMA_URI, "--auth=test:test", "--auth-type=DIGEST"],
{"checks": DEFAULT_CHECKS, "api_options": {"auth": HTTPDigestAuth("test", "test")}},
),
(
[SCHEMA_URI, "--header=Authorization:Bearer 123"],
{"checks": DEFAULT_CHECKS, "api_options": {"headers": {"Authorization": "Bearer 123"}}},
),
(
[SCHEMA_URI, "--header=Authorization: Bearer 123 "],
{"checks": DEFAULT_CHECKS, "api_options": {"headers": {"Authorization": "Bearer 123 "}}},
),
(
[SCHEMA_URI, "--method=POST", "--method", "GET"],
{"checks": DEFAULT_CHECKS, "loader_options": {"method": ("POST", "GET")}},
),
([SCHEMA_URI, "--endpoint=users"], {"checks": DEFAULT_CHECKS, "loader_options": {"endpoint": ("users",)}}),
(
[SCHEMA_URI, "--base-url=https://example.com/api/v1test"],
{"checks": DEFAULT_CHECKS, "api_options": {"base_url": "https://example.com/api/v1test"}},
),
(
[
SCHEMA_URI,
"--hypothesis-deadline=1000",
"--hypothesis-derandomize",
"--hypothesis-max-examples=1000",
"--hypothesis-phases=explicit,generate",
"--hypothesis-report-multiple-bugs=0",
"--hypothesis-suppress-health-check=too_slow,filter_too_much",
"--hypothesis-verbosity=normal",
],
{
"checks": DEFAULT_CHECKS,
"hypothesis_options": {
"deadline": 1000,
"derandomize": True,
"max_examples": 1000,
"phases": [Phase.explicit, Phase.generate],
"report_multiple_bugs": False,
"suppress_health_check": [HealthCheck.too_slow, HealthCheck.filter_too_much],
"verbosity": Verbosity.normal,
},
},
),
),
)
def test_execute_arguments(cli, mocker, args, expected):
m_execute = mocker.patch("schemathesis.runner.execute", autospec=True)
result = cli.run_inprocess(*args)
assert result.exit_code == 0
m_execute.assert_called_once_with(SCHEMA_URI, **expected)
@pytest.mark.endpoints()
def test_hypothesis_parameters(cli, schema_url):
# When Hypothesis options are passed via command line
result = cli.run_inprocess(
schema_url,
"--hypothesis-deadline=1000",
"--hypothesis-derandomize",
"--hypothesis-max-examples=1000",
"--hypothesis-phases=explicit,generate",
"--hypothesis-report-multiple-bugs=0",
"--hypothesis-suppress-health-check=too_slow,filter_too_much",
"--hypothesis-verbosity=normal",
)
# Then they should be correctly converted into arguments accepted by `hypothesis.settings`
# Parameters are validated in `hypothesis.settings`
assert result.exit_code == 0
@pytest.mark.endpoints("success")
def test_cli_run_output_success(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 0
assert " FALSIFYING EXAMPLES " not in result.stdout
assert " SUMMARY " in result.stdout
lines = result.stdout.split("\n")
assert "Running schemathesis test cases ..." in lines
assert "Tests succeeded." in lines
def test_cli_run_output_with_errors(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 1
assert " FALSIFYING EXAMPLES " in result.stdout
assert " SUMMARY " in result.stdout
lines = result.stdout.split("\n")
assert "Running schemathesis test cases ..." in lines
assert "not_a_server_error 1 / 3 passed FAILED " in lines
assert "Tests failed." in lines
@pytest.mark.endpoints()
def test_cli_run_output_empty(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 0
assert " FALSIFYING EXAMPLES " not in result.stdout
assert " SUMMARY " not in result.stdout
lines = result.stdout.split("\n")
assert "No checks were performed." in lines
assert "Tests succeeded." in lines
| [
"[email protected]"
] | |
5133a67b4edd0c59c4ea5de641e73dca50cb2c73 | bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062 | /ppdet/modeling/cls_utils.py | 3ae8d116959a96bb2bf337dee7330c5909bc61ac | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleDetection | e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961 | bd83b98342b0a6bc8d8dcd5936233aeda1e32167 | refs/heads/release/2.6 | 2023-08-31T07:04:15.357051 | 2023-08-18T02:24:45 | 2023-08-18T02:24:45 | 217,475,193 | 12,523 | 3,096 | Apache-2.0 | 2023-09-10T10:05:56 | 2019-10-25T07:21:14 | Python | UTF-8 | Python | false | false | 1,325 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _get_class_default_kwargs(cls, *args, **kwargs):
"""
Get default arguments of a class in dict format, if args and
kwargs is specified, it will replace default arguments
"""
varnames = cls.__init__.__code__.co_varnames
argcount = cls.__init__.__code__.co_argcount
keys = varnames[:argcount]
assert keys[0] == 'self'
keys = keys[1:]
values = list(cls.__init__.__defaults__)
assert len(values) == len(keys)
if len(args) > 0:
for i, arg in enumerate(args):
values[i] = arg
default_kwargs = dict(zip(keys, values))
if len(kwargs) > 0:
for k, v in kwargs.items():
default_kwargs[k] = v
return default_kwargs
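# A minimal usage sketch (the Dummy class below is hypothetical and added
# here only for illustration; it is not part of PaddleDetection):
if __name__ == '__main__':

    class Dummy:
        def __init__(self, a=1, b=2):
            pass

    assert _get_class_default_kwargs(Dummy) == {'a': 1, 'b': 2}
    assert _get_class_default_kwargs(Dummy, 5) == {'a': 5, 'b': 2}
    assert _get_class_default_kwargs(Dummy, b=9) == {'a': 1, 'b': 9}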
| [
"[email protected]"
] | |
c6c6323c6e8149cfa777226d6b774aadd0f6d089 | aa1e637de90f69f9ae742d42d5b777421617d10c | /nitro/resource/config/ntp/ntpparam.py | eeeb63f88a5a0e1502be215dfd0c498591acd78f | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | km0420j/nitro-python | db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | refs/heads/master | 2021-10-21T18:12:50.218465 | 2019-03-05T14:00:15 | 2019-03-05T15:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,294 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ntpparam(base_resource) :
"""Configuration for NTP parameter resource."""
def __init__(self) :
self._authentication = ""
self._trustedkey = []
self._autokeylogsec = 0
self._revokelogsec = 0
@property
def authentication(self) :
"""Apply NTP authentication, which enables the NTP client (NetScaler) to verify that the server is in fact known and trusted.<br/>Default value: YES<br/>Possible values = YES, NO."""
try :
return self._authentication
except Exception as e:
raise e
@authentication.setter
def authentication(self, authentication) :
"""Apply NTP authentication, which enables the NTP client (NetScaler) to verify that the server is in fact known and trusted.<br/>Default value: YES<br/>Possible values = YES, NO
:param authentication:
"""
try :
self._authentication = authentication
except Exception as e:
raise e
@property
def trustedkey(self) :
"""Key identifiers that are trusted for server authentication with symmetric key cryptography in the keys file.<br/>Minimum length = 1<br/>Maximum length = 65534."""
try :
return self._trustedkey
except Exception as e:
raise e
@trustedkey.setter
def trustedkey(self, trustedkey) :
"""Key identifiers that are trusted for server authentication with symmetric key cryptography in the keys file.<br/>Minimum length = 1<br/>Maximum length = 65534
:param trustedkey:
"""
try :
self._trustedkey = trustedkey
except Exception as e:
raise e
@property
def autokeylogsec(self) :
"""Autokey protocol requires the keys to be refreshed periodically. This parameter specifies the interval between regenerations of new session keys. In seconds, expressed as a power of 2.<br/>Default value: 12<br/>Maximum length = 32."""
try :
return self._autokeylogsec
except Exception as e:
raise e
@autokeylogsec.setter
def autokeylogsec(self, autokeylogsec) :
"""Autokey protocol requires the keys to be refreshed periodically. This parameter specifies the interval between regenerations of new session keys. In seconds, expressed as a power of 2.<br/>Default value: 12<br/>Maximum length = 32
:param autokeylogsec:
"""
try :
self._autokeylogsec = autokeylogsec
except Exception as e:
raise e
@property
def revokelogsec(self) :
"""Interval between re-randomizations of the autokey seeds to prevent brute-force attacks on the autokey algorithms.<br/>Default value: 16<br/>Maximum length = 32."""
try :
return self._revokelogsec
except Exception as e:
raise e
@revokelogsec.setter
def revokelogsec(self, revokelogsec) :
"""Interval between re-randomizations of the autokey seeds to prevent brute-force attacks on the autokey algorithms.<br/>Default value: 16<br/>Maximum length = 32
:param revokelogsec:
"""
try :
self._revokelogsec = revokelogsec
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ntpparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ntpparam
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update ntpparam.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = ntpparam()
updateresource.authentication = resource.authentication
updateresource.trustedkey = resource.trustedkey
updateresource.autokeylogsec = resource.autokeylogsec
updateresource.revokelogsec = resource.revokelogsec
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of ntpparam resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = ntpparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ntpparam resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ntpparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Authentication:
""" """
YES = "YES"
NO = "NO"
class ntpparam_response(base_response) :
""" """
def __init__(self, length=1) :
self.ntpparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ntpparam = [ntpparam() for _ in range(length)]
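# A hedged usage sketch (the nitro_service import path, address, and
# credentials below are illustrative assumptions, not part of this module):
#
#   from nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   params = ntpparam.get(client)[0]
#   params.authentication = ntpparam.Authentication.YES
#   ntpparam.update(client, params)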
| [
"[email protected]"
] | |
c05336076541453c1569b9d8c09221f777d63f7b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /2nciiXZN4HCuNEmAi_6.py | 0bd7f17fca7fe6993cac5c0ec60e2e367d3dc816 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
def flatten(r, resp=None, a="main"):
  # Recursively collect the scalar items of an arbitrarily nested list
  # into the shared `resp` list. Recursive calls (a="rec") also append
  # intermediate list objects to `resp`, so the top-level call
  # (a="main") filters those out before returning.
  if resp is None:
    resp = list()
  if type(r) == list:
    for i in r:
      resp.append(flatten(i, resp, "rec"))
  else:
    return r
  return resp if a == "rec" else [i for i in resp if type(i) != list]
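# A quick usage sketch (not part of the original drill):
if __name__ == "__main__":
  print(flatten([1, [2, [3, 4]], 5]))  # -> [1, 2, 3, 4, 5]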
| [
"[email protected]"
] | |
8122cd63318c83fba3251e64209f9f3899bd2f3b | 35942792e6dbec7862dd7bbc1aaec2b76ec0bc85 | /ABC/C/c110.py | 433dbf7458b7562b7e2b7332698102a7372d8afb | [] | no_license | hokekiyoo/AtCoder | 97f870421b513a5366681d1e05ba1e5038dfa077 | 2be1558c71a3ad8e1852645df050bca494b3afca | refs/heads/master | 2020-04-27T17:51:11.337193 | 2019-10-28T11:42:47 | 2019-10-28T11:42:47 | 174,541,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from collections import Counter
S = input()
T = input()
import numpy as np
# Compare the multisets of character occurrence counts of S and T.
S_num = Counter(S).values()
T_num = Counter(T).values()
Ss = np.array(list(S_num))
Ts = np.array(list(T_num))
if len(Ss) != len(Ts):
print("No")
else:
if all(np.sort(Ss)==np.sort(Ts)):
print("Yes")
else:
print("No")
## Alternative solution?
"""
Apparently plain sorted() works fine here; there is no need to go through numpy arrays.
from collections import Counter
s, t = input(), input()
c1 = sorted(Counter(s).values())
c2 = sorted(Counter(t).values())
print('Yes') if c1 == c2 else print('No')
""" | [
"[email protected]"
] | |
5cbbb3255cb564286de601d5a17ec543b88b7f58 | b2b03fe08e5b97f2a53852538c738aa60677a2af | /python/tests/unit/test_maasdriver_vlan.py | 6094efea069d8060b8ca4c388d31aa163f90046c | [
"Apache-2.0"
] | permissive | spyd3rweb/drydock | 8685b82f340f590f75a3893244486754f77c048f | 9d1c65dc87807b694d00564bb9fa4fdd25297dc6 | refs/heads/master | 2020-09-02T09:51:42.220866 | 2020-04-05T18:53:10 | 2020-04-05T18:53:10 | 219,194,440 | 0 | 0 | Apache-2.0 | 2019-11-17T05:41:12 | 2019-11-02T18:12:00 | null | UTF-8 | Python | false | false | 1,813 | py | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for the maasdriver Vlan model.'''
import pytest
from drydock_provisioner.drivers.node.maasdriver.models.vlan import Vlan
from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict
class TestMaasVlan():
def test_add_rack_controller(self, mocker):
'''Test vlan model method for setting a managing rack controller.'''
# A object to return that looks like a requests response
# object wrapping a MAAS API response
class MockedResponse():
status_code = 200
vlan_fields = {'name': 'test', 'dhcp_on': True, 'mtu': 1500}
primary_rack = "asdf79"
secondary_rack = "asdf80"
tertiary_rack = "asdf81"
api_client = mocker.MagicMock()
api_client.get.return_value = MockedResponse()
vlan_obj = Vlan(api_client, **vlan_fields)
vlan_obj.add_rack_controller(primary_rack)
assert vlan_obj.primary_rack == primary_rack
vlan_obj.add_rack_controller(secondary_rack)
assert vlan_obj.secondary_rack == secondary_rack
with pytest.raises(RackControllerConflict):
vlan_obj.add_rack_controller(tertiary_rack)
| [
"[email protected]"
] | |
a38cb63d4933a71216ca298c2595eaceb6005c82 | d32fd3dce3d7a3f6b3c0c47d21e9d21e78e140e1 | /day1/ex6_math.py | 1cf29b6d6ff36c00318ac3a5633a929a55d1a004 | [
"Apache-2.0"
] | permissive | ktbyers/pynet_ons | 0fe77d14d5e1f119396c1f72d98eaeb56849c2ab | 7e84060f547ee8346a6ecb2db68a89d0ddf17aa6 | refs/heads/master | 2021-01-17T17:30:58.832361 | 2016-10-05T23:23:02 | 2016-10-05T23:23:02 | 63,434,341 | 2 | 13 | null | 2016-08-01T19:02:47 | 2016-07-15T16:01:39 | Python | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
num1 = int(raw_input("Enter first number: "))
num2 = int(raw_input("Enter second number: "))
print "\n\nSum: {}".format(num1 + num2)
print "Difference: {}".format(num1 - num2)
print "Product: {}".format(num1 * num2)
print "Division: {:.2f}".format(num1/float(num2))
print
| [
"[email protected]"
] | |
910d89742125c89f6d83d759cdbd1a70be21888b | b29589f95734682663ae6cd40ab00eb0a94b6d87 | /longwave/lblnew_20160916/study__g1_threshold/h2o/conc_None/band02_wn_340_540/nv_1000/dv_0.001/ng_11/g_ascending_k_descending/refPTs_P_1_T_250__P_10_T_250__P_500_T_250/ng_refs_2__3__6/ng_adju_-2__-2__0/getabsth_auto__auto__auto/absth_dlogN_uniform__dlogN_uniform__dlogN_uniform/klin_none/atmpro_saw/wgt_k_1/wgt_0.6_0.6__0.6_0.6_0.6__0.6_0.6_0.6_0.6_0.6_0.6/wgt_flux_1/w_diffuse_1.66_1.66__1.8_1.8_1.8__1.8_1.66_1.45_1.45_1.45_1.45/option_compute_ktable_0/option_compute_btable_0/crd_d5931a1/param.py | 3f670efc4d7a78f1fba5f8514ea4cd44e2aa502f | [] | no_license | qAp/offline_radiation_notebooks | 02c2b2414ef1410f235776001a668f7df0b9f1cf | 44fb62391c27e4e314ad68ae3e91f6111b3172c5 | refs/heads/master | 2020-04-15T14:31:34.675322 | 2019-07-08T04:45:54 | 2019-07-08T04:45:54 | 43,118,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/h2o/conc_None/band02_wn_340_540/nv_1000/dv_0.001/ng_11/g_ascending_k_descending/refPTs_P_1_T_250__P_10_T_250__P_500_T_250/ng_refs_2__3__6/ng_adju_-2__-2__0/getabsth_auto__auto__auto/absth_dlogN_uniform__dlogN_uniform__dlogN_uniform/klin_none/atmpro_saw/wgt_k_1/wgt_0.6_0.6__0.6_0.6_0.6__0.6_0.6_0.6_0.6_0.6_0.6/wgt_flux_1/w_diffuse_1.66_1.66__1.8_1.8_1.8__1.8_1.66_1.45_1.45_1.45_1.45/option_compute_ktable_0/option_compute_btable_0/crd_d5931a1'
PARAM = {'molecule': 'h2o', 'band': '2', 'vmin': 340, 'vmax': 540, 'ref_pts': [(1, 250), (10, 250), (500, 250)], 'ng_refs': [2, 3, 6], 'ng_adju': [-2, -2, 0], 'klin': 0, 'option_wgt_k': 1, 'wgt': [(0.6, 0.6), (0.6, 0.6, 0.6), (0.6, 0.6, 0.6, 0.6, 0.6, 0.6)], 'w_diffuse': [(1.66, 1.66), (1.8, 1.8, 1.8), (1.8, 1.66, 1.45, 1.45, 1.45, 1.45)], 'commitnumber': 'd5931a1', 'conc': None, 'dv': 0.001, 'nv': 1000, 'option_wgt_flux': 1, 'option_compute_ktable': 0, 'option_compute_btable': 0, 'atmpro': 'saw', 'tsfc': 257} | [
"[email protected]"
] | |
546a1923ba578b58a263e4e4a8c6151cc1b740ea | 84b04d0787cf4cca686f54dcb4ca8eb0a480bdd5 | /src/plonetheme/kasteeldehaar/tests/test_robot.py | 637d955684aa80abb6b0c6a48ffd0cdc2dd7c458 | [] | no_license | plone-ve/plonetheme.kasteeldehaar | 357c1399d2d14d1b07cbd507521af4bfd4182897 | 136e0304b935f3ffb085824ed85d7a71a71924d4 | refs/heads/master | 2023-08-25T15:10:11.249942 | 2016-11-04T16:58:00 | 2016-11-04T16:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # -*- coding: utf-8 -*-
# NOTE: the original line imported the unrendered template placeholder
# "plonetheme.kasteeldehaar_ACCEPTANCE_TESTING", which is not a valid Python
# identifier; the conventional plone layer name is assumed here.
from plonetheme.kasteeldehaar.testing import PLONETHEME_KASTEELDEHAAR_ACCEPTANCE_TESTING  # noqa
from plone.app.testing import ROBOT_TEST_LEVEL
from plone.testing import layered
import os
import robotsuite
import unittest
def test_suite():
suite = unittest.TestSuite()
current_dir = os.path.abspath(os.path.dirname(__file__))
robot_dir = os.path.join(current_dir, 'robot')
robot_tests = [
os.path.join('robot', doc) for doc in os.listdir(robot_dir)
if doc.endswith('.robot') and doc.startswith('test_')
]
for robot_test in robot_tests:
robottestsuite = robotsuite.RobotTestSuite(robot_test)
robottestsuite.level = ROBOT_TEST_LEVEL
suite.addTests([
layered(
robottestsuite,
layer=plonetheme.kasteeldehaar_ACCEPTANCE_TESTING
),
])
return suite
| [
"[email protected]"
] | |
9d2fa34a66d6dbc7159a496377e64a378cf8bf8a | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/npc/BossAI.py | 20a0702863d316eba876ab90f147f6b3362cec96 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from direct.distributed import DistributedObjectAI
class BossAI(DistributedObjectAI.DistributedObjectAI):
    def __init__(self, air):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
def announceGenerate(self):
DistributedObjectAI.DistributedObjectAI.announceGenerate(self)
def generate(self):
DistributedObjectAI.DistributedObjectAI.generate(self)
def delete(self):
DistributedObjectAI.DistributedObjectAI.delete(self)
def disable(self):
DistributedObjectAI.DistributedObjectAI.disable(self) | [
"[email protected]"
] | |
d64822947bd318ef999a252c8a3923e1a6f107a8 | f654f5f07dd8109c0ee31ba89dd4804e6b288343 | /src/programy/utils/oob/email.py | 7ce0678ee5141d87a71f7ce51bfe85ecb32f96c5 | [
"MIT"
] | permissive | sprinteroz/program-y | 3d1f5f28e4f3be770705d4bef15410b8b78f19da | 454c6bde225dce7c3fb01c549d46249248caf7b5 | refs/heads/master | 2021-01-19T16:05:25.636700 | 2017-08-22T03:56:33 | 2017-08-22T03:56:33 | 100,986,551 | 1 | 0 | null | 2017-08-21T19:43:43 | 2017-08-21T19:43:43 | null | UTF-8 | Python | false | false | 1,220 | py | import logging
import xml.etree.ElementTree as ET
from programy.utils.oob.oob import OutOfBandProcessor
"""
<oob>
<email>
<to>recipient</to>
<subject>subject text</subject>
<body>body text</body>
</email>
</oob>
"""
class EmailOutOfBandProcessor(OutOfBandProcessor):
def __init__(self):
OutOfBandProcessor.__init__(self)
self._to = None
self._subject = None
self._body = None
def parse_oob_xml(self, oob: ET.Element):
for child in oob:
if child.tag == 'to':
self._to = child.text
elif child.tag == 'subject':
self._subject = child.text
elif child.tag == 'body':
self._body = child.text
else:
logging.error ("Unknown child element [%s] in email oob"%(child.tag))
if self._to is not None and \
self._subject is not None and \
self._body is not None:
return True
logging.error("Invalid email oob command")
return False
def execute_oob_command(self, bot, clientid):
logging.info("EmailOutOfBandProcessor: Emailing=%s", self._to)
return "EMAIL"
| [
"[email protected]"
] | |
f5db9f6c7200aa09c359fa4156c99124cbaf9b9a | a7e09640c081cf858f30c3cc3fe2d6ffc986eb7c | /gui/system/migrations/0008_auto_20170906_2335.py | ead8a168e06533db3ffd002005ee3b25fcc68f3b | [] | no_license | cbwest3/freenas | 3fbeffe66c78a375843f138afd1ee306954a9c87 | 9947174014dd740145d540f03c1849a851f3b6e7 | refs/heads/master | 2021-04-30T13:59:53.975592 | 2018-02-12T05:25:55 | 2018-02-12T05:25:55 | 121,202,118 | 1 | 0 | null | 2018-02-12T05:01:39 | 2018-02-12T05:01:38 | null | UTF-8 | Python | false | false | 869 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-06 23:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0007_auto_201708211858'),
]
operations = [
migrations.AddField(
model_name='certificate',
name='cert_san',
field=models.TextField(blank=True, help_text='Multi-domain support. Enter additional space separated domains', null=True, verbose_name='Subject Alternate Names'),
),
migrations.AddField(
model_name='certificateauthority',
name='cert_san',
field=models.TextField(blank=True, help_text='Multi-domain support. Enter additional space separated domains', null=True, verbose_name='Subject Alternate Names'),
),
]
| [
"[email protected]"
] | |
df93f65d97755a6b38917bb204e856bbf90a7efd | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/PythonInADay2/CSV-Files-Drill/37of79-119.py | 7707852ef6e09fc8a3743fa9851f02ce0b0f3c43 | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,560 | py | import wx, db_program
class Frame(wx.Frame):
def __init__(self, title):
wx.Frame.__init__(self, None,\
title=title, size=(800,600))
panel = wx.Panel(self)
# Creating the menu bar
menuBar = wx.MenuBar()
fileMenu = wx.Menu()
exitItem = fileMenu.Append(wx.NewId(), "Exit")
menuBar.Append(fileMenu, "File")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.exitProgram, exitItem)
self.CreateStatusBar()
# Setup Add New Character UI
# Create static box
wx.StaticBox(panel, label='Add a new character', pos=(20,40), size=(280,190))
# Text for name, gender etc
wx.StaticText(panel, label='Name:', pos=(30,70))
wx.StaticText(panel, label='Gender:', pos=(30,110))
wx.StaticText(panel, label='Age:', pos=(30,150))
wx.StaticText(panel, label='Occupation:', pos=(30,190))
# Single line text boxes
self.sName = wx.TextCtrl(panel, size=(150, -1), pos=(130,70))
self.sGen = wx.TextCtrl(panel, size=(150, -1), pos=(130,110))
self.sAge = wx.SpinCtrl(panel, value='0', pos=(130, 150), size=(70, 25))
self.sOcc = wx.TextCtrl(panel, size=(150, -1), pos=(130,190))
# Save button
save = wx.Button(panel, label="Add Character", pos=(100, 230))
save.Bind(wx.EVT_BUTTON, self.addCharacter)
# Setup the Table UI
# Setup table as listCtrl
self.listCtrl = wx.ListCtrl(panel, size=(400,400), pos=(350,40), style=wx.LC_REPORT |wx.BORDER_SUNKEN)
# Add columns to listCtrl
self.listCtrl.InsertColumn(0, "ID")
self.listCtrl.InsertColumn(1, "Name")
self.listCtrl.InsertColumn(2, "Gender")
self.listCtrl.InsertColumn(3, "Age")
self.listCtrl.InsertColumn(4, "Occupation")
# Add data to the list control
self.fillListCtrl()
# Run onSelect function when item is selected
self.listCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onSelect)
# Setup a delete button
deleteBtn = wx.Button(panel, label="Delete", pos=(640, 450))
# Bind delete button to onDelete function
deleteBtn.Bind(wx.EVT_BUTTON, self.onDelete)
# Setup Update Character UI
# Create static box
wx.StaticBox(panel, label='Update a character', pos=(20,340), size=(280,190))
# Text for name, gender etc
wx.StaticText(panel, label='Name:', pos=(30,370))
wx.StaticText(panel, label='Gender:', pos=(30,410))
wx.StaticText(panel, label='Age:', pos=(30,450))
wx.StaticText(panel, label='Occupation:', pos=(30,490))
# Single line text boxes
self.sNameU = wx.TextCtrl(panel, size=(150, -1), pos=(130,370))
self.sGenU = wx.TextCtrl(panel, size=(150, -1), pos=(130,410))
self.sAgeU = wx.SpinCtrl(panel, value='0', pos=(130, 450), size=(70, 25))
self.sOccU = wx.TextCtrl(panel, size=(150, -1), pos=(130,490))
# Save button
saveUpdate = wx.Button(panel, label="Update Character", pos=(100, 530))
saveUpdate.Bind(wx.EVT_BUTTON, self.updateCharacter)
def addCharacter(self, event):
name = self.sName.GetValue()
gen = self.sGen.GetValue()
age = self.sAge.GetValue()
occ = self.sOcc.GetValue()
        # Checking that the text fields have a value (age comes from a
        # SpinCtrl and is always an int, so it never equals '')
        if (name == '') or (gen == '') or (occ == ''):
# Alert user that a variable is empty
dlg = wx.MessageDialog(None, \
'Some character details are missing. Enter values in each text box.', \
'Missing Details', wx.OK)
dlg.ShowModal()
dlg.Destroy()
return False
# Adding character to database
db_program.newCharacter(name, gen, age, occ)
print db_program.viewAll()
# Empty text boxes when finished.
self.sName.Clear()
self.sGen.Clear()
self.sOcc.Clear()
self.sAge.SetValue(0)
# Update list control
self.fillListCtrl()
def exitProgram(self, event):
self.Destroy()
def fillListCtrl(self):
# Get data from the database
self.allData = db_program.viewAll()
# Delete old data before adding new data
self.listCtrl.DeleteAllItems()
# Append data to the table
for row in self.allData:
# Loop though and append data
self.listCtrl.Append(row)
def onDelete(self, event):
# Delete the character
db_program.deleteCharacter(self.selectedId)
# Refresh the table
self.fillListCtrl()
def onSelect(self, event):
# Get the id of the selected row
self.selectedId = event.GetText()
# Get index of selected row
index = event.GetIndex()
# Get character info
charInfo = self.allData[index]
print charInfo
# Set value of update text boxes
self.sNameU.SetValue(charInfo[1])
self.sGenU.SetValue(charInfo[2])
self.sAgeU.SetValue(charInfo[3])
self.sOccU.SetValue(charInfo[4])
def updateCharacter(self, event):
pass
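        # A possible implementation for a later drill (hedged sketch; it
        # assumes db_program exposes updateCharacter(id, name, gen, age, occ),
        # which is not shown in this file):
        #   name = self.sNameU.GetValue()
        #   gen = self.sGenU.GetValue()
        #   age = self.sAgeU.GetValue()
        #   occ = self.sOccU.GetValue()
        #   db_program.updateCharacter(self.selectedId, name, gen, age, occ)
        #   self.fillListCtrl()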
app = wx.App()
frame = Frame("Python GUI")
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
1cb1a5a7d714a543f49ebabf54a2b5ea99009cc6 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DFL260-MIB.py | 125dad8d74ee966bee6ebfc13c5c740e0dec552b | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 96,348 | py | #
# PySNMP MIB module DFL260-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DFL260-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:42:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
iso, Unsigned32, IpAddress, Integer32, TimeTicks, Counter64, enterprises, Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Gauge32, MibIdentifier, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Unsigned32", "IpAddress", "Integer32", "TimeTicks", "Counter64", "enterprises", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Gauge32", "MibIdentifier", "NotificationType", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dfl260_MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 1, 2)).setLabel("dfl260-MIB")
dfl260_MIB.setRevisions(('2010-09-02 11:39', '2010-03-30 09:00', '2009-11-10 09:16', '2008-11-18 16:05', '2008-10-14 12:27', '2007-10-31 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: dfl260_MIB.setRevisionsDescriptions(('Added 64-bit counters', 'Added values for SMTP ALG objects.', 'Added values for opened and closed connections per second', 'Added value for timer usage', 'Added values for memory usage and TCP buffer usage', 'Initial version.',))
if mibBuilder.loadTexts: dfl260_MIB.setLastUpdated('201009021139Z')
if mibBuilder.loadTexts: dfl260_MIB.setOrganization('D-Link Corporation')
if mibBuilder.loadTexts: dfl260_MIB.setContactInfo('Postal: D-Link Corporation No. 289, Sinhu 3rd Road, Neihu District, Taipei City 114, Taiwan, R.O.C. Tel: +886-2-66000123 Fax: +886-2-55509988')
if mibBuilder.loadTexts: dfl260_MIB.setDescription('The MIB module for D-Link DFL-260 series product.')
dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 171))
netdefendMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20))
utmFirewall = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2))
dfl260 = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1))
dfl260OS = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1))
dfl260OSStats = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2))
dfl260reg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2))
dfl260MibModules = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 1))
dfl260MibConfs = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2))
dfl260MibObjectGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3))
dfl260System = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1))
dfl260SysCpuLoad = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysCpuLoad.setStatus('current')
if mibBuilder.loadTexts: dfl260SysCpuLoad.setDescription('The system cpu load.')
dfl260SysForwardedBits = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysForwardedBits.setStatus('current')
if mibBuilder.loadTexts: dfl260SysForwardedBits.setDescription('The number of bits forwarded through the gateway.')
dfl260SysForwardedPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysForwardedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260SysForwardedPackets.setDescription('Total number of forwarded packets.')
dfl260SysBuffUse = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysBuffUse.setStatus('current')
if mibBuilder.loadTexts: dfl260SysBuffUse.setDescription('The current number of buffers in use.')
dfl260SysConns = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysConns.setStatus('current')
if mibBuilder.loadTexts: dfl260SysConns.setDescription('The number of connections.')
dfl260SysPerStateCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6))
dfl260SysPscTcpSyn = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpSyn.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpSyn.setDescription('Number of TCP connections in the SYN state.')
dfl260SysPscTcpOpen = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpOpen.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpOpen.setDescription('Number of TCP connections in the OPEN state.')
dfl260SysPscTcpFin = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpFin.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpFin.setDescription('Number of TCP connections in the FIN state.')
dfl260SysPscUdp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscUdp.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscUdp.setDescription('Number of UDP connections.')
dfl260SysPscIcmp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscIcmp.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscIcmp.setDescription('Number of ICMP connections.')
dfl260SysPscOther = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscOther.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscOther.setDescription('Number of other connections.')
dfl260IfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7), )
if mibBuilder.loadTexts: dfl260IfStatsTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsTable.setDescription('A table of DFL-260 specific interface statistics')
dfl260IfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IfStatsIndex"))
if mibBuilder.loadTexts: dfl260IfStatsEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsEntry.setDescription('The row in a table of DFL-260 specific interface statistics')
dfl260IfStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IfStatsIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsIndex.setDescription('Index of a row in dfl260IfStatsTable')
dfl260IfName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfName.setStatus('current')
if mibBuilder.loadTexts: dfl260IfName.setDescription('The name of the interface.')
dfl260IfFragsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragsIn.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragsIn.setDescription('Number of IP packet fragments received in the interface.')
dfl260IfFragReassOk = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragReassOk.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragReassOk.setDescription('Number of complete IP packets successfully reassembled from the fragments received in the interface.')
dfl260IfFragReassFail = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragReassFail.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragReassFail.setDescription('Number of packets that could not be reassembled, either due to resource starvation, illegal fragmentation, or just packet loss.')
dfl260IfPktsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsInCnt.setDescription('Number of packets received by the interface.')
dfl260IfPktsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsOutCnt.setDescription('Number of packets sent by the interface')
dfl260IfBitsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfBitsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfBitsInCnt.setDescription('Number of bits received by the interface')
dfl260IfBitsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfBitsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfBitsOutCnt.setDescription('Number of bits sent by the interface')
dfl260IfPktsTotCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsTotCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsTotCnt.setDescription('Total number of packets transmitted by the interface.')
dfl260IfBitsTotCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfBitsTotCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfBitsTotCnt.setDescription('Total number of bits transmitted by the interface.')
dfl260IfHCPktsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCPktsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCPktsInCnt.setDescription('Number of packets received by the interface. This object is a 64-bit version of dfl260IfPktsInCnt.')
dfl260IfHCPktsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCPktsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCPktsOutCnt.setDescription('Number of packets sent by the interface. This object is a 64-bit version of dfl260IfPktsOutCnt.')
dfl260IfHCBitsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCBitsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCBitsInCnt.setDescription('Number of bits received by the interface. This object is a 64-bit version of dfl260IfBitsInCnt.')
dfl260IfHCBitsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCBitsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCBitsOutCnt.setDescription('Number of bits sent by the interface. This object is a 64-bit version of dfl260IfBitsOutCnt.')
dfl260IfHCPktsTotCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCPktsTotCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCPktsTotCnt.setDescription('Total number of packets transmitted by the interface. This object is a 64-bit version of dfl260IfPktsTotCnt.')
dfl260IfHCBitsTotCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfHCBitsTotCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfHCBitsTotCnt.setDescription('Total number of bits transmitted by the interface. This object is a 64-bit version of dfl260IfBitsTotCnt.')
dfl260IfRxRingTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8), )
if mibBuilder.loadTexts: dfl260IfRxRingTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxRingTable.setDescription('A table of DFL-260 specific interface Rx ring statistics')
dfl260IfRxRingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IfRxRingIndex"))
if mibBuilder.loadTexts: dfl260IfRxRingEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxRingEntry.setDescription('The row in a table of DFL-260 specific interface Rx ring statistics.')
dfl260IfRxRingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IfRxRingIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxRingIndex.setDescription('Index of a row in dfl260IfRxRingTable.')
dfl260IfRxRingFifoErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfRxRingFifoErrors.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxRingFifoErrors.setDescription('Rx Ring number of FIFO errors.')
dfl260IfRxDespools = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfRxDespools.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxDespools.setDescription('Number of despool events per second.')
dfl260IfRxAvgUse = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfRxAvgUse.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxAvgUse.setDescription('Rx Ring average usage.')
dfl260IfRxRingSaturation = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfRxRingSaturation.setStatus('current')
if mibBuilder.loadTexts: dfl260IfRxRingSaturation.setDescription('Rx Ring saturation. Percentage of ring use per despool event when the ring has been more than half full.')
dfl260RxRingFlooded = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260RxRingFlooded.setStatus('current')
if mibBuilder.loadTexts: dfl260RxRingFlooded.setDescription('Rx Ring number of despool events for which the ring has been completely flooded')
dfl260IfTxRingTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9), )
if mibBuilder.loadTexts: dfl260IfTxRingTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxRingTable.setDescription('A table of DFL-260 specific interface Tx ring statistics')
dfl260IfTxRingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IfTxRingIndex"))
if mibBuilder.loadTexts: dfl260IfTxRingEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxRingEntry.setDescription('The row in a table of DFL-260 specific interface Tx ring statistics.')
dfl260IfTxRingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IfTxRingIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxRingIndex.setDescription('Index of a row in dfl260IfTxRingTable.')
dfl260IfTxDespools = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfTxDespools.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxDespools.setDescription('Tx Ring number of despool events per second (polls when there is at least one buffer in the ring).')
dfl260IfTxAvgUse = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfTxAvgUse.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxAvgUse.setDescription('Tx Ring average usage.')
dfl260IfTxRingSaturation = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfTxRingSaturation.setStatus('current')
if mibBuilder.loadTexts: dfl260IfTxRingSaturation.setDescription('Tx Ring percentage of use per despool event when the ring has been more than half full.')
dfl260RxTingFlooded = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260RxTingFlooded.setStatus('current')
if mibBuilder.loadTexts: dfl260RxTingFlooded.setDescription('Tx Ring number of despool events in which the ring has been completely flooded.')
dfl260IfVlanStatsTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10), )
if mibBuilder.loadTexts: dfl260IfVlanStatsTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanStatsTable.setDescription('A table of DFL-260 VLAN statistics')
dfl260IfVlanStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IfVlanIndex"))
if mibBuilder.loadTexts: dfl260IfVlanStatsEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanStatsEntry.setDescription('The row in a table of dfl260IfVlanStatsTable. Each has an index equal to the ifIndex of the corresponding physical interface')
dfl260IfVlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IfVlanIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanIndex.setDescription('Extended index of a row in dfl260IfVlanStatsTable.')
dfl260IfVlanUntaggedInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedInPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedInPkts.setDescription('Number of untagged packets received by the interface.')
dfl260IfVlanUntaggedOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedOutPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedOutPkts.setDescription('Number of untagged packets sent by the interface.')
dfl260IfVlanUntaggedTotPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedTotPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedTotPkts.setDescription('Total number of untagged packets processed by the interface.')
dfl260IfVlanUntaggedInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedInOctets.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedInOctets.setDescription('Total number of octets in untagged packets received by the interface.')
dfl260IfVlanUntaggedOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedOutOctets.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedOutOctets.setDescription('Total number of octets in untagged packets sent by the interface.')
dfl260IfVlanUntaggedTotOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfVlanUntaggedTotOctets.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanUntaggedTotOctets.setDescription('Total number of octets in untagged packets processed by the interface.')
dfl260HWSensorTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11), )
if mibBuilder.loadTexts: dfl260HWSensorTable.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorTable.setDescription('Table of hardware sensors.')
dfl260HWSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260HWSensorIndex"))
if mibBuilder.loadTexts: dfl260HWSensorEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorEntry.setDescription('Entry of table of hardware sensors.')
dfl260HWSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260HWSensorIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorIndex.setDescription('Index of the entries of the sensor table.')
dfl260HWSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HWSensorName.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorName.setDescription('The description of the sensor.')
dfl260HWSensorValue = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HWSensorValue.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorValue.setDescription('The value of the sensor.')
dfl260HWSensorUnit = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HWSensorUnit.setStatus('current')
if mibBuilder.loadTexts: dfl260HWSensorUnit.setDescription('The description of the unit of the value measured by the sensor.')
dfl260SysMemUsage = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysMemUsage.setStatus('current')
if mibBuilder.loadTexts: dfl260SysMemUsage.setDescription('The current memory usage.')
dfl260SysTCPUsage = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13))
dfl260SysTCPRecvSmall = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysTCPRecvSmall.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPRecvSmall.setDescription('Small TCP receive windows usage.')
dfl260SysTCPRecvLarge = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysTCPRecvLarge.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPRecvLarge.setDescription('Large TCP receive windows usage.')
dfl260SysTCPSendSmall = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysTCPSendSmall.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPSendSmall.setDescription('Small TCP send windows usage.')
dfl260SysTCPSendLarge = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysTCPSendLarge.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPSendLarge.setDescription('Large TCP send windows usage.')
dfl260SysTimerUsage = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysTimerUsage.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTimerUsage.setDescription('The current number of timers in use.')
dfl260SysConnOPS = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysConnOPS.setStatus('current')
if mibBuilder.loadTexts: dfl260SysConnOPS.setDescription('The number of connections opened per second.')
dfl260SysConnCPS = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysConnCPS.setStatus('current')
if mibBuilder.loadTexts: dfl260SysConnCPS.setDescription('The number of connections closed per second.')
dfl260SysHCForwardedBits = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysHCForwardedBits.setStatus('current')
if mibBuilder.loadTexts: dfl260SysHCForwardedBits.setDescription('The number of bits forwarded through the gateway.')
dfl260VPN = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2))
dfl260IPsec = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1))
dfl260IPsecGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1))
dfl260IPsecPhaseOneActive = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecPhaseOneActive.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecPhaseOneActive.setDescription('Number of Phase-1 active negotiations')
dfl260IPsecPhaseOneAggrModeDone = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecPhaseOneAggrModeDone.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecPhaseOneAggrModeDone.setDescription('Number of Phase-1 aggressive mode negotiations.')
dfl260IPsecQuickModeActive = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecQuickModeActive.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecQuickModeActive.setDescription('Number of quick mode active negotiations.')
dfl260IPsecPhaseOneDone = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecPhaseOneDone.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecPhaseOneDone.setDescription('Number of Phase-1 negotiations done.')
dfl260IPsecPhaseOneFailed = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecPhaseOneFailed.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecPhaseOneFailed.setDescription('Number of Phase-1 negotiations failed.')
dfl260IPsecPhaseOneRekeyed = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecPhaseOneRekeyed.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecPhaseOneRekeyed.setDescription('Number of Phase-1 negotiations rekeyed.')
dfl260IPsecQuickModeDone = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecQuickModeDone.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecQuickModeDone.setDescription('Number of quick mode negotiations done.')
dfl260IPsecQuickModeFailed = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecQuickModeFailed.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecQuickModeFailed.setDescription('Number of quick mode negotiations failed.')
dfl260IPsecInfoDone = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecInfoDone.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecInfoDone.setDescription('Number of informational exchanges done. (Not available in IKEv1 implementations)')
dfl260IPsecInfoFailed = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecInfoFailed.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecInfoFailed.setDescription('Number of informational exchanges failed. (Not available in IKEv1 implementations)')
dfl260IPsecInOctetsComp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecInOctetsComp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecInOctetsComp.setDescription('Total octets in before decompression.')
dfl260IPsecInOctetsUncomp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecInOctetsUncomp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecInOctetsUncomp.setDescription('Total octets in after decompression.')
dfl260IPsecOutOctetsComp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecOutOctetsComp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecOutOctetsComp.setDescription('Total octets out after compression.')
dfl260IPsecOutOctetsUncomp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecOutOctetsUncomp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecOutOctetsUncomp.setDescription('Total octets out before compression.')
dfl260IPsecForwardedOctetsComp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecForwardedOctetsComp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecForwardedOctetsComp.setDescription('Total octets forwarded after compression.')
dfl260IPsecForwardedOctetsUcomp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecForwardedOctetsUcomp.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecForwardedOctetsUcomp.setDescription('Total octets forwarded before compression.')
dfl260IPsecInPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecInPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecInPackets.setDescription('Total packets in.')
dfl260IPsecOutPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecOutPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecOutPackets.setDescription('Total packets out.')
dfl260IPsecForwardedPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecForwardedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecForwardedPackets.setDescription('Total packets forwarded.')
dfl260IPsecActiveTransforms = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 20), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecActiveTransforms.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecActiveTransforms.setDescription('Number of currently active transforms.')
dfl260IPsecTotalTransforms = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecTotalTransforms.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecTotalTransforms.setDescription('Total number of transform records created.')
dfl260IPsecOutOfTransforms = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecOutOfTransforms.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecOutOfTransforms.setDescription('Number of packets dropped due to no available transform object. (Not available in IKEv1 implementations)')
dfl260IPsecTotalRekeys = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPsecTotalRekeys.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecTotalRekeys.setDescription('Total number of rekeys performed.')
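# --- Usage sketch (not part of the generated MIB) ---------------------------
# The scalars above expose the unit's global IPsec/IKE statistics. The helper
# below polls one of them with pysnmp's high-level API. The agent address
# 192.0.2.1 and the community string 'public' are placeholder assumptions;
# substitute values for your own device.
def _example_read_ipsec_counter():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
        SnmpEngine(),
        CommunityData('public'),                  # assumed community string
        UdpTransportTarget(('192.0.2.1', 161)),   # assumed agent address
        ContextData(),
        # Scalar objects are addressed with instance suffix 0.
        ObjectType(ObjectIdentity('DFL260-MIB', 'dfl260IPsecTotalRekeys', 0))))
    if errorIndication or errorStatus:
        raise RuntimeError(str(errorIndication or errorStatus.prettyPrint()))
    for name, value in varBinds:
        print('%s = %s' % (name.prettyPrint(), value.prettyPrint()))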
dfl260Rules = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3))
dfl260RuleUseTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2), )
if mibBuilder.loadTexts: dfl260RuleUseTable.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleUseTable.setDescription('A list of general rules usage statistics.')
dfl260RuleUseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260RuleIndex"))
if mibBuilder.loadTexts: dfl260RuleUseEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleUseEntry.setDescription('The statistics over a rule usage.')
dfl260RuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260RuleIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleIndex.setDescription('The rule usage index.')
dfl260RuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260RuleName.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleName.setDescription('The name of the rule.')
dfl260RuleUse = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260RuleUse.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleUse.setDescription('The number of times a rule was used.')
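# --- Usage sketch (not part of the generated MIB) ---------------------------
# dfl260RuleUseTable is a conceptual table indexed by dfl260RuleIndex. The
# helper below walks its name and hit-count columns row by row; the address
# and community string are the same placeholder assumptions as above.
def _example_walk_rule_use_table():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
            SnmpEngine(), CommunityData('public'),
            UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
            ObjectType(ObjectIdentity('DFL260-MIB', 'dfl260RuleName')),
            ObjectType(ObjectIdentity('DFL260-MIB', 'dfl260RuleUse')),
            lexicographicMode=False):           # stop at the end of the table
        if errorIndication or errorStatus:
            break
        name, use = varBinds                    # one row per iteration
        print('%s: %s hits' % (name[1].prettyPrint(), use[1].prettyPrint()))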
dfl260IPPools = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4))
dfl260IPPoolsNumber = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolsNumber.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolsNumber.setDescription('The number of IP pools.')
dfl260IPPoolTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2), )
if mibBuilder.loadTexts: dfl260IPPoolTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolTable.setDescription('A list of IP pools')
dfl260IPPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IPPoolIndex"))
if mibBuilder.loadTexts: dfl260IPPoolEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolEntry.setDescription('The attributes of an IP pool.')
dfl260IPPoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IPPoolIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolIndex.setDescription('The IP pool index.')
dfl260IPPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolName.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolName.setDescription('The IP pool name.')
dfl260IPPoolPrepare = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolPrepare.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolPrepare.setDescription('Number of IP pool objects in prepare mode.')
dfl260IPPoolFree = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolFree.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolFree.setDescription('Number of available IPs in the pool.')
dfl260IPPoolMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolMisses.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolMisses.setDescription('Number of missed IP pool negotiations for reasons other than lack of available IP numbers.')
dfl260IPPoolClientFails = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolClientFails.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolClientFails.setDescription('Number of failed IP pool transactions.')
dfl260IPPoolUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IPPoolUsed.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolUsed.setDescription('Number of IP numbers in use from the pool.')
dfl260DHCPServer = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5))
dfl260DHCPTotalRejected = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPTotalRejected.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPTotalRejected.setDescription('Total number of rejected packets (all rules).')
dfl260DHCPRuleTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2), )
if mibBuilder.loadTexts: dfl260DHCPRuleTable.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleTable.setDescription('A list of all DHCP server rules usage statistics.')
dfl260DHCPRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260DHCPRuleIndex"))
if mibBuilder.loadTexts: dfl260DHCPRuleEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleEntry.setDescription('The attributes of a DHCP server rule statistics.')
dfl260DHCPRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260DHCPRuleIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleIndex.setDescription('The DHCP server rule index')
dfl260DHCPRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRuleName.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleName.setDescription('The DHCP server rule name.')
dfl260DHCPRuleUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRuleUsage.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleUsage.setDescription('Number of used IPs in the pool.')
dfl260DHCPRuleUsagePercent = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRuleUsagePercent.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRuleUsagePercent.setDescription('The percentage of the used IPs in relation to the IP pool size.')
dfl260DHCPActiveClients = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPActiveClients.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPActiveClients.setDescription('Number of currently active clients.')
dfl260DHCPActiveClientsPercent = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPActiveClientsPercent.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPActiveClientsPercent.setDescription('The number of currently active clients as a percentage of the pool size.')
dfl260DHCPRejectedRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRejectedRequests.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRejectedRequests.setDescription('Number of rejected requests matching the current rule.')
dfl260DHCPTotalLeases = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPTotalLeases.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPTotalLeases.setDescription('Total number of leases in the pool.')
dfl260UserAuth = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6))
dfl260UserAuthHTTPUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthHTTPUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthHTTPUsers.setDescription('Number of currently logged in HTTP users.')
dfl260UserAuthXAUTHUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthXAUTHUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthXAUTHUsers.setDescription('Number of currently logged in XAUTH users.')
dfl260UserAuthHTTPSUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthHTTPSUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthHTTPSUsers.setDescription('Number of currently logged in HTTPS users.')
dfl260UserAuthPPPUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthPPPUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthPPPUsers.setDescription('Number of currently logged in PPP users.')
dfl260UserAuthEAPUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthEAPUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthEAPUsers.setDescription('Number of currently logged in EAP users.')
dfl260UserAuthRuleUseTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6), )
if mibBuilder.loadTexts: dfl260UserAuthRuleUseTable.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthRuleUseTable.setDescription('Table of user authentication rule usage.')
dfl260UserAuthRuleUseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260UserAuthRuleIndex"))
if mibBuilder.loadTexts: dfl260UserAuthRuleUseEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthRuleUseEntry.setDescription('The attributes of an authentication rule usage statistics.')
dfl260UserAuthRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260UserAuthRuleIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthRuleIndex.setDescription('The rule usage index.')
dfl260UserAuthRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthRuleName.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthRuleName.setDescription('The name of the rule.')
dfl260UserAuthRuleUse = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260UserAuthRuleUse.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthRuleUse.setDescription('The number of times a rule was used.')
dfl260LinkMonitor = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7))
dfl260LinkMonGrp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonGrp.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrp.setDescription('The number of groups of monitored links')
dfl260LinkMonGrpTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2), )
if mibBuilder.loadTexts: dfl260LinkMonGrpTable.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrpTable.setDescription('Table of link monitor groups.')
dfl260LinkMonGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260LinkMonGrpIndex"))
if mibBuilder.loadTexts: dfl260LinkMonGrpEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrpEntry.setDescription('The attributes of a link monitor group')
dfl260LinkMonGrpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260LinkMonGrpIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrpIndex.setDescription('The index of a row in the table of link monitor groups.')
dfl260LinkMonGrpName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonGrpName.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrpName.setDescription('The link monitor group name.')
dfl260LinkMonGrpHostsUp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonGrpHostsUp.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonGrpHostsUp.setDescription('The percentage of monitored hosts available.')
dfl260LinkMonHostTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3), )
if mibBuilder.loadTexts: dfl260LinkMonHostTable.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostTable.setDescription('Table of link monitored hosts in a link monitor group.')
dfl260LinkMonHostEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260LinkMonGrpIndex"), (0, "DFL260-MIB", "dfl260LinkMonHostIndex"))
if mibBuilder.loadTexts: dfl260LinkMonHostEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostEntry.setDescription('The attributes of a monitored host.')
dfl260LinkMonHostIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260LinkMonHostIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostIndex.setDescription('The index of a host in the table of link monitor hosts.')
dfl260LinkMonHostId = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostId.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostId.setDescription('The monitored host identifier.')
dfl260LinkMonHostShortTermLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostShortTermLoss.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostShortTermLoss.setDescription('The percentage of packets lost in the short term.')
dfl260LinkMonHostPacketsLost = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostPacketsLost.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostPacketsLost.setDescription('Total number of lost monitoring packets.')
dfl260Pipes = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8))
dfl260PipeUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeUsers.setDescription('The current number of users, as defined by the grouping settings of each pipe, being tracked in the pipes system. Note that this value corresponds to the number of users active in each time slice of 1/20th of a second, and not to the number of users having open connections.')
dfl260PipeTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2), )
if mibBuilder.loadTexts: dfl260PipeTable.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeTable.setDescription('Table of pipes')
dfl260PipeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260PipeIndex"))
if mibBuilder.loadTexts: dfl260PipeEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeEntry.setDescription('An entry of the pipes table.')
dfl260PipeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260PipeIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeIndex.setDescription('The pipe index')
dfl260PipeName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeName.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeName.setDescription('The name of the pipe')
dfl260PipeMinPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeMinPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeMinPrec.setDescription('The minimum of the range of pipe precedences.')
dfl260PipeMaxPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeMaxPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeMaxPrec.setDescription('The maximum of the range of pipe precedences.')
dfl260PipeDefPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDefPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDefPrec.setDescription('The precedence assigned to a packet for which one has not already been set by a Pipe Rule.')
dfl260PipeNumPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeNumPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeNumPrec.setDescription('The number of pipe precedences')
dfl260PipeNumUsers = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeNumUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeNumUsers.setDescription('The current number of users, as defined by the grouping settings of each pipe, being tracked in the pipes system. This value corresponds to the number of users active in each time slice and not to the number of users having open connections.')
dfl260PipeCurrentBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeCurrentBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeCurrentBps.setDescription('The current throughput of a pipe, in bits per second, as a sum of the corresponding values for all precedences.')
dfl260PipeCurrentPps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeCurrentPps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeCurrentPps.setDescription('The current throughput of a pipe, in packets per second, as a sum of the corresponding values for all precedences.')
dfl260PipeDelayedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDelayedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDelayedPackets.setDescription('The number of times packets have been delayed as a result of a pipe, or pipe user having used up its allotted bandwidth. Note that one single packet may be delayed several times; if a pipe is really full, this count may exceed the number of packets actually passing through the pipe.')
dfl260PipeDropedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDropedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDropedPackets.setDescription('The number of packets dropped by a pipe. Packets are dropped when CorePlus is running out of packet buffers. This occurs when excessive amounts of packets need to be queued for later delivery. The packet dropped is always the one that has been queued the longest time globally, which means that the connection suffering from packet loss will be the one most overloading the system.')
dfl260PipePrecTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3), )
if mibBuilder.loadTexts: dfl260PipePrecTable.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecTable.setDescription('There is a one-to-many relation between a pipe and its precedences. The number of precedences is an instance attribute of each pipe. This table extends the pipes table in order to express the relation between a pipe and the respective precedences.')
dfl260PipePrecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260PipeIndex"), (0, "DFL260-MIB", "dfl260PipePrecIndex"))
if mibBuilder.loadTexts: dfl260PipePrecEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecEntry.setDescription('An entry of the table of pipe precedences. These table entries are characterized by being indexed by two values. The first index is the same as the pipe index of the corresponding row in the table of pipes, and the second index is the index within the set of precedences of the corresponding pipe.')
dfl260PipePrecIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260PipePrecIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecIndex.setDescription('Index of a precedence of a specific pipe. This is the second index of the entries of pipe precedence table described by object dfl260PipePrecEntry.')
dfl260PipePrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrec.setDescription('The precedence value')
dfl260PipePrecBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecBps.setDescription('The current throughput of the pipe, in bits per second, with the corresponding precedence.')
dfl260PipePrecTotalPps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecTotalPps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecTotalPps.setDescription('The current throughput of the pipe precedence, in packets per second.')
dfl260PipePrecReservedBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecReservedBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecReservedBps.setDescription('The current bandwidth allocated to the precedence.')
dfl260PipePrecDynLimBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDynLimBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDynLimBps.setDescription('The current bandwidth limit applied to the precedence.')
dfl260PipePrecDynUsrLimBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDynUsrLimBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDynUsrLimBps.setDescription('The current precedence bandwidth limit per user of the pipe.')
dfl260PipePrecDelayedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDelayedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDelayedPackets.setDescription('The number of times packets have been delayed as a result of a precedence, or pipe user having used up its allotted bandwidth. Note that one single packet may be delayed several times; if a pipe is really full, this count may exceed the number of packets of this precedence actually passing through the pipe.')
dfl260PipePrecDropedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDropedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDropedPackets.setDescription('The number of pipe dropped packets with the corresponding precedence.')
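# --- Usage sketch (not part of the generated MIB) ---------------------------
# dfl260PipePrecEntry rows carry two indices: the pipe index and the index of
# the precedence within that pipe. When addressing a single cell, both index
# values are appended to the column OID. Pipe 1 / precedence 2 below are
# made-up example indices.
def _example_pipe_prec_instance():
    from pysnmp.hlapi import ObjectIdentity
    # Resolves to <dfl260PipePrecBps OID>.1.2, i.e. the bps gauge of the
    # second precedence row of the first pipe.
    return ObjectIdentity('DFL260-MIB', 'dfl260PipePrecBps', 1, 2)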
dfl260ALG = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9))
dfl260AlgSessions = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgSessions.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgSessions.setDescription('Total ALG sessions')
dfl260AlgConnections = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgConnections.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgConnections.setDescription('Total ALG connections')
dfl260AlgTCPStreams = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgTCPStreams.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgTCPStreams.setDescription('Total ALG TCP streams')
dfl260HttpAlg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4))
dfl260HttpAlgTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1), )
if mibBuilder.loadTexts: dfl260HttpAlgTable.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTable.setDescription('Table of HTTP ALG objects.')
dfl260HttpAlgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260HttpAlgIndex"))
if mibBuilder.loadTexts: dfl260HttpAlgEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgEntry.setDescription('A row of the table of HTTP ALG objects.')
dfl260HttpAlgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260HttpAlgIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgIndex.setDescription('The index of an entry of the Table of HTTP ALG objects.')
dfl260HttpAlgName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgName.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgName.setDescription('The name of an HTTP ALG object.')
dfl260HttpAlgTotalRequested = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalRequested.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalRequested.setDescription('Total number of URL requests.')
dfl260HttpAlgTotalAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalAllowed.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalAllowed.setDescription('Total number of allowed URL requests.')
dfl260HttpAlgTotalBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalBlocked.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalBlocked.setDescription('Total number of blocked URL requests.')
dfl260HttpAlgCntFltTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2), )
if mibBuilder.loadTexts: dfl260HttpAlgCntFltTable.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltTable.setDescription('Table of HTTP ALG content filtering rules.')
dfl260HttpAlgCntFltEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260HttpAlgIndex"), (0, "DFL260-MIB", "dfl260HttpAlgCntFltIndex"))
if mibBuilder.loadTexts: dfl260HttpAlgCntFltEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltEntry.setDescription('Entry of the table of HTTP ALG content filtering rules.')
dfl260HttpAlgCntFltIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260HttpAlgCntFltIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltIndex.setDescription('The index of an entry of the Table of HTTP ALG content filtering objects.')
dfl260HttpAlgCntFltName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltName.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltName.setDescription('The name of an HTTP ALG content filtering object.')
dfl260HttpAlgCntFltRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltRequests.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltRequests.setDescription('Total number of URLs intercepted by a content filtering object.')
dfl260HttpAlgCntFltAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltAllowed.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltAllowed.setDescription('Total number of URLs intercepted and allowed by a content filtering object.')
dfl260HttpAlgCntFltBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltBlocked.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltBlocked.setDescription('Total number of URLs intercepted and blocked by a content filtering object.')
dfl260SmtpAlg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5))
dfl260SmtpAlgTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1), )
if mibBuilder.loadTexts: dfl260SmtpAlgTable.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTable.setDescription('Table of SMTP ALG objects.')
dfl260SmtpAlgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260SmtpAlgIndex"))
if mibBuilder.loadTexts: dfl260SmtpAlgEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgEntry.setDescription('A row of the table of SMTP ALG objects.')
dfl260SmtpAlgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260SmtpAlgIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgIndex.setDescription('The index of an entry of the Table of SMTP ALG objects.')
dfl260SmtpAlgName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgName.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgName.setDescription('The name of an SMTP ALG object.')
dfl260SmtpAlgTotCheckedSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotCheckedSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotCheckedSes.setDescription('Total sessions checked by the SMTP ALG of corresponding index.')
dfl260SmtpAlgTotSpamSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotSpamSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotSpamSes.setDescription('Total spam sessions detected by the SMTP ALG of corresponding index.')
dfl260SmtpAlgTotDroppedSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotDroppedSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotDroppedSes.setDescription('Total dropped sessions for the SMTP ALG of corresponding index.')
dfl260SmtpAlgDnsBlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2), )
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlTable.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlTable.setDescription('Table of SMTP ALG DNS black list objects.')
dfl260SmtpAlgDnsBlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260SmtpAlgIndex"), (0, "DFL260-MIB", "dfl260SmtpAlgDnsBlIndex"))
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlEntry.setDescription('A row of the table of SMTP ALG DNS black list objects.')
dfl260SmtpAlgDnsBlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlIndex.setDescription('The index of an entry of the table of SMTP ALG DNS black list objects.')
dfl260SmtpAlgDnsBlName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlName.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlName.setDescription('The SMTP DNS black list name.')
dfl260SmtpAlgDnsBlChecked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlChecked.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlChecked.setDescription('Total sessions checked against the SMTP ALG DNS black list of corresponding index.')
dfl260SmtpAlgDnsBlMatched = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlMatched.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlMatched.setDescription('Total sessions that matched the SMTP ALG DNS black list of corresponding index.')
dfl260SmtpAlgDnsBlFailChecks = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlFailChecks.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlFailChecks.setDescription('Total failed checks for the SMTP ALG DNS black list of corresponding index.')
dfl260DHCPRelay = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11))
dfl260DHCPRelayCurClients = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayCurClients.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayCurClients.setDescription('Total DHCP relay active relayed clients.')
dfl260DHCPRelayCurTrans = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayCurTrans.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayCurTrans.setDescription('Ongoing DHCP relay transactions.')
dfl260DHCPRelayRejected = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRejected.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRejected.setDescription('Total DHCP relay packets rejected.')
dfl260DHCPRelayRuleTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4), )
if mibBuilder.loadTexts: dfl260DHCPRelayRuleTable.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleTable.setDescription('Table of DHCP relay rules.')
dfl260DHCPRelayRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260DHCPRelayRuleIndex"))
if mibBuilder.loadTexts: dfl260DHCPRelayRuleEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleEntry.setDescription('Entry of the table of DHCP relay rules')
dfl260DHCPRelayRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260DHCPRelayRuleIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleIndex.setDescription('Index of the table of DHCP relay rules.')
dfl260DHCPRelayRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleName.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleName.setDescription('Display name of a DHCP relay rule')
dfl260DHCPRelayRuleHits = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleHits.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleHits.setDescription('Number of the times the DHCP relay rule with corresponding index was used.')
dfl260DHCPRelayRuleCurClients = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleCurClients.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleCurClients.setDescription('Number of active clients relayed by the DHCP relay rule with corresponding index.')
dfl260DHCPRelayRuleRejCliPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejCliPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejCliPkts.setDescription('Number of client packets rejected by a rule.')
dfl260DHCPRelayRuleRejSrvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejSrvPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejSrvPkts.setDescription('Number of DHCP server packets rejected by the DHCP relay rule with the corresponding index.')
dfl260HA = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12))
dfl260HASyncSendQueueLength = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueLength.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueLength.setDescription('Size of the queue used for the High Availability sync interface.')
dfl260HASyncSendQueueUsagePkt = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsagePkt.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsagePkt.setDescription('High Availability Sync interface queue usage in number of packets.')
dfl260HASyncSendQueueUsageOct = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsageOct.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsageOct.setDescription('High Availability Sync interface queue usage in number of octets.')
dfl260HASyncSentPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSentPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSentPackets.setDescription('Number of High Availability packets sent on Sync.')
dfl260HASyncSendResentPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendResentPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendResentPackets.setDescription('Number of High Availability packets resent on Sync.')
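# --- Usage sketch (not part of the generated MIB) ---------------------------
# The HA sync counters above are monotonically increasing Counter32 values, so
# a meaningful rate needs two samples. A minimal sketch: 'poll' is an assumed
# callable returning the current counter value, and the 10 s interval is an
# arbitrary choice.
def _example_counter_rate(poll, interval=10.0):
    import time
    first = int(poll())
    time.sleep(interval)
    second = int(poll())
    # Counter32 wraps at 2**32; the modulo handles one wrap between samples.
    return ((second - first) % (2 ** 32)) / interval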
dfl260StatsConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2, 1))
dfl260StatsRegGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1))
dfl260SystemObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 1)).setObjects(("DFL260-MIB", "dfl260SysCpuLoad"), ("DFL260-MIB", "dfl260SysForwardedBits"), ("DFL260-MIB", "dfl260SysForwardedPackets"), ("DFL260-MIB", "dfl260SysBuffUse"), ("DFL260-MIB", "dfl260SysConns"), ("DFL260-MIB", "dfl260HWSensorName"), ("DFL260-MIB", "dfl260HWSensorValue"), ("DFL260-MIB", "dfl260HWSensorUnit"), ("DFL260-MIB", "dfl260SysMemUsage"), ("DFL260-MIB", "dfl260SysTimerUsage"), ("DFL260-MIB", "dfl260SysConnOPS"), ("DFL260-MIB", "dfl260SysConnCPS"), ("DFL260-MIB", "dfl260SysHCForwardedBits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260SystemObjectGroup = dfl260SystemObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SystemObjectGroup.setDescription('System statistics Group')
dfl260IPsecObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 2)).setObjects(("DFL260-MIB", "dfl260IPsecPhaseOneActive"), ("DFL260-MIB", "dfl260IPsecPhaseOneAggrModeDone"), ("DFL260-MIB", "dfl260IPsecQuickModeActive"), ("DFL260-MIB", "dfl260IPsecPhaseOneDone"), ("DFL260-MIB", "dfl260IPsecPhaseOneFailed"), ("DFL260-MIB", "dfl260IPsecPhaseOneRekeyed"), ("DFL260-MIB", "dfl260IPsecQuickModeDone"), ("DFL260-MIB", "dfl260IPsecQuickModeFailed"), ("DFL260-MIB", "dfl260IPsecInfoDone"), ("DFL260-MIB", "dfl260IPsecInfoFailed"), ("DFL260-MIB", "dfl260IPsecInOctetsComp"), ("DFL260-MIB", "dfl260IPsecInOctetsUncomp"), ("DFL260-MIB", "dfl260IPsecOutOctetsComp"), ("DFL260-MIB", "dfl260IPsecOutOctetsUncomp"), ("DFL260-MIB", "dfl260IPsecForwardedOctetsComp"), ("DFL260-MIB", "dfl260IPsecForwardedOctetsUcomp"), ("DFL260-MIB", "dfl260IPsecInPackets"), ("DFL260-MIB", "dfl260IPsecOutPackets"), ("DFL260-MIB", "dfl260IPsecForwardedPackets"), ("DFL260-MIB", "dfl260IPsecActiveTransforms"), ("DFL260-MIB", "dfl260IPsecTotalTransforms"), ("DFL260-MIB", "dfl260IPsecOutOfTransforms"), ("DFL260-MIB", "dfl260IPsecTotalRekeys"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260IPsecObjectGroup = dfl260IPsecObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecObjectGroup.setDescription('IPsec Group')
dfl260StateCountersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 3)).setObjects(("DFL260-MIB", "dfl260SysPscTcpSyn"), ("DFL260-MIB", "dfl260SysPscTcpOpen"), ("DFL260-MIB", "dfl260SysPscTcpFin"), ("DFL260-MIB", "dfl260SysPscUdp"), ("DFL260-MIB", "dfl260SysPscIcmp"), ("DFL260-MIB", "dfl260SysPscOther"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260StateCountersGroup = dfl260StateCountersGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260StateCountersGroup.setDescription('Per state counters')
dfl260IPPoolGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 4)).setObjects(("DFL260-MIB", "dfl260IPPoolsNumber"), ("DFL260-MIB", "dfl260IPPoolName"), ("DFL260-MIB", "dfl260IPPoolPrepare"), ("DFL260-MIB", "dfl260IPPoolFree"), ("DFL260-MIB", "dfl260IPPoolMisses"), ("DFL260-MIB", "dfl260IPPoolClientFails"), ("DFL260-MIB", "dfl260IPPoolUsed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260IPPoolGroup = dfl260IPPoolGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolGroup.setDescription('IP pool entry objects group')
dfl260DHCPServerGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 5)).setObjects(("DFL260-MIB", "dfl260DHCPTotalRejected"), ("DFL260-MIB", "dfl260DHCPRuleName"), ("DFL260-MIB", "dfl260DHCPRuleUsage"), ("DFL260-MIB", "dfl260DHCPRuleUsagePercent"), ("DFL260-MIB", "dfl260DHCPActiveClients"), ("DFL260-MIB", "dfl260DHCPActiveClientsPercent"), ("DFL260-MIB", "dfl260DHCPRejectedRequests"), ("DFL260-MIB", "dfl260DHCPTotalLeases"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260DHCPServerGroup = dfl260DHCPServerGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPServerGroup.setDescription('DHCP server rules objects.')
dfl260RuleUseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 6)).setObjects(("DFL260-MIB", "dfl260RuleName"), ("DFL260-MIB", "dfl260RuleUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260RuleUseGroup = dfl260RuleUseGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleUseGroup.setDescription('Rule use objects.')
dfl260UserAuthGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 7)).setObjects(("DFL260-MIB", "dfl260UserAuthHTTPUsers"), ("DFL260-MIB", "dfl260UserAuthXAUTHUsers"), ("DFL260-MIB", "dfl260UserAuthHTTPSUsers"), ("DFL260-MIB", "dfl260UserAuthPPPUsers"), ("DFL260-MIB", "dfl260UserAuthEAPUsers"), ("DFL260-MIB", "dfl260UserAuthRuleName"), ("DFL260-MIB", "dfl260UserAuthRuleUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260UserAuthGroup = dfl260UserAuthGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthGroup.setDescription('User auth objects.')
dfl260IfStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 8)).setObjects(("DFL260-MIB", "dfl260IfName"), ("DFL260-MIB", "dfl260IfFragsIn"), ("DFL260-MIB", "dfl260IfFragReassOk"), ("DFL260-MIB", "dfl260IfFragReassFail"), ("DFL260-MIB", "dfl260IfPktsInCnt"), ("DFL260-MIB", "dfl260IfPktsOutCnt"), ("DFL260-MIB", "dfl260IfBitsInCnt"), ("DFL260-MIB", "dfl260IfBitsOutCnt"), ("DFL260-MIB", "dfl260IfPktsTotCnt"), ("DFL260-MIB", "dfl260IfBitsTotCnt"), ("DFL260-MIB", "dfl260IfHCPktsInCnt"), ("DFL260-MIB", "dfl260IfHCPktsOutCnt"), ("DFL260-MIB", "dfl260IfHCBitsInCnt"), ("DFL260-MIB", "dfl260IfHCBitsOutCnt"), ("DFL260-MIB", "dfl260IfHCPktsTotCnt"), ("DFL260-MIB", "dfl260IfHCBitsTotCnt"), ("DFL260-MIB", "dfl260IfRxRingFifoErrors"), ("DFL260-MIB", "dfl260IfRxDespools"), ("DFL260-MIB", "dfl260IfRxAvgUse"), ("DFL260-MIB", "dfl260IfRxRingSaturation"), ("DFL260-MIB", "dfl260RxRingFlooded"), ("DFL260-MIB", "dfl260IfTxDespools"), ("DFL260-MIB", "dfl260IfTxAvgUse"), ("DFL260-MIB", "dfl260IfTxRingSaturation"), ("DFL260-MIB", "dfl260RxTingFlooded"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260IfStatsGroup = dfl260IfStatsGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsGroup.setDescription('DFL-260 interface statistics group.')
dfl260LinkMonitorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 9)).setObjects(("DFL260-MIB", "dfl260LinkMonGrp"), ("DFL260-MIB", "dfl260LinkMonGrpName"), ("DFL260-MIB", "dfl260LinkMonGrpHostsUp"), ("DFL260-MIB", "dfl260LinkMonHostId"), ("DFL260-MIB", "dfl260LinkMonHostShortTermLoss"), ("DFL260-MIB", "dfl260LinkMonHostPacketsLost"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260LinkMonitorGroup = dfl260LinkMonitorGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonitorGroup.setDescription('DFL-260 link monitor statistics group')
dfl260PipesObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 10)).setObjects(("DFL260-MIB", "dfl260PipeUsers"), ("DFL260-MIB", "dfl260PipeName"), ("DFL260-MIB", "dfl260PipeMinPrec"), ("DFL260-MIB", "dfl260PipeMaxPrec"), ("DFL260-MIB", "dfl260PipeDefPrec"), ("DFL260-MIB", "dfl260PipeNumPrec"), ("DFL260-MIB", "dfl260PipeNumUsers"), ("DFL260-MIB", "dfl260PipeCurrentBps"), ("DFL260-MIB", "dfl260PipeCurrentPps"), ("DFL260-MIB", "dfl260PipeDelayedPackets"), ("DFL260-MIB", "dfl260PipeDropedPackets"), ("DFL260-MIB", "dfl260PipePrec"), ("DFL260-MIB", "dfl260PipePrecBps"), ("DFL260-MIB", "dfl260PipePrecTotalPps"), ("DFL260-MIB", "dfl260PipePrecReservedBps"), ("DFL260-MIB", "dfl260PipePrecDynLimBps"), ("DFL260-MIB", "dfl260PipePrecDynUsrLimBps"), ("DFL260-MIB", "dfl260PipePrecDelayedPackets"), ("DFL260-MIB", "dfl260PipePrecDropedPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260PipesObjectGroup = dfl260PipesObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260PipesObjectGroup.setDescription('DFL-260 pipes statistics group')
dfl260DHCPRelayObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 12)).setObjects(("DFL260-MIB", "dfl260DHCPRelayCurClients"), ("DFL260-MIB", "dfl260DHCPRelayCurTrans"), ("DFL260-MIB", "dfl260DHCPRelayRejected"), ("DFL260-MIB", "dfl260DHCPRelayRuleName"), ("DFL260-MIB", "dfl260DHCPRelayRuleHits"), ("DFL260-MIB", "dfl260DHCPRelayRuleCurClients"), ("DFL260-MIB", "dfl260DHCPRelayRuleRejCliPkts"), ("DFL260-MIB", "dfl260DHCPRelayRuleRejSrvPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260DHCPRelayObjectGroup = dfl260DHCPRelayObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayObjectGroup.setDescription('DFL-260 DHCP relay statistics group')
dfl260AlgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 13)).setObjects(("DFL260-MIB", "dfl260AlgSessions"), ("DFL260-MIB", "dfl260AlgConnections"), ("DFL260-MIB", "dfl260AlgTCPStreams"), ("DFL260-MIB", "dfl260HttpAlgName"), ("DFL260-MIB", "dfl260HttpAlgTotalRequested"), ("DFL260-MIB", "dfl260HttpAlgTotalAllowed"), ("DFL260-MIB", "dfl260HttpAlgTotalBlocked"), ("DFL260-MIB", "dfl260HttpAlgCntFltName"), ("DFL260-MIB", "dfl260HttpAlgCntFltRequests"), ("DFL260-MIB", "dfl260HttpAlgCntFltAllowed"), ("DFL260-MIB", "dfl260HttpAlgCntFltBlocked"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260AlgGroup = dfl260AlgGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgGroup.setDescription('DFL-260 HTTP ALG statistics group')
dfl260HAGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 14)).setObjects(("DFL260-MIB", "dfl260HASyncSendQueueLength"), ("DFL260-MIB", "dfl260HASyncSendQueueUsagePkt"), ("DFL260-MIB", "dfl260HASyncSendQueueUsageOct"), ("DFL260-MIB", "dfl260HASyncSentPackets"), ("DFL260-MIB", "dfl260HASyncSendResentPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260HAGroup = dfl260HAGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260HAGroup.setDescription('DFL-260 HA statistics group')
dfl260IfVlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 15)).setObjects(("DFL260-MIB", "dfl260IfVlanUntaggedInPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedOutPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedTotPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedInOctets"), ("DFL260-MIB", "dfl260IfVlanUntaggedOutOctets"), ("DFL260-MIB", "dfl260IfVlanUntaggedTotOctets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260IfVlanGroup = dfl260IfVlanGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanGroup.setDescription('DFL-260 VLAN statistics group')
dfl260SmtpAlgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 16)).setObjects(("DFL260-MIB", "dfl260SmtpAlgName"), ("DFL260-MIB", "dfl260SmtpAlgTotCheckedSes"), ("DFL260-MIB", "dfl260SmtpAlgTotSpamSes"), ("DFL260-MIB", "dfl260SmtpAlgTotDroppedSes"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlName"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlChecked"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlMatched"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlFailChecks"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260SmtpAlgGroup = dfl260SmtpAlgGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgGroup.setDescription('Clavister SMTP ALG objects group')
dfl260SysTCPGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 17)).setObjects(("DFL260-MIB", "dfl260SysTCPRecvSmall"), ("DFL260-MIB", "dfl260SysTCPRecvLarge"), ("DFL260-MIB", "dfl260SysTCPSendSmall"), ("DFL260-MIB", "dfl260SysTCPSendLarge"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260SysTCPGroup = dfl260SysTCPGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPGroup.setDescription('DFL-260 TCP buffer usage group')
dfl260StatsCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2, 1, 1)).setObjects(("DFL260-MIB", "dfl260SystemObjectGroup"), ("DFL260-MIB", "dfl260IPsecObjectGroup"), ("DFL260-MIB", "dfl260StateCountersGroup"), ("DFL260-MIB", "dfl260IPPoolGroup"), ("DFL260-MIB", "dfl260DHCPServerGroup"), ("DFL260-MIB", "dfl260RuleUseGroup"), ("DFL260-MIB", "dfl260UserAuthGroup"), ("DFL260-MIB", "dfl260IfStatsGroup"), ("DFL260-MIB", "dfl260LinkMonitorGroup"), ("DFL260-MIB", "dfl260PipesObjectGroup"), ("DFL260-MIB", "dfl260DHCPRelayObjectGroup"), ("DFL260-MIB", "dfl260AlgGroup"), ("DFL260-MIB", "dfl260HAGroup"), ("DFL260-MIB", "dfl260IfVlanGroup"), ("DFL260-MIB", "dfl260SmtpAlgGroup"), ("DFL260-MIB", "dfl260SysTCPGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260StatsCompliance = dfl260StatsCompliance.setStatus('current')
if mibBuilder.loadTexts: dfl260StatsCompliance.setDescription('Module Compliance')
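# --- Usage sketch (not part of the generated MIB) ---------------------------
# This compiled module can also be used offline for symbol/OID translation by
# loading it into a MibBuilder. This assumes the file is available on the
# builder's MIB search path under the module name DFL260-MIB.
def _example_load_mib():
    from pysnmp.smi import builder
    mib_builder = builder.MibBuilder()
    mib_builder.loadModules('DFL260-MIB')
    (total_rekeys,) = mib_builder.importSymbols('DFL260-MIB',
                                                'dfl260IPsecTotalRekeys')
    print(total_rekeys.getName())   # the numeric OID of the scalar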
mibBuilder.exportSymbols("DFL260-MIB", dfl260PipeName=dfl260PipeName, dfl260StatsConformance=dfl260StatsConformance, dfl260PipePrecDropedPackets=dfl260PipePrecDropedPackets, dfl260IfFragReassFail=dfl260IfFragReassFail, dfl260DHCPRelayObjectGroup=dfl260DHCPRelayObjectGroup, dfl260DHCPRelayRuleIndex=dfl260DHCPRelayRuleIndex, dfl260IfBitsOutCnt=dfl260IfBitsOutCnt, dfl260IPsecForwardedPackets=dfl260IPsecForwardedPackets, dfl260UserAuthEAPUsers=dfl260UserAuthEAPUsers, dfl260SmtpAlgDnsBlEntry=dfl260SmtpAlgDnsBlEntry, dfl260SmtpAlgDnsBlName=dfl260SmtpAlgDnsBlName, dfl260RuleUseEntry=dfl260RuleUseEntry, dfl260LinkMonGrpIndex=dfl260LinkMonGrpIndex, dfl260IPPoolTable=dfl260IPPoolTable, dfl260IPsecOutPackets=dfl260IPsecOutPackets, dfl260IPsecQuickModeDone=dfl260IPsecQuickModeDone, dfl260PipeNumPrec=dfl260PipeNumPrec, dfl260DHCPRelayRuleName=dfl260DHCPRelayRuleName, dfl260SysConns=dfl260SysConns, dfl260PipeEntry=dfl260PipeEntry, dfl260RxTingFlooded=dfl260RxTingFlooded, dfl260SmtpAlgTotDroppedSes=dfl260SmtpAlgTotDroppedSes, dfl260SmtpAlgName=dfl260SmtpAlgName, dfl260DHCPRelayRuleHits=dfl260DHCPRelayRuleHits, dfl260SysForwardedPackets=dfl260SysForwardedPackets, dfl260SysTimerUsage=dfl260SysTimerUsage, dfl260HWSensorTable=dfl260HWSensorTable, dfl260IPsecPhaseOneFailed=dfl260IPsecPhaseOneFailed, utmFirewall=utmFirewall, dfl260DHCPRuleUsage=dfl260DHCPRuleUsage, dfl260LinkMonHostEntry=dfl260LinkMonHostEntry, dfl260IfTxRingTable=dfl260IfTxRingTable, dfl260Rules=dfl260Rules, dfl260PipeCurrentPps=dfl260PipeCurrentPps, dfl260IfVlanUntaggedTotPkts=dfl260IfVlanUntaggedTotPkts, dfl260UserAuthXAUTHUsers=dfl260UserAuthXAUTHUsers, dfl260IfRxDespools=dfl260IfRxDespools, dfl260SmtpAlgDnsBlMatched=dfl260SmtpAlgDnsBlMatched, dfl260RuleUseTable=dfl260RuleUseTable, dfl260PipePrecIndex=dfl260PipePrecIndex, dfl260StatsRegGroups=dfl260StatsRegGroups, dfl260IPsecForwardedOctetsUcomp=dfl260IPsecForwardedOctetsUcomp, dfl260StateCountersGroup=dfl260StateCountersGroup, dfl260IfVlanIndex=dfl260IfVlanIndex, dfl260IfStatsTable=dfl260IfStatsTable, dfl260IPsecInfoDone=dfl260IPsecInfoDone, dfl260HttpAlgEntry=dfl260HttpAlgEntry, dfl260SysHCForwardedBits=dfl260SysHCForwardedBits, dfl260IfVlanUntaggedOutOctets=dfl260IfVlanUntaggedOutOctets, dfl260IPsecPhaseOneRekeyed=dfl260IPsecPhaseOneRekeyed, dfl260SysPscIcmp=dfl260SysPscIcmp, dfl260DHCPRuleIndex=dfl260DHCPRuleIndex, dfl260HASyncSentPackets=dfl260HASyncSentPackets, dfl260UserAuthHTTPSUsers=dfl260UserAuthHTTPSUsers, dfl260PipeDefPrec=dfl260PipeDefPrec, dfl260RuleName=dfl260RuleName, dfl260DHCPRelayRuleTable=dfl260DHCPRelayRuleTable, dfl260IfBitsTotCnt=dfl260IfBitsTotCnt, dfl260PipePrecReservedBps=dfl260PipePrecReservedBps, dfl260DHCPServerGroup=dfl260DHCPServerGroup, dfl260HttpAlgCntFltEntry=dfl260HttpAlgCntFltEntry, dfl260HASyncSendQueueUsagePkt=dfl260HASyncSendQueueUsagePkt, dfl260PipePrecEntry=dfl260PipePrecEntry, dfl260HttpAlgTotalBlocked=dfl260HttpAlgTotalBlocked, dfl260SmtpAlgTotCheckedSes=dfl260SmtpAlgTotCheckedSes, dfl260LinkMonGrp=dfl260LinkMonGrp, dfl260DHCPRuleTable=dfl260DHCPRuleTable, dfl260PipeIndex=dfl260PipeIndex, dfl260IfHCBitsInCnt=dfl260IfHCBitsInCnt, dfl260LinkMonGrpEntry=dfl260LinkMonGrpEntry, dfl260RuleUseGroup=dfl260RuleUseGroup, dfl260DHCPTotalRejected=dfl260DHCPTotalRejected, dfl260IfTxRingSaturation=dfl260IfTxRingSaturation, dfl260DHCPRuleUsagePercent=dfl260DHCPRuleUsagePercent, dfl260IfStatsGroup=dfl260IfStatsGroup, dfl260IPsecTotalRekeys=dfl260IPsecTotalRekeys, dfl260DHCPRejectedRequests=dfl260DHCPRejectedRequests, dfl260LinkMonHostId=dfl260LinkMonHostId, 
dfl260PipeNumUsers=dfl260PipeNumUsers, dfl260IPsecPhaseOneActive=dfl260IPsecPhaseOneActive, dfl260SmtpAlgTotSpamSes=dfl260SmtpAlgTotSpamSes, dfl260UserAuthRuleIndex=dfl260UserAuthRuleIndex, dfl260IPsecInOctetsUncomp=dfl260IPsecInOctetsUncomp, dfl260SysCpuLoad=dfl260SysCpuLoad, dfl260SysTCPUsage=dfl260SysTCPUsage, dfl260SysTCPSendSmall=dfl260SysTCPSendSmall, dfl260IPsecTotalTransforms=dfl260IPsecTotalTransforms, dfl260HASyncSendResentPackets=dfl260HASyncSendResentPackets, netdefendMgmt=netdefendMgmt, dfl260DHCPActiveClients=dfl260DHCPActiveClients, dfl260IPsecActiveTransforms=dfl260IPsecActiveTransforms, dfl260SmtpAlg=dfl260SmtpAlg, dfl260IfFragReassOk=dfl260IfFragReassOk, dfl260UserAuthGroup=dfl260UserAuthGroup, dfl260PipeMinPrec=dfl260PipeMinPrec, dfl260SysPscTcpFin=dfl260SysPscTcpFin, dfl260IfTxRingEntry=dfl260IfTxRingEntry, dfl260UserAuthRuleUseTable=dfl260UserAuthRuleUseTable, dfl260IPsecInOctetsComp=dfl260IPsecInOctetsComp, dfl260PipePrecDelayedPackets=dfl260PipePrecDelayedPackets, dfl260DHCPRelayCurClients=dfl260DHCPRelayCurClients, dfl260HttpAlgTotalRequested=dfl260HttpAlgTotalRequested, dfl260reg=dfl260reg, dfl260HASyncSendQueueUsageOct=dfl260HASyncSendQueueUsageOct, dfl260AlgConnections=dfl260AlgConnections, dfl260DHCPRelayRuleCurClients=dfl260DHCPRelayRuleCurClients, dfl260UserAuthPPPUsers=dfl260UserAuthPPPUsers, dfl260IPPoolIndex=dfl260IPPoolIndex, dfl260DHCPTotalLeases=dfl260DHCPTotalLeases, dfl260LinkMonGrpName=dfl260LinkMonGrpName, dfl260IPsecQuickModeFailed=dfl260IPsecQuickModeFailed, dfl260PipeMaxPrec=dfl260PipeMaxPrec, dfl260IfVlanStatsEntry=dfl260IfVlanStatsEntry, dfl260HttpAlgTotalAllowed=dfl260HttpAlgTotalAllowed, dlink=dlink, dfl260IPPoolEntry=dfl260IPPoolEntry, dfl260HttpAlgTable=dfl260HttpAlgTable, dfl260IPPoolMisses=dfl260IPPoolMisses, dfl260OS=dfl260OS, dfl260SysMemUsage=dfl260SysMemUsage, dfl260IPsecOutOfTransforms=dfl260IPsecOutOfTransforms, dfl260DHCPRelay=dfl260DHCPRelay, dfl260RuleUse=dfl260RuleUse, dfl260DHCPRelayRejected=dfl260DHCPRelayRejected, dfl260IfVlanStatsTable=dfl260IfVlanStatsTable, dfl260UserAuthRuleName=dfl260UserAuthRuleName, dfl260PipeDropedPackets=dfl260PipeDropedPackets, dfl260UserAuthRuleUse=dfl260UserAuthRuleUse, dfl260IfVlanUntaggedOutPkts=dfl260IfVlanUntaggedOutPkts, dfl260SmtpAlgDnsBlChecked=dfl260SmtpAlgDnsBlChecked, dfl260IfRxRingIndex=dfl260IfRxRingIndex, dfl260PipesObjectGroup=dfl260PipesObjectGroup, dfl260IfRxRingTable=dfl260IfRxRingTable, dfl260LinkMonGrpHostsUp=dfl260LinkMonGrpHostsUp, dfl260SysPscTcpOpen=dfl260SysPscTcpOpen, dfl260LinkMonHostPacketsLost=dfl260LinkMonHostPacketsLost, dfl260SysPscTcpSyn=dfl260SysPscTcpSyn, dfl260HASyncSendQueueLength=dfl260HASyncSendQueueLength, dfl260HttpAlgCntFltAllowed=dfl260HttpAlgCntFltAllowed, dfl260SysForwardedBits=dfl260SysForwardedBits, dfl260UserAuthHTTPUsers=dfl260UserAuthHTTPUsers, dfl260IfVlanGroup=dfl260IfVlanGroup, dfl260IPPools=dfl260IPPools, dfl260HttpAlgCntFltRequests=dfl260HttpAlgCntFltRequests, dfl260RxRingFlooded=dfl260RxRingFlooded, dfl260IfHCPktsInCnt=dfl260IfHCPktsInCnt, dfl260HttpAlg=dfl260HttpAlg, dfl260IPPoolUsed=dfl260IPPoolUsed, dfl260SysTCPGroup=dfl260SysTCPGroup, dfl260IPsecQuickModeActive=dfl260IPsecQuickModeActive, dfl260SmtpAlgIndex=dfl260SmtpAlgIndex, dfl260HAGroup=dfl260HAGroup, dfl260DHCPRelayCurTrans=dfl260DHCPRelayCurTrans, dfl260StatsCompliance=dfl260StatsCompliance, dfl260HWSensorName=dfl260HWSensorName, dfl260PipePrec=dfl260PipePrec, dfl260IPPoolClientFails=dfl260IPPoolClientFails, dfl260SystemObjectGroup=dfl260SystemObjectGroup, 
dfl260IPPoolName=dfl260IPPoolName, dfl260IPPoolFree=dfl260IPPoolFree, dfl260SysPerStateCounters=dfl260SysPerStateCounters, dfl260IfName=dfl260IfName, dfl260SysTCPRecvLarge=dfl260SysTCPRecvLarge, dfl260ALG=dfl260ALG, dfl260IfHCBitsTotCnt=dfl260IfHCBitsTotCnt, dfl260LinkMonHostTable=dfl260LinkMonHostTable, dfl260HttpAlgCntFltName=dfl260HttpAlgCntFltName, dfl260IfRxRingSaturation=dfl260IfRxRingSaturation, dfl260HWSensorIndex=dfl260HWSensorIndex, dfl260SmtpAlgTable=dfl260SmtpAlgTable, dfl260IfHCPktsTotCnt=dfl260IfHCPktsTotCnt, dfl260HWSensorEntry=dfl260HWSensorEntry, dfl260IPsecGlobal=dfl260IPsecGlobal, dfl260PipePrecBps=dfl260PipePrecBps, dfl260HA=dfl260HA, dfl260Pipes=dfl260Pipes, dfl260IfStatsEntry=dfl260IfStatsEntry, dfl260IfVlanUntaggedTotOctets=dfl260IfVlanUntaggedTotOctets, dfl260IfPktsTotCnt=dfl260IfPktsTotCnt, dfl260DHCPRuleName=dfl260DHCPRuleName, dfl260DHCPRelayRuleEntry=dfl260DHCPRelayRuleEntry, dfl260IPPoolPrepare=dfl260IPPoolPrepare, dfl260SysTCPRecvSmall=dfl260SysTCPRecvSmall, dfl260UserAuthRuleUseEntry=dfl260UserAuthRuleUseEntry, dfl260SmtpAlgDnsBlIndex=dfl260SmtpAlgDnsBlIndex, dfl260IfPktsInCnt=dfl260IfPktsInCnt, dfl260IfFragsIn=dfl260IfFragsIn, dfl260IPPoolsNumber=dfl260IPPoolsNumber, dfl260PipePrecDynLimBps=dfl260PipePrecDynLimBps, dfl260IfStatsIndex=dfl260IfStatsIndex, dfl260IfRxAvgUse=dfl260IfRxAvgUse, dfl260PipeDelayedPackets=dfl260PipeDelayedPackets, dfl260SmtpAlgEntry=dfl260SmtpAlgEntry, dfl260SysConnCPS=dfl260SysConnCPS, dfl260IfTxDespools=dfl260IfTxDespools, dfl260IfRxRingFifoErrors=dfl260IfRxRingFifoErrors, dfl260SmtpAlgDnsBlFailChecks=dfl260SmtpAlgDnsBlFailChecks, dfl260SysPscUdp=dfl260SysPscUdp, dfl260DHCPActiveClientsPercent=dfl260DHCPActiveClientsPercent, dfl260IfVlanUntaggedInPkts=dfl260IfVlanUntaggedInPkts, dfl260PipePrecDynUsrLimBps=dfl260PipePrecDynUsrLimBps, dfl260IfHCPktsOutCnt=dfl260IfHCPktsOutCnt, dfl260IPsecPhaseOneAggrModeDone=dfl260IPsecPhaseOneAggrModeDone, dfl260IfHCBitsOutCnt=dfl260IfHCBitsOutCnt, dfl260IPsecOutOctetsUncomp=dfl260IPsecOutOctetsUncomp, dfl260SysPscOther=dfl260SysPscOther, dfl260HttpAlgCntFltIndex=dfl260HttpAlgCntFltIndex, dfl260IfVlanUntaggedInOctets=dfl260IfVlanUntaggedInOctets, dfl260PipeUsers=dfl260PipeUsers, dfl260IfTxRingIndex=dfl260IfTxRingIndex, dfl260IfTxAvgUse=dfl260IfTxAvgUse, dfl260HttpAlgIndex=dfl260HttpAlgIndex, dfl260AlgSessions=dfl260AlgSessions, dfl260AlgTCPStreams=dfl260AlgTCPStreams, dfl260IPsecOutOctetsComp=dfl260IPsecOutOctetsComp, dfl260SmtpAlgGroup=dfl260SmtpAlgGroup, dfl260HWSensorUnit=dfl260HWSensorUnit, dfl260IPsecInPackets=dfl260IPsecInPackets, dfl260LinkMonitor=dfl260LinkMonitor, dfl260UserAuth=dfl260UserAuth, dfl260_MIB=dfl260_MIB, dfl260OSStats=dfl260OSStats, dfl260MibObjectGroups=dfl260MibObjectGroups, dfl260HWSensorValue=dfl260HWSensorValue, dfl260IPsec=dfl260IPsec, dfl260DHCPRelayRuleRejSrvPkts=dfl260DHCPRelayRuleRejSrvPkts, dfl260SysConnOPS=dfl260SysConnOPS, dfl260LinkMonHostIndex=dfl260LinkMonHostIndex, dfl260IPsecPhaseOneDone=dfl260IPsecPhaseOneDone, dfl260HttpAlgName=dfl260HttpAlgName, dfl260SysBuffUse=dfl260SysBuffUse, dfl260PipeTable=dfl260PipeTable, PYSNMP_MODULE_ID=dfl260_MIB, dfl260AlgGroup=dfl260AlgGroup, dfl260VPN=dfl260VPN, dfl260PipePrecTable=dfl260PipePrecTable, dfl260IPsecObjectGroup=dfl260IPsecObjectGroup, dfl260System=dfl260System, dfl260HttpAlgCntFltTable=dfl260HttpAlgCntFltTable, dfl260PipeCurrentBps=dfl260PipeCurrentBps, dfl260IfPktsOutCnt=dfl260IfPktsOutCnt, dfl260PipePrecTotalPps=dfl260PipePrecTotalPps, dfl260HttpAlgCntFltBlocked=dfl260HttpAlgCntFltBlocked, dfl260=dfl260, 
dfl260DHCPServer=dfl260DHCPServer, dfl260IfRxRingEntry=dfl260IfRxRingEntry, dfl260IfBitsInCnt=dfl260IfBitsInCnt, dfl260LinkMonGrpTable=dfl260LinkMonGrpTable, dfl260RuleIndex=dfl260RuleIndex, dfl260MibConfs=dfl260MibConfs, dfl260DHCPRuleEntry=dfl260DHCPRuleEntry, dfl260IPsecForwardedOctetsComp=dfl260IPsecForwardedOctetsComp, dfl260IPPoolGroup=dfl260IPPoolGroup, dfl260IPsecInfoFailed=dfl260IPsecInfoFailed)
mibBuilder.exportSymbols("DFL260-MIB", dfl260DHCPRelayRuleRejCliPkts=dfl260DHCPRelayRuleRejCliPkts, dfl260LinkMonHostShortTermLoss=dfl260LinkMonHostShortTermLoss, dfl260LinkMonitorGroup=dfl260LinkMonitorGroup, dfl260SmtpAlgDnsBlTable=dfl260SmtpAlgDnsBlTable, dfl260SysTCPSendLarge=dfl260SysTCPSendLarge, dfl260MibModules=dfl260MibModules)
| [
"[email protected]"
] | |
d19988ad33589d48cc57918d518294a2fd6150d7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-490.py | 878f7cc3ed471a6dd801ce1f464e12d880342a22 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | # Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
    a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
    i = i + 1
| [
"[email protected]"
] | |
ac25687848306c6a9cff59e9ab2267b666d426d9 | 71f00ed87cd980bb2f92c08b085c5abe40a317fb | /BestOreo/W2V_CNN_robustchecking.py | e2d480f65e769e2d5e81dee662d63187818b0ca9 | [] | no_license | factoryofthesun/Rao-NLP | 2bd8269a8eed1cb352c14c8fde88e3111ccca088 | 87f9723f5ee51bd21310d58c3425a2a7271ec3c5 | refs/heads/master | 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 | Python | UTF-8 | Python | false | false | 9,146 | py | # -*- coding: utf-8 -*-
"""Shuaiqi_train.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11DfSUkrGQfPsEtfHoJxewiBcqBM8OlfZ
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
import tensorflow as tf
import numpy as np
import pandas as pd
import W2V_Helpers.data_helpers as data_helpers
from W2V_Helpers.w2v_kfold import train_word2vec
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Input, MaxPooling1D, Convolution1D, Embedding
from keras.layers.merge import Concatenate
from keras.backend import clear_session
from keras.preprocessing import sequence
from sklearn.model_selection import KFold
import time
import datetime
# ---------------------- Parameters section -------------------
#
# Model type. See Kim Yoon's Convolutional Neural Networks for Sentence Classification, Section 3
model_type = "CNN-non-static" # CNN-rand|CNN-non-static|CNN-static
t0 = time.time()
# Data source
from pathlib import Path
data_path = str(Path(__file__).parent / "../Data")
train_data_path = data_path + "/mturk_train.csv"
train_data_x_col = "inputtext"
train_data_y_cols = ["rating1", "rating2", "rating3", "rating4", "rating5"]
output_dir = "output"
#models_dir = "models"
# Model Hyperparameters
embedding_dim = 50
filter_sizes = (3, 8)
num_filters = 8
dropout_prob = (0.7, 0.9)
hidden_dims = 70
# Training parameters
batch_size = 64
num_epochs = 50
# Prepossessing parameters
sequence_length = 400
max_words = 5000
# Word2Vec parameters (see train_word2vec)
min_word_count = 1
context = 10
# ration of training dataset
train_percent = 0.9
#
# ---------------------- Parameters end -----------------------
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
def load_train_data(kfold = False):
x, y, vocabulary, vocabulary_inv_list = data_helpers.load_train_data(train_path=train_data_path,
train_x_col=train_data_x_col,
train_y_cols=train_data_y_cols,
save_path="W2V_Helpers"
)
vocabulary_inv = {key: value for key, value in enumerate(vocabulary_inv_list)}
y = y.argmax(axis=1)
if not kfold:
# Shuffle data
shuffle_indices = np.random.permutation(np.arange(len(y)))
x = x[shuffle_indices]
y = y[shuffle_indices]
train_len = int(len(x) * train_percent)
x_train = x[:train_len]
y_train = y[:train_len]
x_val = x[train_len:]
y_val = y[train_len:]
return x_train, y_train, x_val, y_val, vocabulary_inv
else:
#Create 10 folds for 10% training/validation
train_ind_list = []
test_ind_list = []
kf = KFold(n_splits = 10)
for train_ind, test_ind in kf.split(x):
train_ind_list.append(train_ind)
test_ind_list.append(test_ind)
return x, y, train_ind_list, test_ind_list, vocabulary_inv
def loadModel(x_train, x_val, vocabulary_inv):
# Prepare embedding layer weights and convert inputs for static model
print("Model type is", model_type)
if model_type in ["CNN-non-static", "CNN-static"]:
embedding_weights = train_word2vec(np.vstack((x_train, x_val)), vocabulary_inv, num_features=embedding_dim,
min_word_count=min_word_count, context=context)
if model_type == "CNN-static":
x_train = np.stack([np.stack([embedding_weights[word] for word in sentence]) for sentence in x_train])
x_val = np.stack([np.stack([embedding_weights[word] for word in sentence]) for sentence in x_val])
print("x_train static shape:", x_train.shape)
print("x_val static shape:", x_val.shape)
elif model_type == "CNN-rand":
embedding_weights = None
else:
raise ValueError("Unknown model type")
# Build model
if model_type == "CNN-static":
input_shape = (sequence_length, embedding_dim)
else:
input_shape = (sequence_length,)
model_input = Input(shape=input_shape)
# Static model does not have embedding layer
if model_type == "CNN-static":
z = model_input
else:
z = Embedding(len(vocabulary_inv), embedding_dim, input_length=sequence_length, name="embedding")(model_input)
z = Dropout(dropout_prob[0])(z)
# Convolutional block
conv_blocks = []
for sz in filter_sizes:
conv = Convolution1D(filters=num_filters,
kernel_size=sz,
padding="valid",
activation="relu",
strides=1)(z)
conv = MaxPooling1D(pool_size=2)(conv)
conv = Flatten()(conv)
conv_blocks.append(conv)
z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
z = Dropout(dropout_prob[1])(z)
z = Dense(hidden_dims, activation="relu")(z)
model_output = Dense(1, activation="sigmoid")(z)
model = Model(model_input, model_output)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
# Initialize weights with word2vec
if model_type == "CNN-non-static":
weights = np.array([v for v in embedding_weights.values()])
print("Initializing embedding layer with word2vec weights, shape", weights.shape)
embedding_layer = model.get_layer("embedding")
embedding_layer.set_weights([weights])
return model
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
path = str(Path(__file__).parent / "../Plots")
def plotHistory(history, i):
epoch_count = range(1, len(history.history['loss']) + 1)
plt.plot(epoch_count, history.history['loss'], 'r--')
plt.plot(epoch_count, history.history['val_loss'], 'b-')
plt.legend(['Training Loss', 'Validation Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig(path + "/rep_w2v_cnn_loss_{}.png".format(i))
plt.clf()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.savefig(path + "/rep_w2v_cnn_accuracy_{}.png".format(i))
plt.clf()
# Data Preparation
print("Load data...")
x_train, y_train, x_val, y_val, vocabulary_inv = load_train_data()
#x, y, train_ind_list, test_ind_list, vocabulary_inv = load_train_data(True)
loss_list = []
accuracy_list = []
for i in range(5):
if sequence_length != x_val.shape[1]:
print("Adjusting sequence length for actual size")
sequence_length = x_val.shape[1]
print("x_train shape:", x_train.shape)
print("x_val shape:", x_val.shape)
print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))
model = loadModel(x_train, x_val, vocabulary_inv)
# Train the model
from keras.callbacks import EarlyStopping
    early_stopping = EarlyStopping(min_delta = 0.01, mode = 'max', monitor='val_acc', patience = 2)
    # Note: `callback` is built but never passed to model.fit below, so early
    # stopping is effectively inactive; its 'val_acc' monitor key also predates
    # the 'val_accuracy' naming that plotHistory reads from the history object.
    callback = [early_stopping]
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=num_epochs,
validation_data=(x_val, y_val), verbose=1)
plotHistory(history, i)
score = model.evaluate(x_val, y_val, batch_size=64, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
loss_list.append(score[0])
accuracy_list.append(score[1])
clear_session()
print("Average validation loss: {}".format(sum(loss_list)/len(loss_list)))
print("Average validation accuracy: {}".format(sum(accuracy_list)/len(accuracy_list)))
print("Total script time: {}".format(format_time(time.time() - t0)))
# Create count of the number of epochs
# Visualize learning curve. Here learning curve is not ideal. It should be much smoother as it decreases.
#As mentioned before, altering different hyper parameters especially learning rate can have a positive impact
#on accuracy and learning curve.
#
# **If validation loss >> training loss you can call it overfitting.**
#
# If validation loss > training loss you can call it some overfitting.
#
# If validation loss < training loss you can call it some underfitting.
#
# If validation loss << training loss you can call it underfitting.
#
# Just right if training loss ~ validation loss
#
# -----------------------------------------
#
# ### Steps for reducing overfitting:
#
# 1. Add more data
# 2. Use data augmentation
# 3. Use architectures that generalize well
# 4. Add regularization (mostly dropout, L1/L2 regularization are also possible; see the sketch below)
# 5. Reduce architecture complexity.
#
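# A minimal sketch of step 4 above (add regularization). This is an
# illustrative assumption rather than part of the original run: the helper
# name and the 1e-4 weight decay are hypothetical, and it only takes effect
# if wired into loadModel() in place of the plain hidden Dense layer.
def dense_with_l2(z_in, units=hidden_dims, weight_decay=1e-4):
    from keras import regularizers
    return Dense(units, activation="relu",
                 kernel_regularizer=regularizers.l2(weight_decay))(z_in)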
# print test accuracy
'''score = model.evaluate(x_val, y_val, batch_size=32, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])'''
#data_helpers.save_model(model, models_dir)
| [
"[email protected]"
] | |
e6a936ccc3de105e36ffef350ea2096d974dc9f0 | 760e1c14d056dd75958d367242c2a50e829ac4f0 | /剑指offer/6_旋转数组最小的数字.py | 795e06ad064cd143007a5bdc31ea65296446baea | [] | no_license | lawtech0902/py_imooc_algorithm | 8e85265b716f376ff1c53d0afd550470679224fb | 74550d68cd3fd2cfcc92e1bf6579ac3b8f31aa75 | refs/heads/master | 2021-04-26T22:54:42.176596 | 2018-09-23T15:45:22 | 2018-09-23T15:45:22 | 123,894,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # _*_ coding: utf-8 _*_
"""
Moving some number of elements from the front of an array to its end is called a rotation of the array. Given one rotation of a non-decreasingly sorted array, output the minimum element of the rotated array. For example, the array {3,4,5,1,2} is a rotation of {1,2,3,4,5}, and its minimum is 1. NOTE: all given elements are greater than 0; if the array size is 0, return 0.
__author__ = 'lawtech'
__date__ = '2018/5/9 9:35 PM'
"""
class Solution:
def minNumberInRotateArray(self, rotateArray):
# write code here
size = len(rotateArray)
if size == 0:
return 0
low, high = 0, size - 1
while rotateArray[low] >= rotateArray[high]:
if high - low == 1:
return rotateArray[high]
            mid = low + (high - low) // 2
            # With duplicates, equal values at low, mid and high give no way
            # to pick a half, so fall back to a linear scan of this range.
            if rotateArray[low] == rotateArray[mid] == rotateArray[high]:
                return min(rotateArray[low:high + 1])
            if rotateArray[mid] >= rotateArray[low]:
low = mid
else:
high = mid
return rotateArray[low]
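# Quick self-check using the docstring's example (an illustrative addition,
# not part of the original judged submission):
if __name__ == '__main__':
    print(Solution().minNumberInRotateArray([3, 4, 5, 1, 2]))  # expected: 1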
| [
"[email protected]"
] | |
a82beeb3c4f4b2b11632854d1f7251427ba6389b | 9d5c9d9373002ab4ed1b493136517e8b4ab160e5 | /saas/backend/apps/application/migrations/0009_auto_20200902_1134.py | 1040ea6a30228a6fcf4b7f4921d2955e0bf91fbf | [
"MIT"
] | permissive | robert871126/bk-iam-saas | f8299bb632fc853ef0131d445f84c6084fc84aba | 33c8f4ffe8697081abcfc5771b98a88c0578059f | refs/heads/master | 2023-08-23T19:23:01.987394 | 2021-10-22T09:45:28 | 2021-10-22T09:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.14 on 2020-09-02 03:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0008_auto_20200803_1157'),
]
operations = [
migrations.AlterField(
model_name='approvalnode',
name='approver',
            field=models.TextField(help_text='多个以英文逗号分隔', verbose_name='审批人'),  # help_text: "separate multiple values with half-width commas"; verbose_name: "approver"
),
]
| [
"[email protected]"
] | |
a315cc93eb48378a0694c0131b8d1a8bd460e157 | 432b9b1ba469ef94ffd93065d4fde5d8c89f1a6e | /DM3/src/data.py | 22298997aad2e1ff16e001bfe849a1acca56c01a | [] | no_license | NelleV/SVM | 0ea9931e2152d6200ef094325a9f1838eed99943 | a46cfecb7f5d4361a93d36bdf85c2cc76c72838b | refs/heads/master | 2020-06-05T07:31:34.034416 | 2012-03-06T19:36:40 | 2012-03-06T19:36:40 | 3,238,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | import csv
import numpy as np
def libras_movement():
"""
    Fetch the Libras Movement dataset from data/movement_libras.data.
    Returns
    -------
    X : numpy.ndarray of shape (n_samples, n_features)
        Feature columns, cast to float.
    Y : numpy.ndarray of shape (n_samples,)
        Class labels from the last column, cast to float.
    """
    X = []
    Y = []
    # Read the CSV and split each row into features (all but last) and label.
    with open('data/movement_libras.data', 'r') as f:
        for element in csv.reader(f):
            X.append(element[:-1])
            Y.append(element[-1])
    return np.array(X).astype('float'), np.array(Y).astype('float')
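# Minimal usage sketch (an illustrative addition; it assumes the script runs
# from the project root so that data/movement_libras.data resolves):
if __name__ == "__main__":
    X, Y = libras_movement()
    print(X.shape, Y.shape)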
| [
"[email protected]"
] | |
915d93ebfb350f0981dab2804861e7fe19306cc7 | b0ede55e98d454f558e5397369f9265893deedb5 | /SWEA/D3/4698_special_prime.py | 7e90335df7e3a59a8beb755177ef82b1905f53a7 | [] | no_license | YeonggilGo/python_practice | 5ff65852900c4c6769d541af16f74a27a67920ec | 43082568b5045a8efc1d596074bdca3e66b2fed1 | refs/heads/master | 2023-06-22T02:09:31.906745 | 2023-06-17T01:27:22 | 2023-06-17T01:27:22 | 280,361,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | primes = []
# Sieve of Eratosthenes over [0, 1000000]: each prime is appended once,
# then all of its multiples (itself included) are marked composite.
numbers = [True] * 1000001
numbers[0], numbers[1] = False, False
for i in range(2, 1000001):
    if numbers[i]:
        primes.append(i)
        for j in range(i, 1000001, i):
            numbers[j] = False
T = int(input())
for tc in range(1, T + 1):
    # Count primes in [A, B] whose decimal digits contain the digit D.
    D, A, B = map(int, input().split())
ans = 0
for prime in primes:
if prime < A:
continue
elif prime > B:
break
if str(D) in str(prime):
ans += 1
print(f'#{tc} {ans}')
| [
"[email protected]"
] | |
ba59ebba2e068546face3a92c586930dc6c334c9 | a45c87da1d573891a6009546b58320e6e9e0a54e | /html_compiler/compiler.py | ca07b433b7198def73b4a5f7ebd278ef26c0fcb4 | [
"MIT"
] | permissive | hsuanhauliu/html-compiler | f805254a5b58c3b21a95882d98784f55d63547fb | 17f2659b95153690b517f58964f9002426c08c03 | refs/heads/master | 2020-09-11T12:00:12.677145 | 2019-12-14T06:10:05 | 2019-12-14T06:10:05 | 222,057,278 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | """
Compiler module.
"""
import os
from bs4 import BeautifulSoup
def compile(path):
""" Recursive function for merging components """
soup = ""
next_dir, filename = _separate_dir_and_file(path)
with cd(next_dir):
with open(filename, "r") as rfile:
soup = BeautifulSoup(rfile, 'html.parser')
component_tags = soup.findAll("div", {"class": "m_component"})
for tag in component_tags:
tag_id = tag.get("id")
component_file = tag_id + ".html"
component = compile(component_file)
soup.find(id=tag_id).replaceWith(component)
return soup
def _separate_dir_and_file(path):
""" Helper function for separating file directory and the file """
temp = path.rfind("/")
if temp == -1:
return ".", path
return path[:temp], path[temp + 1:]
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
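# Minimal usage sketch (illustrative only; "index.html" is a hypothetical
# entry page whose <div class="m_component" id="..."> placeholders get merged):
if __name__ == "__main__":
    merged = compile("index.html")
    print(merged.prettify())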
| [
"[email protected]"
] | |
b8fbc89b00c608ef7d1a47c1ca35b6688318a5ea | 2776f806297ae2f05d6c6bcbf2205ed8eb3b9db8 | /ico/tests/contracts/test_require_customer_id.py | 3dfa23e0d176551f9efd672c9a4c5a07982db7b1 | [
"Apache-2.0"
] | permissive | ZOLTbyZENUM/ico | 138207db242053ded62ecc9a4f7d273209232a3f | 26e4ae717e5f04a3f41f32f5f52f7dddedaac65d | refs/heads/master | 2022-12-12T16:01:40.922647 | 2018-02-28T12:47:33 | 2018-02-28T12:47:33 | 123,442,497 | 0 | 0 | NOASSERTION | 2022-12-08T00:42:32 | 2018-03-01T14:02:07 | Python | UTF-8 | Python | false | false | 2,433 | py | """Customer id tracking."""
import uuid
import pytest
from ethereum.tester import TransactionFailed
from eth_utils import to_wei
from ico.tests.utils import time_travel
from ico.state import CrowdsaleState
from sha3 import keccak_256
from rlp.utils import decode_hex
@pytest.fixture
def crowdsale(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig):
"""Set up a crowdsale with customer id require policy."""
uncapped_flatprice.transact({"from": team_multisig}).setRequireCustomerId(True)
return uncapped_flatprice
@pytest.fixture
def token(uncapped_token):
"""Token contract we are buying."""
return uncapped_token
@pytest.fixture
def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:
"""Generate UUID v4 customer id as a hex string."""
customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4
return customer_id
def test_only_owner_change_change_policy(crowdsale, customer):
"""Only owner change change customerId required policy."""
with pytest.raises(TransactionFailed):
crowdsale.transact({"from": customer}).setRequireCustomerId(False)
def test_participate_with_customer_id(chain, crowdsale, customer, customer_id, token):
"""Buy tokens with a proper customer id."""
time_travel(chain, crowdsale.call().startsAt() + 1)
wei_value = to_wei(1, "ether")
assert crowdsale.call().getState() == CrowdsaleState.Funding
    # Checksum byte: first byte of keccak256 over the 16-byte (32 hex chars) customer id.
    checksumbyte = keccak_256(decode_hex(format(customer_id, 'x').zfill(32))).digest()[:1]
crowdsale.transact({"from": customer, "value": wei_value}).buyWithCustomerIdWithChecksum(customer_id, checksumbyte)
# We got credited
assert token.call().balanceOf(customer) > 0
# We have tracked the investor id
events = crowdsale.pastEvents("Invested").get()
assert len(events) == 1
e = events[0]
assert e["args"]["investor"] == customer
assert e["args"]["weiAmount"] == wei_value
assert e["args"]["customerId"] == customer_id
def test_participate_missing_customer_id(chain, crowdsale, customer, customer_id, token):
"""Cannot bypass customer id process."""
time_travel(chain, crowdsale.call().startsAt() + 1)
wei_value = to_wei(1, "ether")
assert crowdsale.call().getState() == CrowdsaleState.Funding
with pytest.raises(TransactionFailed):
crowdsale.transact({"from": customer, "value": wei_value}).buy()
| [
"[email protected]"
] | |
fc9ba580ad9a11c6f67bcea854c79af053e832b4 | 2a5145f811c0679b35af367d25fce5914c2e0e40 | /Algorithm/169_MajorityElement.py | 641252ef81bfbf55f340168da7a95c33bb00a40e | [] | no_license | lingtianwan/Leetcode | 8f93fc3fc85db289ca8f618143af2a43711425ba | bf2edab87dd96afab1ff411df35d3163c1dfdc55 | refs/heads/master | 2021-01-12T19:16:31.918703 | 2017-02-09T01:50:53 | 2017-02-09T01:50:53 | 81,396,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
for i in set(nums):
if nums.count(i) > len(nums) / 2:
return i
return 0
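# Alternative sketch: Boyer-Moore majority voting, O(n) time and O(1) space.
# This is an illustrative addition, not the submitted solution; it relies on
# the problem's guarantee that a majority element always exists.
def majority_element_voting(nums):
    candidate, count = None, 0
    for n in nums:
        if count == 0:
            candidate = n
        count += 1 if n == candidate else -1
    return candidate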
| [
"[email protected]"
] | |
d935698ed1490a86579c7639a9248a6761ca3fde | 0125bbe0ce453e94604ff5834fbc280fe44f3220 | /transquest/algo/sentence_level/siamesetransquest/readers/__init__.py | ae4c6526b6069a91c156cfd8a0f55c7f847bb325 | [
"Apache-2.0"
] | permissive | mfomicheva/TransQuest | fc51bcb90e386534845841fd75a3860054e76dd7 | 4225f7195a703414ed13ce597854cc1a59703229 | refs/heads/master | 2023-06-12T14:52:49.066705 | 2021-05-07T10:35:21 | 2021-05-07T10:35:21 | 263,876,762 | 6 | 1 | Apache-2.0 | 2020-05-14T09:52:07 | 2020-05-14T09:52:06 | null | UTF-8 | Python | false | false | 231 | py | # from .input_example import InputExample
# from .label_sentence_reader import LabelSentenceReader
# from .nli_data_reader import NLIDataReader
# from .qe_data_reader import QEDataReader
# from .triplet_reader import TripletReader
| [
"[email protected]"
] | |
ce69a986c534d70a5aa60a0025175768ba380815 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/clouds_20200703155257.py | 611b20c61a4e392632a8177a3ecf3c4d6ae86dde | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | def jumpingClouds(c):
    i = 0
    jumps = 0
    # Greedy walk: jump two clouds ahead whenever that cloud is safe (0),
    # otherwise jump one cloud; every move costs a single jump.
    while i < len(c) - 1:
        if i + 2 < len(c) and c[i + 2] == 0:
            i += 2
        else:
            i += 1
        jumps += 1
    print(jumps)
jumpingClouds([0,0,1,0,0,1,0])  # prints 4
| [
"[email protected]"
] | |
e41960c52ed9f1d8f4899297c7aa4df4e18f5413 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/og-.py | e881559c8f292114595ae1314a66e46d1d5952e6 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'oG-':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
| [
"[email protected]"
] | |
28ded34c244dfde21440e7b8a4c967128d3118be | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkImageDuplicatorPython.py | feba22d089e1bed3a6240415b04b2f8985228d76 | [] | no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149,049 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkImageDuplicatorPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkImageDuplicatorPython', [dirname(__file__)])
except ImportError:
import _itkImageDuplicatorPython
return _itkImageDuplicatorPython
if fp is not None:
try:
_mod = imp.load_module('_itkImageDuplicatorPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkImageDuplicatorPython = swig_import_helper()
del swig_import_helper
else:
import _itkImageDuplicatorPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkImagePython
import itkSizePython
import pyBasePython
import itkRGBAPixelPython
import itkFixedArrayPython
import itkMatrixPython
import itkCovariantVectorPython
import vnl_vector_refPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import itkVectorPython
import vnl_matrix_fixedPython
import itkPointPython
import itkRGBPixelPython
import ITKCommonBasePython
import itkSymmetricSecondRankTensorPython
import itkOffsetPython
import itkIndexPython
import itkImageRegionPython
def itkImageDuplicatorIUL3_New():
return itkImageDuplicatorIUL3.New()
def itkImageDuplicatorIUL2_New():
return itkImageDuplicatorIUL2.New()
def itkImageDuplicatorISSRTD33_New():
return itkImageDuplicatorISSRTD33.New()
def itkImageDuplicatorID3_New():
return itkImageDuplicatorID3.New()
def itkImageDuplicatorISSRTD22_New():
return itkImageDuplicatorISSRTD22.New()
def itkImageDuplicatorID2_New():
return itkImageDuplicatorID2.New()
def itkImageDuplicatorICVF43_New():
return itkImageDuplicatorICVF43.New()
def itkImageDuplicatorIVF43_New():
return itkImageDuplicatorIVF43.New()
def itkImageDuplicatorICVF33_New():
return itkImageDuplicatorICVF33.New()
def itkImageDuplicatorIVF33_New():
return itkImageDuplicatorIVF33.New()
def itkImageDuplicatorICVF23_New():
return itkImageDuplicatorICVF23.New()
def itkImageDuplicatorIVF23_New():
return itkImageDuplicatorIVF23.New()
def itkImageDuplicatorIF3_New():
return itkImageDuplicatorIF3.New()
def itkImageDuplicatorICVF42_New():
return itkImageDuplicatorICVF42.New()
def itkImageDuplicatorIVF42_New():
return itkImageDuplicatorIVF42.New()
def itkImageDuplicatorICVF32_New():
return itkImageDuplicatorICVF32.New()
def itkImageDuplicatorIVF32_New():
return itkImageDuplicatorIVF32.New()
def itkImageDuplicatorICVF22_New():
return itkImageDuplicatorICVF22.New()
def itkImageDuplicatorIVF22_New():
return itkImageDuplicatorIVF22.New()
def itkImageDuplicatorIF2_New():
return itkImageDuplicatorIF2.New()
def itkImageDuplicatorIUS3_New():
return itkImageDuplicatorIUS3.New()
def itkImageDuplicatorIUS2_New():
return itkImageDuplicatorIUS2.New()
def itkImageDuplicatorIRGBAUC3_New():
return itkImageDuplicatorIRGBAUC3.New()
def itkImageDuplicatorIRGBUC3_New():
return itkImageDuplicatorIRGBUC3.New()
def itkImageDuplicatorIUC3_New():
return itkImageDuplicatorIUC3.New()
def itkImageDuplicatorIRGBAUC2_New():
return itkImageDuplicatorIRGBAUC2.New()
def itkImageDuplicatorIRGBUC2_New():
return itkImageDuplicatorIRGBUC2.New()
def itkImageDuplicatorIUC2_New():
return itkImageDuplicatorIUC2.New()
def itkImageDuplicatorISS3_New():
return itkImageDuplicatorISS3.New()
def itkImageDuplicatorISS2_New():
return itkImageDuplicatorISS2.New()
class itkImageDuplicatorICVF22(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF22 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF22_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF22_Pointer":
"""Clone(itkImageDuplicatorICVF22 self) -> itkImageDuplicatorICVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF22') -> "void":
"""SetInputImage(itkImageDuplicatorICVF22 self, itkImageCVF22 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF22 *":
"""
GetOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22
GetOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF22 *":
"""GetModifiableOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF22 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF22
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF22 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF22
Create a new object of the class itkImageDuplicatorICVF22 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF22.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF22.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF22.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_Clone, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_SetInputImage, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetOutput, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetModifiableOutput, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_Update, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF22_swigregister
itkImageDuplicatorICVF22_swigregister(itkImageDuplicatorICVF22)
def itkImageDuplicatorICVF22___New_orig__() -> "itkImageDuplicatorICVF22_Pointer":
"""itkImageDuplicatorICVF22___New_orig__() -> itkImageDuplicatorICVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22___New_orig__()
def itkImageDuplicatorICVF22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF22 *":
"""itkImageDuplicatorICVF22_cast(itkLightObject obj) -> itkImageDuplicatorICVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_cast(obj)
class itkImageDuplicatorICVF23(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF23 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF23_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF23_Pointer":
"""Clone(itkImageDuplicatorICVF23 self) -> itkImageDuplicatorICVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF23') -> "void":
"""SetInputImage(itkImageDuplicatorICVF23 self, itkImageCVF23 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF23 *":
"""
GetOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23
GetOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF23 *":
"""GetModifiableOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF23 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF23
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF23 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF23
Create a new object of the class itkImageDuplicatorICVF23 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF23.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF23.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF23.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF23.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_Clone, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_SetInputImage, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetOutput, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetModifiableOutput, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_Update, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF23_swigregister
itkImageDuplicatorICVF23_swigregister(itkImageDuplicatorICVF23)
def itkImageDuplicatorICVF23___New_orig__() -> "itkImageDuplicatorICVF23_Pointer":
"""itkImageDuplicatorICVF23___New_orig__() -> itkImageDuplicatorICVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23___New_orig__()
def itkImageDuplicatorICVF23_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF23 *":
"""itkImageDuplicatorICVF23_cast(itkLightObject obj) -> itkImageDuplicatorICVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_cast(obj)
class itkImageDuplicatorICVF32(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF32 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF32_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF32_Pointer":
"""Clone(itkImageDuplicatorICVF32 self) -> itkImageDuplicatorICVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF32') -> "void":
"""SetInputImage(itkImageDuplicatorICVF32 self, itkImageCVF32 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF32 *":
"""
GetOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32
GetOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF32 *":
"""GetModifiableOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF32 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF32
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF32 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF32
Create a new object of the class itkImageDuplicatorICVF32 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF32.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF32.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF32.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF32.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_Clone, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_SetInputImage, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetOutput, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetModifiableOutput, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_Update, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF32_swigregister
itkImageDuplicatorICVF32_swigregister(itkImageDuplicatorICVF32)
def itkImageDuplicatorICVF32___New_orig__() -> "itkImageDuplicatorICVF32_Pointer":
"""itkImageDuplicatorICVF32___New_orig__() -> itkImageDuplicatorICVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32___New_orig__()
def itkImageDuplicatorICVF32_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF32 *":
"""itkImageDuplicatorICVF32_cast(itkLightObject obj) -> itkImageDuplicatorICVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_cast(obj)
class itkImageDuplicatorICVF33(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF33 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF33_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF33_Pointer":
"""Clone(itkImageDuplicatorICVF33 self) -> itkImageDuplicatorICVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF33') -> "void":
"""SetInputImage(itkImageDuplicatorICVF33 self, itkImageCVF33 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF33 *":
"""
GetOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33
GetOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF33 *":
"""GetModifiableOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF33 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF33
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF33 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF33
Create a new object of the class itkImageDuplicatorICVF33 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF33.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF33.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF33.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_Clone, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_SetInputImage, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetOutput, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetModifiableOutput, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_Update, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF33_swigregister
itkImageDuplicatorICVF33_swigregister(itkImageDuplicatorICVF33)
def itkImageDuplicatorICVF33___New_orig__() -> "itkImageDuplicatorICVF33_Pointer":
"""itkImageDuplicatorICVF33___New_orig__() -> itkImageDuplicatorICVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33___New_orig__()
def itkImageDuplicatorICVF33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF33 *":
"""itkImageDuplicatorICVF33_cast(itkLightObject obj) -> itkImageDuplicatorICVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_cast(obj)
class itkImageDuplicatorICVF42(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF42 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF42_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF42_Pointer":
"""Clone(itkImageDuplicatorICVF42 self) -> itkImageDuplicatorICVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF42') -> "void":
"""SetInputImage(itkImageDuplicatorICVF42 self, itkImageCVF42 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF42 *":
"""
GetOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42
GetOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF42 *":
"""GetModifiableOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF42 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF42
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF42 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF42
Create a new object of the class itkImageDuplicatorICVF42 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF42.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF42.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF42.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF42.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_Clone, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_SetInputImage, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetOutput, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetModifiableOutput, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_Update, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF42_swigregister
itkImageDuplicatorICVF42_swigregister(itkImageDuplicatorICVF42)
def itkImageDuplicatorICVF42___New_orig__() -> "itkImageDuplicatorICVF42_Pointer":
"""itkImageDuplicatorICVF42___New_orig__() -> itkImageDuplicatorICVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42___New_orig__()
def itkImageDuplicatorICVF42_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF42 *":
"""itkImageDuplicatorICVF42_cast(itkLightObject obj) -> itkImageDuplicatorICVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_cast(obj)
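
# --- Illustrative usage sketch (hand-written, not SWIG-generated) ---
# A minimal example of the duplicate workflow shared by every wrapper in this
# module, using the covariant-vector-float, 4-component, 2-D instantiation
# above. `image` is assumed to be an itkImageCVF42 produced elsewhere.
def _example_duplicate_icvf42(image):
    duplicator = itkImageDuplicatorICVF42.New()
    duplicator.SetInputImage(image)   # image to copy
    duplicator.Update()               # performs the copy
    return duplicator.GetOutput()     # the duplicated itkImageCVF42
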
class itkImageDuplicatorICVF43(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF43 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF43_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF43_Pointer":
"""Clone(itkImageDuplicatorICVF43 self) -> itkImageDuplicatorICVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF43') -> "void":
"""SetInputImage(itkImageDuplicatorICVF43 self, itkImageCVF43 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF43 *":
"""
GetOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43
GetOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF43 *":
"""GetModifiableOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF43 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF43
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF43 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF43
        Create a new object of the class itkImageDuplicatorICVF43 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF43.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF43.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF43.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorICVF43.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_Clone, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_SetInputImage, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetOutput, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetModifiableOutput, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_Update, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF43_swigregister
itkImageDuplicatorICVF43_swigregister(itkImageDuplicatorICVF43)
def itkImageDuplicatorICVF43___New_orig__() -> "itkImageDuplicatorICVF43_Pointer":
"""itkImageDuplicatorICVF43___New_orig__() -> itkImageDuplicatorICVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43___New_orig__()
def itkImageDuplicatorICVF43_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF43 *":
"""itkImageDuplicatorICVF43_cast(itkLightObject obj) -> itkImageDuplicatorICVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_cast(obj)
class itkImageDuplicatorID2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorID2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorID2_Pointer":
"""__New_orig__() -> itkImageDuplicatorID2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorID2_Pointer":
"""Clone(itkImageDuplicatorID2 self) -> itkImageDuplicatorID2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_Clone(self)
def SetInputImage(self, _arg: 'itkImageD2') -> "void":
"""SetInputImage(itkImageDuplicatorID2 self, itkImageD2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageD2 *":
"""
GetOutput(itkImageDuplicatorID2 self) -> itkImageD2
GetOutput(itkImageDuplicatorID2 self) -> itkImageD2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageD2 *":
"""GetModifiableOutput(itkImageDuplicatorID2 self) -> itkImageD2"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorID2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorID2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorID2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorID2"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorID2
        Create a new object of the class itkImageDuplicatorID2 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorID2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorID2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorID2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorID2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_Clone, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_SetInputImage, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_GetOutput, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_GetModifiableOutput, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_Update, None, itkImageDuplicatorID2)
itkImageDuplicatorID2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorID2_swigregister
itkImageDuplicatorID2_swigregister(itkImageDuplicatorID2)
def itkImageDuplicatorID2___New_orig__() -> "itkImageDuplicatorID2_Pointer":
"""itkImageDuplicatorID2___New_orig__() -> itkImageDuplicatorID2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2___New_orig__()
def itkImageDuplicatorID2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorID2 *":
"""itkImageDuplicatorID2_cast(itkLightObject obj) -> itkImageDuplicatorID2"""
return _itkImageDuplicatorPython.itkImageDuplicatorID2_cast(obj)
class itkImageDuplicatorID3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorID3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorID3_Pointer":
"""__New_orig__() -> itkImageDuplicatorID3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorID3_Pointer":
"""Clone(itkImageDuplicatorID3 self) -> itkImageDuplicatorID3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_Clone(self)
def SetInputImage(self, _arg: 'itkImageD3') -> "void":
"""SetInputImage(itkImageDuplicatorID3 self, itkImageD3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageD3 *":
"""
GetOutput(itkImageDuplicatorID3 self) -> itkImageD3
GetOutput(itkImageDuplicatorID3 self) -> itkImageD3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageD3 *":
"""GetModifiableOutput(itkImageDuplicatorID3 self) -> itkImageD3"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorID3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorID3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorID3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorID3"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorID3
        Create a new object of the class itkImageDuplicatorID3 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorID3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorID3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorID3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorID3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_Clone, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_SetInputImage, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_GetOutput, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_GetModifiableOutput, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_Update, None, itkImageDuplicatorID3)
itkImageDuplicatorID3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorID3_swigregister
itkImageDuplicatorID3_swigregister(itkImageDuplicatorID3)
def itkImageDuplicatorID3___New_orig__() -> "itkImageDuplicatorID3_Pointer":
"""itkImageDuplicatorID3___New_orig__() -> itkImageDuplicatorID3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3___New_orig__()
def itkImageDuplicatorID3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorID3 *":
"""itkImageDuplicatorID3_cast(itkLightObject obj) -> itkImageDuplicatorID3"""
return _itkImageDuplicatorPython.itkImageDuplicatorID3_cast(obj)
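
# Illustrative sketch (hand-written, not SWIG-generated): per the New()
# docstring above, named arguments are dispatched to the matching method
# prefixed with 'Set', so the two constructions below should be equivalent.
# `image` is assumed to be an itkImageD3.
def _example_new_with_kwargs(image):
    dup_a = itkImageDuplicatorID3.New(InputImage=image)  # calls SetInputImage(image)
    dup_b = itkImageDuplicatorID3.New()
    dup_b.SetInputImage(image)
    return dup_a, dup_b
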
class itkImageDuplicatorIF2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIF2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIF2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIF2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIF2_Pointer":
"""Clone(itkImageDuplicatorIF2 self) -> itkImageDuplicatorIF2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_Clone(self)
def SetInputImage(self, _arg: 'itkImageF2') -> "void":
"""SetInputImage(itkImageDuplicatorIF2 self, itkImageF2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageF2 *":
"""
GetOutput(itkImageDuplicatorIF2 self) -> itkImageF2
GetOutput(itkImageDuplicatorIF2 self) -> itkImageF2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageF2 *":
"""GetModifiableOutput(itkImageDuplicatorIF2 self) -> itkImageF2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIF2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIF2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIF2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIF2
        Create a new object of the class itkImageDuplicatorIF2 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIF2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIF2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIF2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIF2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_Clone, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_SetInputImage, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_GetOutput, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_GetModifiableOutput, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_Update, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIF2_swigregister
itkImageDuplicatorIF2_swigregister(itkImageDuplicatorIF2)
def itkImageDuplicatorIF2___New_orig__() -> "itkImageDuplicatorIF2_Pointer":
"""itkImageDuplicatorIF2___New_orig__() -> itkImageDuplicatorIF2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2___New_orig__()
def itkImageDuplicatorIF2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF2 *":
"""itkImageDuplicatorIF2_cast(itkLightObject obj) -> itkImageDuplicatorIF2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF2_cast(obj)
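
# Illustrative sketch (hand-written, not SWIG-generated): __internal_call__
# is the hook used for the one-shot functional style -- as its body above
# shows, it runs Update() and returns GetOutput(). Calling it directly is
# unusual but works. `image` is assumed to be an itkImageF2.
def _example_one_shot(image):
    return itkImageDuplicatorIF2.New(InputImage=image).__internal_call__()
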
class itkImageDuplicatorIF3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIF3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIF3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIF3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIF3_Pointer":
"""Clone(itkImageDuplicatorIF3 self) -> itkImageDuplicatorIF3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_Clone(self)
def SetInputImage(self, _arg: 'itkImageF3') -> "void":
"""SetInputImage(itkImageDuplicatorIF3 self, itkImageF3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageF3 *":
"""
GetOutput(itkImageDuplicatorIF3 self) -> itkImageF3
GetOutput(itkImageDuplicatorIF3 self) -> itkImageF3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageF3 *":
"""GetModifiableOutput(itkImageDuplicatorIF3 self) -> itkImageF3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIF3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIF3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIF3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIF3
        Create a new object of the class itkImageDuplicatorIF3 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIF3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIF3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIF3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIF3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_Clone, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_SetInputImage, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_GetOutput, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_GetModifiableOutput, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_Update, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIF3_swigregister
itkImageDuplicatorIF3_swigregister(itkImageDuplicatorIF3)
def itkImageDuplicatorIF3___New_orig__() -> "itkImageDuplicatorIF3_Pointer":
"""itkImageDuplicatorIF3___New_orig__() -> itkImageDuplicatorIF3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3___New_orig__()
def itkImageDuplicatorIF3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF3 *":
"""itkImageDuplicatorIF3_cast(itkLightObject obj) -> itkImageDuplicatorIF3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIF3_cast(obj)
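
# Illustrative sketch (hand-written, not SWIG-generated): cast() downcasts a
# generic itkLightObject back to the concrete duplicator type, e.g. after the
# object has travelled through an API that only deals in base-class pointers.
# Behaviour on a mismatched type follows the underlying C++ cast; here we
# simply assume `light_object` really wraps an itkImageDuplicatorIF3.
def _example_cast(light_object):
    return itkImageDuplicatorIF3_cast(light_object)
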
class itkImageDuplicatorIRGBAUC2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIRGBAUC2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIRGBAUC2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIRGBAUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIRGBAUC2_Pointer":
"""Clone(itkImageDuplicatorIRGBAUC2 self) -> itkImageDuplicatorIRGBAUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Clone(self)
def SetInputImage(self, _arg: 'itkImageRGBAUC2') -> "void":
"""SetInputImage(itkImageDuplicatorIRGBAUC2 self, itkImageRGBAUC2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageRGBAUC2 *":
"""
GetOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2
GetOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageRGBAUC2 *":
"""GetModifiableOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIRGBAUC2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBAUC2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIRGBAUC2
        Create a new object of the class itkImageDuplicatorIRGBAUC2 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIRGBAUC2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIRGBAUC2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIRGBAUC2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIRGBAUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Clone, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_SetInputImage, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetOutput, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetModifiableOutput, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Update, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_swigregister
itkImageDuplicatorIRGBAUC2_swigregister(itkImageDuplicatorIRGBAUC2)
def itkImageDuplicatorIRGBAUC2___New_orig__() -> "itkImageDuplicatorIRGBAUC2_Pointer":
"""itkImageDuplicatorIRGBAUC2___New_orig__() -> itkImageDuplicatorIRGBAUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2___New_orig__()
def itkImageDuplicatorIRGBAUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC2 *":
"""itkImageDuplicatorIRGBAUC2_cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_cast(obj)
class itkImageDuplicatorIRGBAUC3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIRGBAUC3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIRGBAUC3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIRGBAUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIRGBAUC3_Pointer":
"""Clone(itkImageDuplicatorIRGBAUC3 self) -> itkImageDuplicatorIRGBAUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Clone(self)
def SetInputImage(self, _arg: 'itkImageRGBAUC3') -> "void":
"""SetInputImage(itkImageDuplicatorIRGBAUC3 self, itkImageRGBAUC3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageRGBAUC3 *":
"""
GetOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3
GetOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageRGBAUC3 *":
"""GetModifiableOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIRGBAUC3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBAUC3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIRGBAUC3
        Create a new object of the class itkImageDuplicatorIRGBAUC3 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIRGBAUC3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIRGBAUC3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIRGBAUC3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIRGBAUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Clone, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_SetInputImage, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetOutput, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetModifiableOutput, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Update, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_swigregister
itkImageDuplicatorIRGBAUC3_swigregister(itkImageDuplicatorIRGBAUC3)
def itkImageDuplicatorIRGBAUC3___New_orig__() -> "itkImageDuplicatorIRGBAUC3_Pointer":
"""itkImageDuplicatorIRGBAUC3___New_orig__() -> itkImageDuplicatorIRGBAUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3___New_orig__()
def itkImageDuplicatorIRGBAUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC3 *":
"""itkImageDuplicatorIRGBAUC3_cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_cast(obj)
class itkImageDuplicatorIRGBUC2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIRGBUC2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIRGBUC2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIRGBUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIRGBUC2_Pointer":
"""Clone(itkImageDuplicatorIRGBUC2 self) -> itkImageDuplicatorIRGBUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Clone(self)
def SetInputImage(self, _arg: 'itkImageRGBUC2') -> "void":
"""SetInputImage(itkImageDuplicatorIRGBUC2 self, itkImageRGBUC2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageRGBUC2 *":
"""
GetOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2
GetOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageRGBUC2 *":
"""GetModifiableOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIRGBUC2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBUC2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIRGBUC2
        Create a new object of the class itkImageDuplicatorIRGBUC2 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIRGBUC2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIRGBUC2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIRGBUC2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIRGBUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Clone, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_SetInputImage, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetOutput, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetModifiableOutput, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Update, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_swigregister
itkImageDuplicatorIRGBUC2_swigregister(itkImageDuplicatorIRGBUC2)
def itkImageDuplicatorIRGBUC2___New_orig__() -> "itkImageDuplicatorIRGBUC2_Pointer":
"""itkImageDuplicatorIRGBUC2___New_orig__() -> itkImageDuplicatorIRGBUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2___New_orig__()
def itkImageDuplicatorIRGBUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC2 *":
"""itkImageDuplicatorIRGBUC2_cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_cast(obj)
class itkImageDuplicatorIRGBUC3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIRGBUC3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIRGBUC3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIRGBUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIRGBUC3_Pointer":
"""Clone(itkImageDuplicatorIRGBUC3 self) -> itkImageDuplicatorIRGBUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Clone(self)
def SetInputImage(self, _arg: 'itkImageRGBUC3') -> "void":
"""SetInputImage(itkImageDuplicatorIRGBUC3 self, itkImageRGBUC3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageRGBUC3 *":
"""
GetOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3
GetOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageRGBUC3 *":
"""GetModifiableOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIRGBUC3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBUC3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIRGBUC3
        Create a new object of the class itkImageDuplicatorIRGBUC3 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIRGBUC3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIRGBUC3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIRGBUC3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIRGBUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Clone, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_SetInputImage, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetOutput, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetModifiableOutput, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Update, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_swigregister
itkImageDuplicatorIRGBUC3_swigregister(itkImageDuplicatorIRGBUC3)
def itkImageDuplicatorIRGBUC3___New_orig__() -> "itkImageDuplicatorIRGBUC3_Pointer":
"""itkImageDuplicatorIRGBUC3___New_orig__() -> itkImageDuplicatorIRGBUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3___New_orig__()
def itkImageDuplicatorIRGBUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC3 *":
"""itkImageDuplicatorIRGBUC3_cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_cast(obj)
class itkImageDuplicatorISS2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorISS2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorISS2_Pointer":
"""__New_orig__() -> itkImageDuplicatorISS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorISS2_Pointer":
"""Clone(itkImageDuplicatorISS2 self) -> itkImageDuplicatorISS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_Clone(self)
def SetInputImage(self, _arg: 'itkImageSS2') -> "void":
"""SetInputImage(itkImageDuplicatorISS2 self, itkImageSS2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageSS2 *":
"""
GetOutput(itkImageDuplicatorISS2 self) -> itkImageSS2
GetOutput(itkImageDuplicatorISS2 self) -> itkImageSS2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageSS2 *":
"""GetModifiableOutput(itkImageDuplicatorISS2 self) -> itkImageSS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorISS2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISS2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorISS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorISS2
        Create a new object of the class itkImageDuplicatorISS2 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorISS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorISS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorISS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorISS2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_Clone, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_SetInputImage, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_GetOutput, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_GetModifiableOutput, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_Update, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISS2_swigregister
itkImageDuplicatorISS2_swigregister(itkImageDuplicatorISS2)
def itkImageDuplicatorISS2___New_orig__() -> "itkImageDuplicatorISS2_Pointer":
"""itkImageDuplicatorISS2___New_orig__() -> itkImageDuplicatorISS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2___New_orig__()
def itkImageDuplicatorISS2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS2 *":
"""itkImageDuplicatorISS2_cast(itkLightObject obj) -> itkImageDuplicatorISS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS2_cast(obj)
class itkImageDuplicatorISS3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorISS3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorISS3_Pointer":
"""__New_orig__() -> itkImageDuplicatorISS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorISS3_Pointer":
"""Clone(itkImageDuplicatorISS3 self) -> itkImageDuplicatorISS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_Clone(self)
def SetInputImage(self, _arg: 'itkImageSS3') -> "void":
"""SetInputImage(itkImageDuplicatorISS3 self, itkImageSS3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageSS3 *":
"""
GetOutput(itkImageDuplicatorISS3 self) -> itkImageSS3
GetOutput(itkImageDuplicatorISS3 self) -> itkImageSS3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageSS3 *":
"""GetModifiableOutput(itkImageDuplicatorISS3 self) -> itkImageSS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorISS3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISS3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorISS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorISS3
        Create a new object of the class itkImageDuplicatorISS3 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorISS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorISS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorISS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorISS3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_Clone, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_SetInputImage, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_GetOutput, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_GetModifiableOutput, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_Update, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISS3_swigregister
itkImageDuplicatorISS3_swigregister(itkImageDuplicatorISS3)
def itkImageDuplicatorISS3___New_orig__() -> "itkImageDuplicatorISS3_Pointer":
"""itkImageDuplicatorISS3___New_orig__() -> itkImageDuplicatorISS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3___New_orig__()
def itkImageDuplicatorISS3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS3 *":
"""itkImageDuplicatorISS3_cast(itkLightObject obj) -> itkImageDuplicatorISS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorISS3_cast(obj)
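
# Illustrative sketch (hand-written, not SWIG-generated): GetModifiableOutput()
# returns a non-const view of the copy, so the duplicate can be edited without
# touching the input image. FillBuffer() is a standard itk.Image method;
# `image` is assumed to be an itkImageSS3.
def _example_modify_copy(image):
    duplicator = itkImageDuplicatorISS3.New(InputImage=image)
    duplicator.Update()
    copy = duplicator.GetModifiableOutput()
    copy.FillBuffer(0)   # zero out the copy; `image` itself is left unchanged
    return copy
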
class itkImageDuplicatorISSRTD22(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorISSRTD22 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorISSRTD22_Pointer":
"""__New_orig__() -> itkImageDuplicatorISSRTD22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorISSRTD22_Pointer":
"""Clone(itkImageDuplicatorISSRTD22 self) -> itkImageDuplicatorISSRTD22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Clone(self)
def SetInputImage(self, _arg: 'itkImageSSRTD22') -> "void":
"""SetInputImage(itkImageDuplicatorISSRTD22 self, itkImageSSRTD22 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageSSRTD22 *":
"""
GetOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22
GetOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22
"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageSSRTD22 *":
"""GetModifiableOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorISSRTD22 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISSRTD22
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD22 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorISSRTD22"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorISSRTD22
        Create a new object of the class itkImageDuplicatorISSRTD22 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorISSRTD22.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorISSRTD22.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorISSRTD22.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorISSRTD22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Clone, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_SetInputImage, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetOutput, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetModifiableOutput, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Update, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_swigregister
itkImageDuplicatorISSRTD22_swigregister(itkImageDuplicatorISSRTD22)
def itkImageDuplicatorISSRTD22___New_orig__() -> "itkImageDuplicatorISSRTD22_Pointer":
"""itkImageDuplicatorISSRTD22___New_orig__() -> itkImageDuplicatorISSRTD22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22___New_orig__()
def itkImageDuplicatorISSRTD22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD22 *":
"""itkImageDuplicatorISSRTD22_cast(itkLightObject obj) -> itkImageDuplicatorISSRTD22"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_cast(obj)
class itkImageDuplicatorISSRTD33(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorISSRTD33 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorISSRTD33_Pointer":
"""__New_orig__() -> itkImageDuplicatorISSRTD33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorISSRTD33_Pointer":
"""Clone(itkImageDuplicatorISSRTD33 self) -> itkImageDuplicatorISSRTD33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Clone(self)
def SetInputImage(self, _arg: 'itkImageSSRTD33') -> "void":
"""SetInputImage(itkImageDuplicatorISSRTD33 self, itkImageSSRTD33 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageSSRTD33 *":
"""
GetOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33
GetOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33
"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageSSRTD33 *":
"""GetModifiableOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorISSRTD33 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISSRTD33
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD33 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorISSRTD33"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorISSRTD33
        Create a new object of the class itkImageDuplicatorISSRTD33 and set its inputs and parameters from any
        named or non-named arguments passed to this method.
        New() tries to assign all the non-named parameters to the inputs of the new object: the
        first non-named parameter to the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorISSRTD33.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorISSRTD33.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorISSRTD33.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs have been set, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorISSRTD33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Clone, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_SetInputImage, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetOutput, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetModifiableOutput, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Update, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_swigregister
itkImageDuplicatorISSRTD33_swigregister(itkImageDuplicatorISSRTD33)
def itkImageDuplicatorISSRTD33___New_orig__() -> "itkImageDuplicatorISSRTD33_Pointer":
"""itkImageDuplicatorISSRTD33___New_orig__() -> itkImageDuplicatorISSRTD33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33___New_orig__()
def itkImageDuplicatorISSRTD33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD33 *":
"""itkImageDuplicatorISSRTD33_cast(itkLightObject obj) -> itkImageDuplicatorISSRTD33"""
return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_cast(obj)
class itkImageDuplicatorIUC2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUC2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUC2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUC2_Pointer":
"""Clone(itkImageDuplicatorIUC2 self) -> itkImageDuplicatorIUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_Clone(self)
def SetInputImage(self, _arg: 'itkImageUC2') -> "void":
"""SetInputImage(itkImageDuplicatorIUC2 self, itkImageUC2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUC2 *":
"""
GetOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2
GetOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUC2 *":
"""GetModifiableOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUC2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUC2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUC2
Create a new object of the class itkImageDuplicatorIUC2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUC2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUC2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUC2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_Clone, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_SetInputImage, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetOutput, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetModifiableOutput, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_Update, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUC2_swigregister
itkImageDuplicatorIUC2_swigregister(itkImageDuplicatorIUC2)
def itkImageDuplicatorIUC2___New_orig__() -> "itkImageDuplicatorIUC2_Pointer":
"""itkImageDuplicatorIUC2___New_orig__() -> itkImageDuplicatorIUC2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2___New_orig__()
def itkImageDuplicatorIUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC2 *":
"""itkImageDuplicatorIUC2_cast(itkLightObject obj) -> itkImageDuplicatorIUC2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_cast(obj)
class itkImageDuplicatorIUC3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUC3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUC3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUC3_Pointer":
"""Clone(itkImageDuplicatorIUC3 self) -> itkImageDuplicatorIUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_Clone(self)
def SetInputImage(self, _arg: 'itkImageUC3') -> "void":
"""SetInputImage(itkImageDuplicatorIUC3 self, itkImageUC3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUC3 *":
"""
GetOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3
GetOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUC3 *":
"""GetModifiableOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUC3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUC3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUC3
Create a new object of the class itkImageDuplicatorIUC3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUC3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUC3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUC3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_Clone, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_SetInputImage, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetOutput, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetModifiableOutput, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_Update, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUC3_swigregister
itkImageDuplicatorIUC3_swigregister(itkImageDuplicatorIUC3)
def itkImageDuplicatorIUC3___New_orig__() -> "itkImageDuplicatorIUC3_Pointer":
"""itkImageDuplicatorIUC3___New_orig__() -> itkImageDuplicatorIUC3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3___New_orig__()
def itkImageDuplicatorIUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC3 *":
"""itkImageDuplicatorIUC3_cast(itkLightObject obj) -> itkImageDuplicatorIUC3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_cast(obj)
class itkImageDuplicatorIUL2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUL2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUL2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUL2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUL2_Pointer":
"""Clone(itkImageDuplicatorIUL2 self) -> itkImageDuplicatorIUL2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_Clone(self)
def SetInputImage(self, _arg: 'itkImageUL2') -> "void":
"""SetInputImage(itkImageDuplicatorIUL2 self, itkImageUL2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUL2 *":
"""
GetOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2
GetOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUL2 *":
"""GetModifiableOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUL2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUL2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUL2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUL2
Create a new object of the class itkImageDuplicatorIUL2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUL2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUL2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUL2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUL2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_Clone, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_SetInputImage, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetOutput, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetModifiableOutput, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_Update, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUL2_swigregister
itkImageDuplicatorIUL2_swigregister(itkImageDuplicatorIUL2)
def itkImageDuplicatorIUL2___New_orig__() -> "itkImageDuplicatorIUL2_Pointer":
"""itkImageDuplicatorIUL2___New_orig__() -> itkImageDuplicatorIUL2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2___New_orig__()
def itkImageDuplicatorIUL2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL2 *":
"""itkImageDuplicatorIUL2_cast(itkLightObject obj) -> itkImageDuplicatorIUL2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_cast(obj)
class itkImageDuplicatorIUL3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUL3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUL3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUL3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUL3_Pointer":
"""Clone(itkImageDuplicatorIUL3 self) -> itkImageDuplicatorIUL3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_Clone(self)
def SetInputImage(self, _arg: 'itkImageUL3') -> "void":
"""SetInputImage(itkImageDuplicatorIUL3 self, itkImageUL3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUL3 *":
"""
GetOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3
GetOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUL3 *":
"""GetModifiableOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUL3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUL3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUL3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUL3
Create a new object of the class itkImageDuplicatorIUL3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUL3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUL3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUL3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUL3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_Clone, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_SetInputImage, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetOutput, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetModifiableOutput, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_Update, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUL3_swigregister
itkImageDuplicatorIUL3_swigregister(itkImageDuplicatorIUL3)
def itkImageDuplicatorIUL3___New_orig__() -> "itkImageDuplicatorIUL3_Pointer":
"""itkImageDuplicatorIUL3___New_orig__() -> itkImageDuplicatorIUL3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3___New_orig__()
def itkImageDuplicatorIUL3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL3 *":
"""itkImageDuplicatorIUL3_cast(itkLightObject obj) -> itkImageDuplicatorIUL3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_cast(obj)
class itkImageDuplicatorIUS2(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUS2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUS2_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUS2_Pointer":
"""Clone(itkImageDuplicatorIUS2 self) -> itkImageDuplicatorIUS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_Clone(self)
def SetInputImage(self, _arg: 'itkImageUS2') -> "void":
"""SetInputImage(itkImageDuplicatorIUS2 self, itkImageUS2 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUS2 *":
"""
GetOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2
GetOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUS2 *":
"""GetModifiableOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUS2 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUS2
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS2 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUS2
Create a new object of the class itkImageDuplicatorIUS2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUS2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_Clone, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_SetInputImage, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetOutput, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetModifiableOutput, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_Update, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUS2_swigregister
itkImageDuplicatorIUS2_swigregister(itkImageDuplicatorIUS2)
def itkImageDuplicatorIUS2___New_orig__() -> "itkImageDuplicatorIUS2_Pointer":
"""itkImageDuplicatorIUS2___New_orig__() -> itkImageDuplicatorIUS2_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2___New_orig__()
def itkImageDuplicatorIUS2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS2 *":
"""itkImageDuplicatorIUS2_cast(itkLightObject obj) -> itkImageDuplicatorIUS2"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_cast(obj)
class itkImageDuplicatorIUS3(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIUS3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIUS3_Pointer":
"""__New_orig__() -> itkImageDuplicatorIUS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIUS3_Pointer":
"""Clone(itkImageDuplicatorIUS3 self) -> itkImageDuplicatorIUS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_Clone(self)
def SetInputImage(self, _arg: 'itkImageUS3') -> "void":
"""SetInputImage(itkImageDuplicatorIUS3 self, itkImageUS3 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageUS3 *":
"""
GetOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3
GetOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageUS3 *":
"""GetModifiableOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIUS3 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUS3
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS3 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIUS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIUS3
Create a new object of the class itkImageDuplicatorIUS3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIUS3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_Clone, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_SetInputImage, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetOutput, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetModifiableOutput, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_Update, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUS3_swigregister
itkImageDuplicatorIUS3_swigregister(itkImageDuplicatorIUS3)
def itkImageDuplicatorIUS3___New_orig__() -> "itkImageDuplicatorIUS3_Pointer":
"""itkImageDuplicatorIUS3___New_orig__() -> itkImageDuplicatorIUS3_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3___New_orig__()
def itkImageDuplicatorIUS3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS3 *":
"""itkImageDuplicatorIUS3_cast(itkLightObject obj) -> itkImageDuplicatorIUS3"""
return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_cast(obj)
class itkImageDuplicatorIVF22(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF22 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF22_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF22_Pointer":
"""Clone(itkImageDuplicatorIVF22 self) -> itkImageDuplicatorIVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF22') -> "void":
"""SetInputImage(itkImageDuplicatorIVF22 self, itkImageVF22 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF22 *":
"""
GetOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22
GetOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF22 *":
"""GetModifiableOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF22 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF22
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF22 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF22
Create a new object of the class itkImageDuplicatorIVF22 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF22.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF22.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF22.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_Clone, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_SetInputImage, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetOutput, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetModifiableOutput, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_Update, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF22_swigregister
itkImageDuplicatorIVF22_swigregister(itkImageDuplicatorIVF22)
def itkImageDuplicatorIVF22___New_orig__() -> "itkImageDuplicatorIVF22_Pointer":
"""itkImageDuplicatorIVF22___New_orig__() -> itkImageDuplicatorIVF22_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22___New_orig__()
def itkImageDuplicatorIVF22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF22 *":
"""itkImageDuplicatorIVF22_cast(itkLightObject obj) -> itkImageDuplicatorIVF22"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_cast(obj)
class itkImageDuplicatorIVF23(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF23 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF23_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF23_Pointer":
"""Clone(itkImageDuplicatorIVF23 self) -> itkImageDuplicatorIVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF23') -> "void":
"""SetInputImage(itkImageDuplicatorIVF23 self, itkImageVF23 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF23 *":
"""
GetOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23
GetOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF23 *":
"""GetModifiableOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF23 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF23
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF23 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF23
Create a new object of the class itkImageDuplicatorIVF23 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF23.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF23.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF23.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF23.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_Clone, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_SetInputImage, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetOutput, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetModifiableOutput, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_Update, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF23_swigregister
itkImageDuplicatorIVF23_swigregister(itkImageDuplicatorIVF23)
def itkImageDuplicatorIVF23___New_orig__() -> "itkImageDuplicatorIVF23_Pointer":
"""itkImageDuplicatorIVF23___New_orig__() -> itkImageDuplicatorIVF23_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23___New_orig__()
def itkImageDuplicatorIVF23_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF23 *":
"""itkImageDuplicatorIVF23_cast(itkLightObject obj) -> itkImageDuplicatorIVF23"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_cast(obj)
class itkImageDuplicatorIVF32(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF32 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF32_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF32_Pointer":
"""Clone(itkImageDuplicatorIVF32 self) -> itkImageDuplicatorIVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF32') -> "void":
"""SetInputImage(itkImageDuplicatorIVF32 self, itkImageVF32 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF32 *":
"""
GetOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32
GetOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF32 *":
"""GetModifiableOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF32 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF32
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF32 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF32
Create a new object of the class itkImageDuplicatorIVF32 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF32.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF32.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF32.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF32.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_Clone, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_SetInputImage, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetOutput, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetModifiableOutput, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_Update, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF32_swigregister
itkImageDuplicatorIVF32_swigregister(itkImageDuplicatorIVF32)
def itkImageDuplicatorIVF32___New_orig__() -> "itkImageDuplicatorIVF32_Pointer":
"""itkImageDuplicatorIVF32___New_orig__() -> itkImageDuplicatorIVF32_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32___New_orig__()
def itkImageDuplicatorIVF32_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF32 *":
"""itkImageDuplicatorIVF32_cast(itkLightObject obj) -> itkImageDuplicatorIVF32"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_cast(obj)
class itkImageDuplicatorIVF33(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF33 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF33_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF33_Pointer":
"""Clone(itkImageDuplicatorIVF33 self) -> itkImageDuplicatorIVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF33') -> "void":
"""SetInputImage(itkImageDuplicatorIVF33 self, itkImageVF33 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF33 *":
"""
GetOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33
GetOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF33 *":
"""GetModifiableOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF33 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF33
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF33 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF33
Create a new object of the class itkImageDuplicatorIVF33 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF33.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF33.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF33.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_Clone, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_SetInputImage, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetOutput, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetModifiableOutput, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_Update, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF33_swigregister
itkImageDuplicatorIVF33_swigregister(itkImageDuplicatorIVF33)
def itkImageDuplicatorIVF33___New_orig__() -> "itkImageDuplicatorIVF33_Pointer":
"""itkImageDuplicatorIVF33___New_orig__() -> itkImageDuplicatorIVF33_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33___New_orig__()
def itkImageDuplicatorIVF33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF33 *":
"""itkImageDuplicatorIVF33_cast(itkLightObject obj) -> itkImageDuplicatorIVF33"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_cast(obj)
class itkImageDuplicatorIVF42(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF42 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF42_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF42_Pointer":
"""Clone(itkImageDuplicatorIVF42 self) -> itkImageDuplicatorIVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF42') -> "void":
"""SetInputImage(itkImageDuplicatorIVF42 self, itkImageVF42 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF42 *":
"""
GetOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42
GetOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF42 *":
"""GetModifiableOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF42 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF42
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF42 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF42
Create a new object of the class itkImageDuplicatorIVF42 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF42.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF42.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF42.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF42.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_Clone, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_SetInputImage, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetOutput, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetModifiableOutput, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_Update, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF42_swigregister
itkImageDuplicatorIVF42_swigregister(itkImageDuplicatorIVF42)
def itkImageDuplicatorIVF42___New_orig__() -> "itkImageDuplicatorIVF42_Pointer":
"""itkImageDuplicatorIVF42___New_orig__() -> itkImageDuplicatorIVF42_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42___New_orig__()
def itkImageDuplicatorIVF42_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF42 *":
"""itkImageDuplicatorIVF42_cast(itkLightObject obj) -> itkImageDuplicatorIVF42"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_cast(obj)
class itkImageDuplicatorIVF43(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorIVF43 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorIVF43_Pointer":
"""__New_orig__() -> itkImageDuplicatorIVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorIVF43_Pointer":
"""Clone(itkImageDuplicatorIVF43 self) -> itkImageDuplicatorIVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_Clone(self)
def SetInputImage(self, _arg: 'itkImageVF43') -> "void":
"""SetInputImage(itkImageDuplicatorIVF43 self, itkImageVF43 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageVF43 *":
"""
GetOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43
GetOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43
"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageVF43 *":
"""GetModifiableOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorIVF43 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF43
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF43 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorIVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorIVF43
Create a new object of the class itkImageDuplicatorIVF43 and set the input and the parameters if some
named or non-named arguments are passed to that method.
        New() tries to assign all the non-named parameters to the inputs of the new object - the
        first non-named parameter to the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorIVF43.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorIVF43.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorIVF43.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
"""Create an object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
"""
self.Update()
return self.GetOutput()
itkImageDuplicatorIVF43.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_Clone, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_SetInputImage, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetOutput, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetModifiableOutput, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_Update, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF43_swigregister
itkImageDuplicatorIVF43_swigregister(itkImageDuplicatorIVF43)
def itkImageDuplicatorIVF43___New_orig__() -> "itkImageDuplicatorIVF43_Pointer":
"""itkImageDuplicatorIVF43___New_orig__() -> itkImageDuplicatorIVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43___New_orig__()
def itkImageDuplicatorIVF43_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF43 *":
"""itkImageDuplicatorIVF43_cast(itkLightObject obj) -> itkImageDuplicatorIVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_cast(obj)
def image_duplicator(*args, **kwargs):
"""Procedural interface for ImageDuplicator"""
import itk
instance = itk.ImageDuplicator.New(*args, **kwargs)
return instance.__internal_call__()
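# Usage sketch (assumes a working ITK Python environment; `image` is a
# hypothetical, already-loaded itk.Image instance):
#
#     import itk
#     copy = image_duplicator(image)   # deep, independent copy of `image`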
def image_duplicator_init_docstring():
import itk
import itkTemplate
if isinstance(itk.ImageDuplicator, itkTemplate.itkTemplate):
image_duplicator.__doc__ = itk.ImageDuplicator.values()[0].__doc__
else:
image_duplicator.__doc__ = itk.ImageDuplicator.__doc__
# File: /shivi_khanuja/django/DojoNinja/apps/dojoninja/apps.py (repo: hmp36/python_aug_2017, no license)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DojoninjaConfig(AppConfig):
name = 'dojoninja'
# File: /brownies/bin/lev-vis.py (repo: LLNL/fudge, license: BSD-3-Clause)
#! /usr/bin/env python
import sys
import os
import argparse
import brownies.BNL.RIPL.level_density as LD
import brownies.BNL.RIPL.level_scheme as LS
from PoPs.chemicalElements.misc import symbolFromZ as elementSymbolFromZ
import numpy as np
import matplotlib.pyplot as plt
HOME=os.environ['HOME']
DESKTOP=HOME+'/Desktop'
ATLAS=DESKTOP+"/atlas/"
RIPL=DESKTOP+"/empire.trunk/RIPL/"
# --------------------------------------------------------------------
# Command line
# --------------------------------------------------------------------
def parse_args():
parser = argparse.ArgumentParser(description="Plot ENTIRE level schemes, as determined from RIPL and the Atlas")
    parser.add_argument('Z', type=int, help='Nuclear charge (atomic number)')
    parser.add_argument('A', type=int, help="Nuclear mass number")
parser.add_argument('-v', dest='verbose', default=False, action='store_true', help='Run verbosely')
parser.add_argument('-q', dest='verbose', action='store_false', help='Run quietly')
parser.add_argument('--RIPL', default=RIPL, help="Path to RIPL files")
parser.add_argument('--ATLAS', default=ATLAS, help="Path to atlas project")
parser.add_argument('--dEmax', type=float, default=2.0, help="Plot from Emin=0 to Emax=Esep+dEmax, dEmax in MeV")
return parser.parse_args()
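# Example invocation (illustrative): "lev-vis.py 40 91 --dEmax 3.0" plots the
# 91Zr level scheme (Z=40, A=91), with n+90Zr resonance data, from E*=0 up to
# Esep + 3 MeV.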
# -----------------------------------------------------------------------
# Set defaults
# -----------------------------------------------------------------------
args = parse_args()
Z, A=args.Z, args.A
elem=elementSymbolFromZ[Z]
sym='%s%i'%(elem, A)
symTarget='%s%i'%(elem, A-1) # for n+target = isotope of interest
ld, D, levs=None, None, None
sys.path.append(args.ATLAS)
import atlas.io as aio
# -----------------------------------------------------------------------
# Get data
# -----------------------------------------------------------------------
# Get the level scheme for Z,A. From RIPL-3.
fname=args.RIPL+"/levels/z%s.dat"%str(Z).zfill(3)
with open(fname, mode='r') as f:
levMap=LS.readRIPLLevelScheme(f.read(), verbose=False)
levs=levMap[sym]
spinTarget = levMap[symTarget].levels[0].spin
parityTarget = levMap[symTarget].levels[0].parity
spin0=levMap[sym].levels[0].spin
parity0=levMap[sym].levels[0].parity
if args.verbose:
print('target:', symTarget, spinTarget, parityTarget)
print('compound:', sym, spin0, parity0)
print(levs.name, levs.Sn)
print(levs.levelReport())
# Get the HFB level density for Z,A. From RIPL-3.
fname=args.RIPL+"/densities/total/level-densities-hfb/z%s.tab"%str(Z).zfill(3)
with open(fname, mode='r') as f:
ld=LD.readHFBMLevelDensityTable(f.read(), Z, A, verbose=True)
# Get the mean level spacing for the resonance region for Z,A-1.
# The compound nucleus # for neutron + Z,A-1 is Z,A. From RIPL-3.
# FIXME
# Get the resonances for Z,A-1. The compound nucleus for neutron + Z,A-1 is Z,A.
# From the Atlas of Neutron Resonances, 6th edition.
try:
res = aio.read_atlas(isotope=None, element=None, Z=Z, A=A-1, ZA=None, verbose=False)
except KeyError:
res = None
if args.verbose and res is not None:
for r in res.resonance_parameters:
print('\t'.join([str(x) for x in r]))
icut = levs.lastLevelInCompleteScheme
Ecut = levs.levels[icut].energy.value
Esep = levs.Sn.value
Emin = 0.0
Emax = Esep+args.dEmax
# -----------------------------------------------------------------------
# Hi!
# -----------------------------------------------------------------------
for NAME in ["Angie", 'Nathaniel', "Mami"]:
print('hi '+NAME)
# -----------------------------------------------------------------------
# Make plot
# -----------------------------------------------------------------------
# Set up axes and title
plt.title(levs.name)
plt.xlabel(r"$\Pi*J$")
plt.ylabel("$E^*$ (MeV)")
# Widget to get J & Pi, a common theme
def get_J_and_Pi(lev, useNone=True):
    # NOTE: the original signature took '__lev' but the body read the module
    # level loop variable 'lev'; the parameter is now used consistently.
    if lev.spin is None:
        if useNone:
            J = None
        else:
            J = -11.33333
    else:
        J = float(lev.spin)
    if lev.parity is None:
        if useNone:
            Pi = None
        else:
            Pi = 1.0
    else:
        Pi = float(str(lev.parity))
return J, Pi
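# For example, a level with spin 2 and parity -1 yields (2.0, -1.0); a missing
# datum comes back as None, or as the sentinel values above when useNone is
# False so the level can still be placed on the Pi*J axis.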
# Plot discrete levels with complete information
if True:
    x, y, xerr = [], [], []  # for completely known levels
for lev in levs.levels:
J, Pi=get_J_and_Pi(lev, True)
if J is None or Pi is None:
pass
else:
if lev.energy.value > Emax:
continue
y.append(lev.energy.value)
x.append(J*Pi)
xerr.append(0.25)
plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='k')
# Highlight ground state
if True:
plt.errorbar(x=[float(spin0)*int(parity0)], y=[0.0], xerr=[0.25], linewidth=0, elinewidth=2, color='g')
# Highlight gamma transitions
if False:
raise NotImplementedError("Highlight gamma transitions")
# Plot discrete levels missing either J or Pi
if True:
    x, y, xerr = [], [], []  # for completely known levels
for lev in levs.levels:
J, Pi=get_J_and_Pi(lev, True)
if J is None or Pi is None:
J, Pi=get_J_and_Pi(lev, False)
if lev.energy.value > Emax:
continue
y.append(lev.energy.value)
x.append(J*Pi)
xerr.append(0.25)
else:
pass
plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='blue')
# Highlight rotational bands
if False:
raise NotImplementedError("Highlight rotational bands")
# Highlight vibrational "bands"
if False:
raise NotImplementedError('Highlight vibrational "bands"')
# Plot Ecut
if True:
plt.axhline(y=Ecut, color='black', alpha=0.25, linestyle=':')
plt.text(11, Ecut+0.1, r'$E_{cut}$')
# Plot level density contour plot
if True:
JPigrid = []
for Pi in ld.spin_dep_level_density:
for twoJ in ld.spin_dep_level_density[Pi].keys():
if twoJ > 30: continue
JPigrid.append(Pi*twoJ/2.0)
JPigrid.sort()
JPigrid = np.array(JPigrid)
Exgrid = np.arange(Emin, Emax, 0.1) # np.arange(Ecut, 5.0+Esep, 0.1)
X, Y = np.meshgrid(JPigrid, Exgrid)
vevaluate = np.vectorize(ld.evaluate)
Z = vevaluate(Y, np.abs(X), np.sign(X))
CS = plt.contour(X, Y, Z, levels=[0, 1, 5]+[int(x) for x in np.logspace(1,3,14)])
plt.clabel(CS, fontsize=9, inline=1)
# Plot Esep
if True:
plt.axhline(y=Esep, color='black', alpha=0.25)
plt.text(11, Esep+0.1, r'$E_{sep}$')
# Plot box of levels that can be excited by n+target reactions
if False:
raise NotImplementedError("Plot box of levels that can be excited by n+target reactions")
# Plot resonances with known J
if True and res is not None:
    x, y, xerr, yerr = [], [], [], []  # for completely known levels
for r in res.resonance_parameters:
Er=r[0].value*1e-6
if Er < 0.0:
continue
Er += Esep
J, L = r[1].spin, r[2].spin
if J is None or L is None:
continue
Pi = pow(-1, int(L)) * int(parityTarget) # FIXME: check math!
y.append(Er)
x.append(float(J)*Pi)
# FIXME: fuzzy band from width
xerr.append(0.25)
yerr.append(0.0)
plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='red')
# Highlight primary gammas
if False:
raise NotImplementedError("Highlight primary gammas")
# Plot resonances with unknown J
if False:
raise NotImplementedError("Plot resonances with unknown J")
plt.show()
| [
"[email protected]"
] | |
bd0caf3452ccfe76d5df1c8d98d8f0cb9a1b329a | 384d0be5ac54b306b945cf38c10d9b0a44c975ea | /stack/keystone/keystone/logic/types/biller.py | 00a135b11cbea421c4b431b0ff3dd0914cf9dfc5 | [
"Apache-2.0"
] | permissive | ashokcse/openstack-bill | 05ae313637b3cfecba946d2a9b32e8c7609fc721 | 1a3d7575d4b341f64fa1764ed47e47a7504a9bcc | refs/heads/master | 2021-01-18T14:05:24.696165 | 2012-09-12T11:29:20 | 2012-09-12T11:29:20 | 5,424,267 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,643 | py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from lxml import etree
from datetime import datetime
from keystone.logic.types import fault
LOG = logging.getLogger('keystone.logic.service')
LOG.info('entering Bill_Unit')
class Bill_Unit(object):
"""class for holding bill unit details!"""
def __init__(self,id=None, vcpu=None, ram=None,
vdisk=None, date=None, changed_on=None, enabled=None):
        LOG.info('keystone logic biller __init__ id:%s vcpu:%s ram:%s vdisk:%s date:%s changed_on:%s enabled:%s' % (id, vcpu, ram, vdisk, date, changed_on, enabled))
self.id = id
self.vcpu = vcpu
self.ram = ram
self.vdisk = vdisk
self.date = date
self.changed_on = changed_on
self.enabled = enabled and True or False
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"biller")
if root == None:
raise fault.BadRequestFault("Expecting Bill_Unit")
vcpu = root.get("vcpu")
ram = root.get("ram")
vdisk = root.get("vdisk")
date = root.get("date")
enabled = root.get("enabled")
if not vcpu:
raise fault.BadRequestFault("Expecting Bill_Unit")
elif not vdisk:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
enabled = enabled is None or enabled.lower() in ["true", "yes"]
            LOG.info('keystone logic biller py from_xml vcpu:%s ram:%s vdisk:%s date:%s enabled:%s' % (vcpu, ram, vdisk, date, enabled))
            # the 'id' attribute name mirrors the one written by to_dom() below
            return Bill_Unit(root.get("id"), vcpu, ram, vdisk, date, None, enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))
@staticmethod
def from_json(json_str):
LOG.info('keystone logic types biller py from_json before try %s' %json_str)
try:
obj = json.loads(json_str)
if not "biller" in obj:
raise fault.BadRequestFault("Expecting Bill_Unit")
LOG.info('keystone logic types biller py from_json object %s' %obj)
biller = obj["biller"]
LOG.info('keystone logic types biller py from_json biller %s' %biller)
vcpu = biller.get('vcpu', None)
            LOG.info('keystone logic types biller py from_json before IF vcpu %s' % vcpu)
if(vcpu == None or vcpu == 0):
raise fault.BadRequestFault("Expecting Bill_Unit")
LOG.info('keystone logic types biller py from_json before ram')
            ram = biller.get("ram")
            LOG.info('keystone logic types biller py from_json after ram')
            # .get() keeps these optional fields bound (as None) when the
            # keys are absent, instead of leaving them unbound at the return.
            date = biller.get("date")
            #date = datetime.strptime(biller["date"], "%Y-%m-%d")
            changed_on = biller.get("changed_on")
LOG.info('keystone logic types biller py from_json after date : %s created date: %s' %(date, changed_on))
if "vdisk" not in biller:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
vdisk = biller["vdisk"]
LOG.info('keystone logic types biller py from_json vdisk : %s ' %vdisk)
if "enabled" in biller:
set_enabled = biller["enabled"]
if not isinstance(set_enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
else:
set_enabled = True
LOG.info('keystone logic types biller py from_json set_enabled : %s ' %set_enabled)
id = biller.get('id', None)
            LOG.info('before return id:%s vcpu:%s ram:%s vdisk:%s date:%s enabled:%s' % (id, vcpu, ram, vdisk, date, set_enabled))
return Bill_Unit(id, vcpu, ram, vdisk, date, changed_on, set_enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse bill Unit", str(e))
def to_dom(self):
dom = etree.Element("biller",
xmlns="http://docs.openstack.org/identity/api/v2.0")
if self.vdisk:
dom.set("vdisk", unicode(self.vdisk))
if self.ram:
dom.set("ram", unicode(self.ram))
if self.id:
dom.set("id", unicode(self.id))
if self.vcpu:
dom.set("vcpu", unicode(self.vcpu))
if self.date:
dom.set("date", unicode(self.date))
if self.changed_on:
dom.set("created_on", unicode(self.changed_on))
if self.enabled:
dom.set("enabled", unicode(self.enabled).lower())
        LOG.info('keystone logic biller py to_dom id:%s vcpu:%s ram:%s vdisk:%s date:%s changed_on:%s enabled:%s' % (self.id, self.vcpu, self.ram, self.vdisk, self.date, self.changed_on, self.enabled))
return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
biller = {}
if self.id:
biller["id"] = unicode(self.id)
if self.vcpu:
biller["vcpu"] = unicode(self.vcpu)
if self.ram:
biller["ram"] = unicode(self.ram)
biller["vdisk"] = unicode(self.vdisk)
biller["date"] = unicode(self.date)
biller["changed_on"] = unicode(self.changed_on)
biller["enabled"] = self.enabled
return {'biller':biller}
def to_json(self):
return json.dumps(self.to_dict())
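    # Illustrative JSON payload accepted by Bill_Unit.from_json above
    # (all field values here are made up):
    #   {"biller": {"id": "1", "vcpu": 2, "ram": 2048, "vdisk": 20,
    #               "date": "2011-06-01", "changed_on": "2011-06-02",
    #               "enabled": true}}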
class Instance_Bill(object):
"""class for holding instance bill details!"""
def __init__(self,id=None, name=None, total_vcpu=None, total_ram=None,
total_vdisk=None, changed_on=None, total_cost=None, enabled=None):
LOG.info('keystone logic instance biller __init__ start' )
# LOG.info('keystone logic instance biller __init__ id: name : %s toatl vcpu:%d ram:%d vdisk:%d total_cost:%s changed on : %s enabled:%d'% ( name, total_vcpu, total_ram, total_vdisk, total_cost, changed_on, enabled))
self.id = id
self.name = name
self.total_vcpu = total_vcpu
self.total_ram = total_ram
self.total_vdisk = total_vdisk
self.total_cost = total_cost
self.changed_on = changed_on
self.enabled = enabled and True or False
LOG.info('keystone logic instance biller __init__ end' )
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"biller")
if root == None:
raise fault.BadRequestFault("Expecting Bill_Unit")
total_vcpu = root.get("total_vcpu")
total_ram = root.get("total_ram")
total_vdisk = root.get("total_vdisk")
name = root.get("name")
enabled = root.get("enabled")
if not total_vcpu:
raise fault.BadRequestFault("Expecting Bill_Unit")
elif not total_vdisk:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
enabled = enabled is None or enabled.lower() in ["true", "yes"]
            LOG.info('keystone logic biller py from_xml total_vcpu:%s total_ram:%s total_vdisk:%s name:%s enabled:%s' % (total_vcpu, total_ram, total_vdisk, name, enabled))
            return Instance_Bill(root.get("id"), name, total_vcpu, total_ram, total_vdisk, None, None, enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))
@staticmethod
def from_json(json_str):
LOG.info('keystone logic types biller py from_json before try %s' %json_str)
try:
obj = json.loads(json_str)
if not "biller" in obj:
raise fault.BadRequestFault("Expecting Bill_Unit")
LOG.info('keystone logic types biller py from_json object %s' %obj)
biller = obj["biller"]
LOG.info('keystone logic types biller py from_json biller %s' %biller)
total_vcpu = biller.get('total_vcpu', None)
            LOG.info('keystone logic types biller py from_json before IF vcpu %s' % total_vcpu)
if(total_vcpu == None or total_vcpu == 0):
raise fault.BadRequestFault("Expecting Instance_Bill_Unit")
LOG.info('keystone logic types biller py from_json before ram')
            total_ram = biller.get("total_ram")
            LOG.info('keystone logic types biller py from_json after total_ram')
            # .get() keeps these optional fields bound (as None) when absent
            name = biller.get("name")
            total_cost = biller.get("total_cost")
            changed_on = biller.get("changed_on")
LOG.info('\n keystone logic types biller py from_json after name : %s created date: %s' %(name, changed_on))
if "total_vdisk" not in biller:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
total_vdisk = biller["total_vdisk"]
LOG.info('keystone logic types biller py from_json vdisk : %s ' %total_vdisk)
if "enabled" in biller:
set_enabled = biller["enabled"]
if not isinstance(set_enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
else:
set_enabled = True
LOG.info('keystone logic types biller py from_json set_enabled : %s ' %set_enabled)
id = biller.get('id', None)
            LOG.info('before instance bill json return id:%s name:%s total_vcpu:%s total_ram:%s total_vdisk:%s total_cost:%s enabled:%s' % (id, name, total_vcpu, total_ram, total_vdisk, total_cost, set_enabled))
return Instance_Bill(id, name, total_vcpu, total_ram, total_vdisk, changed_on, total_cost, set_enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse Instance bill ", str(e))
    def to_dom(self):
        dom = etree.Element("biller",
                        xmlns="http://docs.openstack.org/identity/api/v2.0")
        # Use the total_* attribute names this class actually defines; the
        # original referenced self.vdisk/self.ram/self.vcpu, which don't exist.
        if self.total_vdisk:
            dom.set("total_vdisk", unicode(self.total_vdisk))
        if self.total_ram:
            dom.set("total_ram", unicode(self.total_ram))
        if self.id:
            dom.set("id", unicode(self.id))
        if self.total_vcpu:
            dom.set("total_vcpu", unicode(self.total_vcpu))
        if self.name:
            dom.set("name", unicode(self.name))
        if self.total_cost:
            dom.set("total_cost", unicode(self.total_cost))
        if self.changed_on:
            dom.set("created_on", unicode(self.changed_on))
        if self.enabled:
            dom.set("enabled", unicode(self.enabled).lower())
        LOG.info('keystone logic biller py to_dom id:%s name:%s total_vcpu:%s total_ram:%s total_vdisk:%s changed_on:%s enabled:%s' % (self.id, self.name, self.total_vcpu, self.total_ram, self.total_vdisk, self.changed_on, self.enabled))
        return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
biller = {}
if self.id:
biller["id"] = unicode(self.id)
if self.total_vcpu:
biller["total_vcpu"] = unicode(self.total_vcpu)
if self.total_ram:
biller["total_ram"] = unicode(self.total_ram)
biller["total_vdisk"] = unicode(self.total_vdisk)
biller["name"] = unicode(self.name)
biller["total_cost"] = unicode(self.total_cost)
biller["changed_on"] = unicode(self.changed_on)
biller["enabled"] = self.enabled
return {'biller':biller}
def to_json(self):
return json.dumps(self.to_dict())
#-User Bill----------#
class User_Bill(object):
"""class for holding instance bill details!"""
def __init__(self,id=None, user_id=None, tenant_id=None, total_vcpu=None, total_ram=None,
total_vdisk=None, bill_month=None, total_cost=None, enabled=None):
LOG.info('keystone logic User_Billbiller __init__ start' )
# LOG.info('keystone logic instance biller __init__ id: name : %s toatl vcpu:%d ram:%d vdisk:%d total_cost:%s changed on : %s enabled:%d'% ( name, total_vcpu, total_ram, total_vdisk, total_cost, changed_on, enabled))
self.id = id
self.user_id = user_id
self.tenant_id = tenant_id
self.total_vcpu = total_vcpu
self.total_ram = total_ram
self.total_vdisk = total_vdisk
self.total_cost = total_cost
self.bill_month = bill_month
self.enabled = enabled and True or False
LOG.info('keystone logic User_Bill biller __init__ end' )
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"biller")
if root == None:
raise fault.BadRequestFault("Expecting Bill_Unit")
total_vcpu = root.get("total_vcpu")
total_ram = root.get("total_ram")
total_vdisk = root.get("total_vdisk")
name = root.get("name")
enabled = root.get("enabled")
if not total_vcpu:
raise fault.BadRequestFault("Expecting Bill_Unit")
elif not total_vdisk:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
enabled = enabled is None or enabled.lower() in ["true", "yes"]
            LOG.info('keystone logic biller py from_xml total_vcpu:%s total_ram:%s total_vdisk:%s name:%s enabled:%s' % (total_vcpu, total_ram, total_vdisk, name, enabled))
            # attribute names mirror those written by User_Bill.to_dom() below
            return User_Bill(root.get("id"), root.get("user_id"), root.get("tenant_id"), total_vcpu, total_ram, total_vdisk, root.get("bill_month"), root.get("total_cost"), enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))
@staticmethod
def from_json(json_str):
LOG.info('keystone logic types User Bill biller py from_json before try %s' %json_str)
try:
obj = json.loads(json_str)
if not "biller" in obj:
raise fault.BadRequestFault("Expecting User_Bill")
LOG.info('keystone logic types biller py from_json object %s' %obj)
biller = obj["biller"]
LOG.info('keystone logic types biller py from_json user_bill %s' %biller)
total_vcpu = biller.get('total_vcpu', None)
            LOG.info('keystone logic types biller py from_json before IF vcpu %s' % total_vcpu)
if(total_vcpu == None or total_vcpu == 0):
raise fault.BadRequestFault("Expecting User_Bill")
LOG.info('keystone logic types biller py from_json before ram')
            total_ram = biller.get("total_ram")
            LOG.info('keystone logic types biller py from_json after total_ram')
            # .get() keeps these optional fields bound (as None) when absent
            user_id = biller.get("user_id")
            tenant_id = biller.get("tenant_id")
            total_cost = biller.get("total_cost")
            bill_month = biller.get("bill_month")
LOG.info('\n keystone logic types biller py from_json after name : %s created date: %s' %(user_id, bill_month))
if "total_vdisk" not in biller:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
total_vdisk = biller["total_vdisk"]
LOG.info('keystone logic types biller py from_json vdisk : %s ' %total_vdisk)
if "enabled" in biller:
set_enabled = biller["enabled"]
if not isinstance(set_enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
else:
set_enabled = True
            LOG.info('keystone logic types biller py from_json user_bill set_enabled: %s' % set_enabled)
id = biller.get('id', None)
            LOG.info('before user bill json return id:%s user_id:%s tenant_id:%s total_vcpu:%s total_ram:%s total_vdisk:%s total_cost:%s bill_month:%s enabled:%s' % (id, user_id, tenant_id, total_vcpu, total_ram, total_vdisk, total_cost, bill_month, set_enabled))
return User_Bill(id, user_id, tenant_id, total_vcpu, total_ram, total_vdisk, bill_month, total_cost, set_enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse keystone logic types biller py from_json User bill ", str(e))
    def to_dom(self):
        dom = etree.Element("biller",
                        xmlns="http://docs.openstack.org/identity/api/v2.0")
        # Use the total_* attribute names this class actually defines; the
        # original referenced self.vdisk/self.ram/self.vcpu, which don't exist.
        if self.total_vdisk:
            dom.set("total_vdisk", unicode(self.total_vdisk))
        if self.total_ram:
            dom.set("total_ram", unicode(self.total_ram))
        if self.id:
            dom.set("id", unicode(self.id))
        if self.total_vcpu:
            dom.set("total_vcpu", unicode(self.total_vcpu))
        if self.user_id:
            dom.set("user_id", unicode(self.user_id))
        if self.tenant_id:
            dom.set("tenant_id", unicode(self.tenant_id))
        if self.total_cost:
            dom.set("total_cost", unicode(self.total_cost))
        if self.bill_month:
            dom.set("bill_month", unicode(self.bill_month))
        if self.enabled:
            dom.set("enabled", unicode(self.enabled).lower())
        LOG.info('keystone logic biller py to_dom id:%s user_id:%s total_vcpu:%s total_ram:%s total_vdisk:%s bill_month:%s enabled:%s' % (self.id, self.user_id, self.total_vcpu, self.total_ram, self.total_vdisk, self.bill_month, self.enabled))
        return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
biller = {}
if self.id:
biller["id"] = unicode(self.id)
if self.total_vcpu:
biller["total_vcpu"] = unicode(self.total_vcpu)
if self.total_ram:
biller["total_ram"] = unicode(self.total_ram)
biller["user_id"] = unicode(self.user_id)
biller["tenant_id"] = unicode(self.tenant_id)
biller["total_vdisk"] = unicode(self.total_vdisk)
biller["total_cost"] = unicode(self.total_cost)
biller["bill_month"] = unicode(self.bill_month)
biller["enabled"] = self.enabled
return {'biller':biller}
def to_json(self):
return json.dumps(self.to_dict())
| [
"[email protected]"
] | |
56c82dd9a2f16f67ef47c7062fa1ce5db1ae45cf | 029948b3fd0e41d80d66c84d808abff4fcb24ac8 | /test/test_path_response_result_response_egress_physical_interface.py | b1100af354156581698006d61033889305c3445f | [] | no_license | yijxiang/dnac-api-client | 842d1da9e156820942656b8f34342d52c96d3c37 | 256d016e2df8fc1b3fdad6e28f441c6005b43b07 | refs/heads/master | 2021-09-25T21:10:09.502447 | 2018-10-25T14:39:57 | 2018-10-25T14:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | # coding: utf-8
"""
Cisco DNA Center Platform v. 1.2.x (EFT)
REST API (EFT) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dnac_api_client
from dnac_api_client.models.path_response_result_response_egress_physical_interface import PathResponseResultResponseEgressPhysicalInterface # noqa: E501
from dnac_api_client.rest import ApiException
class TestPathResponseResultResponseEgressPhysicalInterface(unittest.TestCase):
"""PathResponseResultResponseEgressPhysicalInterface unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPathResponseResultResponseEgressPhysicalInterface(self):
"""Test PathResponseResultResponseEgressPhysicalInterface"""
# FIXME: construct object with mandatory attributes with example values
# model = dnac_api_client.models.path_response_result_response_egress_physical_interface.PathResponseResultResponseEgressPhysicalInterface() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d3238509ecaea8d3e0a51a8943890b4578e5a8da | e3d447a81c5462d2d14201f2bc6b82cdcbbca51a | /chapter10/c10_6_addition.py | af50d5e3378247cb7a726c51df05b727370cecc4 | [] | no_license | barcern/python-crash-course | f6026f13f75ecddc7806711d65bc53cb88e24496 | 8b55775c9f0ed49444becb35b8d529620537fa54 | refs/heads/master | 2023-04-19T17:28:44.342022 | 2021-02-07T23:51:06 | 2021-02-07T23:51:06 | 257,201,280 | 2 | 3 | null | 2021-05-12T17:35:56 | 2020-04-20T07:14:28 | Python | UTF-8 | Python | false | false | 2,214 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 12:27:25 2020
@author: barbora
One common problem when prompting for numerical input occurs when people
provide text instead of numbers. When you try to convert the input to an int,
you'll get a ValueError. Write a program that prompts for two numbers.
Add them together and print the result. Catch the ValueError if either input
value is not a number, and print a friendly error message. Test your program
by entering two numbers and then by entering some text instead of a number.
"""
# Option 1 - while loop
# Create a while loop to allow for users to input the two values
flag = True
while flag:
message1 = "Please input the first value to add. To quit, type 'q': "
message2 = "Please input the second value to add. To quit, type 'q': "
value1 = input(message1)
# Exit conditions
if (value1 == 'q'):
print("Ending program")
break
value2 = input(message2)
if (value2 == 'q'):
print("Ending program")
break
# Convert to integer and check for a ValueError
try:
int1 = int(value1)
int2 = int(value2)
except ValueError:
print("Please input two integer values")
else:
result = int1 + int2
print(f"Final result: {result}")
# Option 2 - while loop and function
# Create a function to add two values
def addition(value1, value2):
"""Function to add two integer values, with a ValueError check."""
try:
int1 = int(value1)
int2 = int(value2)
except ValueError:
return("Please input two integer values")
else:
result = int1 + int2
return(f"Final result: {result}")
print(addition(2,3))
# While loop to obtain user input
flag = True
while flag:
message1 = "Please input the first value to add. To quit, type 'q': "
message2 = "Please input the second value to add. To quit, type 'q': "
value1 = input(message1)
# Exit conditions
if (value1 == 'q'):
print("Ending program")
break
value2 = input(message2)
if (value2 == 'q'):
print("Ending program")
break
# Call function
print(addition(value1, value2))
| [
"[email protected]"
] | |
562d159153258105237dee275a61136e7c194853 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.3/django/contrib/localflavor/generic/forms.py | b8a111a6b5f57fa81698f292b86258925d561b4a | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/django/contrib/localflavor/generic/forms.py | [
"[email protected]"
] | |
bdf4f576aceba31d7d274c2ec7efd61e1f4a337c | 5d48aba44824ff9b9ae7e3616df10aad323c260e | /bfs/127.word_ladder.py | 0e02bffe5c4014c13978aea31a08fd842253ceea | [] | no_license | eric496/leetcode.py | 37eab98a68d6d3417780230f4b5a840f6d4bd2a6 | 32a76cf4ced6ed5f89b5fc98af4695b8a81b9f17 | refs/heads/master | 2021-07-25T11:08:36.776720 | 2021-07-01T15:49:31 | 2021-07-01T15:49:31 | 139,770,188 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | """
Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
Example 1:
Input:
beginWord = "hit",
endWord = "cog",
wordList = ["hot","dot","dog","lot","log","cog"]
Output: 5
Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Example 2:
Input:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Output: 0
Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
"""
from collections import deque
from typing import List  # needed for the List[str] annotation below
import string
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
word_set = set(wordList)
q = deque([(beginWord, 1)])
visited = {beginWord}
while q:
word, step = q.popleft()
if word == endWord:
return step
for i in range(len(word)):
for c in string.ascii_lowercase:
new_word = word[:i] + c + word[i + 1 :]
if new_word in word_set and new_word not in visited:
q.append((new_word, step + 1))
visited.add(new_word)
return 0
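# Minimal sanity check with the examples from the problem statement above;
# this runs only when the file is executed directly (LeetCode uses the class).
if __name__ == "__main__":
    words = ["hot", "dot", "dog", "lot", "log", "cog"]
    assert Solution().ladderLength("hit", "cog", words) == 5
    assert Solution().ladderLength("hit", "cog", words[:-1]) == 0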
| [
"[email protected]"
] | |
8ddbe698c1c73e311e9b99a820e4b99697b3fd9b | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/576811_Rename_MP3_files_ID3_tags_does_not_require/recipe-576811.py | 292eebcc019eb27b01d8285188c18bb6380e649e | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 26,410 | py | """ Read ID3 tags from a file.
Ned Batchelder, http://nedbatchelder.com/code/modules/id3reader.html
http://nedbatchelder.com/code/modules/id3reader.py
* original code modified by ccpizza: added code to main method to rename
files in current folder from ID3 tags,
e.g. 'Track_01.mp3' >> '01 - Chan chan.mp3'
* added safe console printing of unicode characters
* added indexing for duplicate file names, i.e. '01 - Chan
chan.mp3[2]'
* fixed indexing for duplicated ID3 tags
* added -d option to create "artist\album" directories:
e.g. 'Track_01.mp3' >> 'Compay Segundo\Mojito\01 - Chan chan.mp3'
* added fallback to 'latin1' in case of non-unicode tag text
"""
__version__ = '1.53.20070415' # History at the end of the file.
# ID3 specs: http://www.id3.org/develop.html
import struct, sys, zlib
import re
MP3=u'mp3'
# These are the text encodings, indexed by the first byte of a text value.
_encodings = ['iso8859-1', 'utf-16', 'utf-16be', 'utf-8']
# Simple pseudo-id's, mapped to their various representations.
# Use these ids with getValue, and you don't need to know what
# version of ID3 the file contains.
_simpleDataMapping = {
'album': ('TALB', 'TAL', 'v1album', 'TOAL'),
'performer': ('TPE1', 'TP1', 'v1performer', 'TOPE'),
'title': ('TIT2', 'TT2', 'v1title'),
'track': ('TRCK', 'TRK', 'v1track'),
'year': ('TYER', 'TYE', 'v1year'),
'genre': ('TCON', 'TCO', 'v1genre'),
'comment': ('COMM', 'COM', 'v1comment'),
}
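# Illustrative use of the pseudo-ids above (the file name is hypothetical):
#   Reader('song.mp3').getValue('title')
# tries TIT2, then TT2, then v1title, returning the first value present.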
# Provide booleans for older Pythons.
try:
True, False
except NameError:
True, False = 1==1, 1==0
# Tracing
_t = False
def _trace(msg):
print msg
# Coverage
_c = False
_features = {}
def _coverage(feat):
#if _t: _trace('feature '+feat)
_features[feat] = _features.setdefault(feat, 0)+1
def _safestr(s):
""" Get a good string for printing, that won't throw exceptions,
no matter what's in it.
"""
try:
return unicode(s).encode(sys.getdefaultencoding())
except UnicodeError:
return '?: '+repr(s)
# Can I just say that I think the whole concept of genres is bogus,
# since they are so subjective? And the idea of letting someone else pick
# one of these things and then have it affect the categorization of my music
# is extra bogus. And the list itself is absurd. Polsk Punk?
_genres = [
# 0-19
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco', 'Funk', 'Grunge', 'Hip - Hop', 'Jazz', 'Metal',
'New Age', 'Oldies', 'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
# 20-39
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack', 'Euro - Techno', 'Ambient', 'Trip - Hop', 'Vocal', 'Jazz + Funk',
'Fusion', 'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game', 'Sound Clip', 'Gospel', 'Noise',
# 40-59
'Alt Rock', 'Bass', 'Soul', 'Punk', 'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock', 'Ethnic', 'Gothic',
'Darkwave', 'Techno - Industrial', 'Electronic', 'Pop - Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult', 'Gangsta Rap',
# 60-79
'Top 40', 'Christian Rap', 'Pop / Funk', 'Jungle', 'Native American', 'Cabaret', 'New Wave', 'Psychedelic', 'Rave', 'Showtunes',
'Trailer', 'Lo - Fi', 'Tribal', 'Acid Punk', 'Acid Jazz', 'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
# 80-99
'Folk', 'Folk / Rock', 'National Folk', 'Swing', 'Fast - Fusion', 'Bebob', 'Latin', 'Revival', 'Celtic', 'Bluegrass',
'Avantgarde', 'Gothic Rock', 'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock', 'Big Band', 'Chorus', 'Easy Listening', 'Acoustic',
# 100-119
'Humour', 'Speech', 'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass', 'Primus', 'Porn Groove',
'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba', 'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
# 120-139
'Duet', 'Punk Rock', 'Drum Solo', 'A Cappella', 'Euro - House', 'Dance Hall', 'Goa', 'Drum & Bass', 'Club - House', 'Hardcore',
'Terror', 'Indie', 'BritPop', 'Negerpunk', 'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal', 'Black Metal', 'Crossover',
# 140-147
'Contemporary Christian', 'Christian Rock', 'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'JPop', 'Synthpop'
]
class Id3Error(Exception):
""" An exception caused by id3reader properly handling a bad ID3 tag.
"""
pass
class _Header:
""" Represent the ID3 header in a tag.
"""
def __init__(self):
self.majorVersion = 0
self.revision = 0
self.flags = 0
self.size = 0
self.bUnsynchronized = False
self.bExperimental = False
self.bFooter = False
def __str__(self):
return str(self.__dict__)
class _Frame:
""" Represent an ID3 frame in a tag.
"""
def __init__(self):
self.id = ''
self.size = 0
self.flags = 0
self.rawData = ''
self.bTagAlterPreserve = False
self.bFileAlterPreserve = False
self.bReadOnly = False
self.bCompressed = False
self.bEncrypted = False
self.bInGroup = False
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return str(self.__dict__)
def _interpret(self):
""" Examine self.rawData and create a self.value from it.
"""
if len(self.rawData) == 0:
# This is counter to the spec, but seems harmless enough.
#if _c: _coverage('zero data')
return
if self.bCompressed:
# Decompress the compressed data.
self.rawData = zlib.decompress(self.rawData)
if self.id[0] == 'T':
# Text fields start with T
encoding = ord(self.rawData[0])
if 0 <= encoding < len(_encodings):
#if _c: _coverage('encoding%d' % encoding)
value = self.rawData[1:].decode(_encodings[encoding])
else:
#if _c: _coverage('bad encoding')
value = self.rawData[1:]
# Don't let trailing zero bytes fool you.
if value:
value = value.strip('\0')
# The value can actually be a list.
if '\0' in value:
value = value.split('\0')
#if _c: _coverage('textlist')
self.value = value
elif self.id[0] == 'W':
# URL fields start with W
self.value = self.rawData.strip('\0')
if self.id == 'WXXX':
self.value = self.value.split('\0')
elif self.id == 'CDM':
# ID3v2.2.1 Compressed Data Metaframe
if self.rawData[0] == 'z':
self.rawData = zlib.decompress(self.rawData[5:])
else:
#if _c: _coverage('badcdm!')
                raise Id3Error, 'Unknown CDM compression: %02x' % ord(self.rawData[0])
#@TODO: re-interpret the decompressed frame.
elif self.id in _simpleDataMapping['comment']:
# comment field
# In limited testing a typical comment looks like
# '\x00XXXID3v1 Comment\x00comment test' so in this
# case we need to find the second \x00 to know where
# where we start for a comment. In case we only find
# one \x00, lets just start at the beginning for the
# value
s = str(self.rawData)
pos = 0
count = 0
while pos < len(s) and count < 2:
if ord(s[pos]) == 0:
count = count + 1
pos = pos + 1
if count < 2:
pos = 1
if pos > 0 and pos < len(s):
s = s[pos:]
if ord(s[-1]) == 0:
s = s[:-1]
self.value = s
class Reader:
""" An ID3 reader.
Create one on a file object, and then use getValue('TIT2') (for example)
to pull values.
"""
def __init__(self, file):
""" Create a reader from a file or filename. """
self.file = file
self.header = None
self.frames = {}
self.allFrames = []
self.bytesLeft = 0
self.padbytes = ''
bCloseFile = False
# If self.file is a string of some sort, then open it to get a file.
if isinstance(self.file, (type(''), type(u''))):
self.file = open(self.file, 'rb')
bCloseFile = True
self._readId3()
if bCloseFile:
self.file.close()
def _readBytes(self, num, desc=''):
""" Read some bytes from the file.
This method implements the "unsynchronization" scheme,
where 0xFF bytes may have had 0x00 bytes stuffed after
them. These zero bytes have to be removed transparently.
"""
#if _t: _trace("ask %d (%s)" % (num,desc))
if num > self.bytesLeft:
#if _c: _coverage('long!')
raise Id3Error, 'Long read (%s): (%d > %d)' % (desc, num, self.bytesLeft)
bytes = self.file.read(num)
self.bytesLeft -= num
if len(bytes) < num:
#if _t: _trace("short read with %d left, %d total" % (self.bytesLeft, self.header.size))
#if _c: _coverage('short!')
raise Id3Error, 'Short read (%s): (%d < %d)' % (desc, len(bytes), num)
if self.header.bUnsynchronized:
nUnsync = 0
i = 0
while True:
i = bytes.find('\xFF\x00', i)
if i == -1:
break
#if _t: _trace("unsync at %d" % (i+1))
#if _c: _coverage('unsyncbyte')
nUnsync += 1
# This is a stuffed byte to remove
bytes = bytes[:i+1] + bytes[i+2:]
# Have to read one more byte from the file to adjust
bytes += self.file.read(1)
self.bytesLeft -= 1
i += 1
#if _t: _trace("unsync'ed %d" % (nUnsync))
return bytes
def _unreadBytes(self, num):
self.file.seek(-num, 1)
self.bytesLeft += num
def _getSyncSafeInt(self, bytes):
assert len(bytes) == 4
if type(bytes) == type(''):
bytes = [ ord(c) for c in bytes ]
return (bytes[0] << 21) + (bytes[1] << 14) + (bytes[2] << 7) + bytes[3]
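    # Worked example: the sync-safe bytes 0x00 0x00 0x02 0x01 decode to
    # (0 << 21) + (0 << 14) + (2 << 7) + 1 = 257; only 7 bits of each byte
    # are used, which keeps 0xFF (a false sync marker) out of size fields.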
def _getInteger(self, bytes):
i = 0;
if type(bytes) == type(''):
bytes = [ ord(c) for c in bytes ]
for b in bytes:
i = i*256+b
return i
def _addV1Frame(self, id, rawData):
if id == 'v1genre':
assert len(rawData) == 1
nGenre = ord(rawData)
try:
value = _genres[nGenre]
except IndexError:
value = "(%d)" % nGenre
else:
value = rawData.strip(' \t\r\n').split('\0')[0]
if value:
frame = _Frame()
frame.id = id
frame.rawData = rawData
frame.value = value
self.frames[id] = frame
self.allFrames.append(frame)
def _pass(self):
""" Do nothing, for when we need to plug in a no-op function.
"""
pass
def _readId3(self):
header = self.file.read(10)
if len(header) < 10:
return
hstuff = struct.unpack('!3sBBBBBBB', header)
if hstuff[0] != "ID3":
# Doesn't look like an ID3v2 tag,
# Try reading an ID3v1 tag.
self._readId3v1()
return
self.header = _Header()
self.header.majorVersion = hstuff[1]
self.header.revision = hstuff[2]
self.header.flags = hstuff[3]
self.header.size = self._getSyncSafeInt(hstuff[4:8])
self.bytesLeft = self.header.size
self._readExtHeader = self._pass
if self.header.majorVersion == 2:
#if _c: _coverage('id3v2.2.%d' % self.header.revision)
self._readFrame = self._readFrame_rev2
elif self.header.majorVersion == 3:
#if _c: _coverage('id3v2.3.%d' % self.header.revision)
self._readFrame = self._readFrame_rev3
elif self.header.majorVersion == 4:
#if _c: _coverage('id3v2.4.%d' % self.header.revision)
self._readFrame = self._readFrame_rev4
else:
#if _c: _coverage('badmajor!')
raise Id3Error, "Unsupported major version: %d" % self.header.majorVersion
# Interpret the flags
self._interpretFlags()
# Read any extended header
self._readExtHeader()
# Read the frames
while self.bytesLeft > 0:
frame = self._readFrame()
if frame:
frame._interpret()
self.frames[frame.id] = frame
self.allFrames.append(frame)
else:
#if _c: _coverage('padding')
break
def _interpretFlags(self):
""" Interpret ID3v2.x flags.
"""
if self.header.flags & 0x80:
self.header.bUnsynchronized = True
#if _c: _coverage('unsynctag')
if self.header.majorVersion == 2:
if self.header.flags & 0x40:
#if _c: _coverage('compressed')
# "Since no compression scheme has been decided yet,
# the ID3 decoder (for now) should just ignore the entire
# tag if the compression bit is set."
self.header.bCompressed = True
if self.header.majorVersion >= 3:
if self.header.flags & 0x40:
#if _c: _coverage('extheader')
if self.header.majorVersion == 3:
self._readExtHeader = self._readExtHeader_rev3
else:
self._readExtHeader = self._readExtHeader_rev4
if self.header.flags & 0x20:
#if _c: _coverage('experimental')
self.header.bExperimental = True
if self.header.majorVersion >= 4:
if self.header.flags & 0x10:
#if _c: _coverage('footer')
self.header.bFooter = True
def _readExtHeader_rev3(self):
""" Read the ID3v2.3 extended header.
"""
# We don't interpret this yet, just eat the bytes.
size = self._getInteger(self._readBytes(4, 'rev3ehlen'))
self._readBytes(size, 'rev3ehdata')
def _readExtHeader_rev4(self):
""" Read the ID3v2.4 extended header.
"""
# We don't interpret this yet, just eat the bytes.
size = self._getSyncSafeInt(self._readBytes(4, 'rev4ehlen'))
self._readBytes(size-4, 'rev4ehdata')
def _readId3v1(self):
""" Read the ID3v1 tag.
spec: http://www.id3.org/id3v1.html
"""
self.file.seek(-128, 2)
tag = self.file.read(128)
if len(tag) != 128:
return
if tag[0:3] != 'TAG':
return
self.header = _Header()
self.header.majorVersion = 1
self.header.revision = 0
self._addV1Frame('v1title', tag[3:33])
self._addV1Frame('v1performer', tag[33:63])
self._addV1Frame('v1album', tag[63:93])
self._addV1Frame('v1year', tag[93:97])
self._addV1Frame('v1comment', tag[97:127])
self._addV1Frame('v1genre', tag[127])
if tag[125] == '\0' and tag[126] != '\0':
#if _c: _coverage('id3v1.1')
self.header.revision = 1
self._addV1Frame('v1track', str(ord(tag[126])))
else:
#if _c: _coverage('id3v1.0')
pass
return
_validIdChars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def _isValidId(self, id):
""" Determine if the id bytes make a valid ID3 id.
"""
for c in id:
if not c in self._validIdChars:
#if _c: _coverage('bad id')
return False
#if _c: _coverage('id '+id)
return True
def _readFrame_rev2(self):
""" Read a frame for ID3v2.2: three-byte ids and lengths.
spec: http://www.id3.org/id3v2-00.txt
"""
if self.bytesLeft < 6:
return None
id = self._readBytes(3, 'rev2id')
if len(id) < 3 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBB', self._readBytes(3, 'rev2len'))
frame = _Frame()
frame.id = id
frame.size = self._getInteger(hstuff[0:3])
frame.rawData = self._readBytes(frame.size, 'rev2data')
return frame
def _readFrame_rev3(self):
""" Read a frame for ID3v2.3: four-byte ids and lengths.
"""
if self.bytesLeft < 10:
return None
id = self._readBytes(4,'rev3id')
if len(id) < 4 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev3head'))
frame = _Frame()
frame.id = id
frame.size = self._getInteger(hstuff[0:4])
cbData = frame.size
frame.flags = hstuff[4]
#if _t: _trace('flags = %x' % frame.flags)
frame.bTagAlterPreserve = (frame.flags & 0x8000 != 0)
frame.bFileAlterPreserve = (frame.flags & 0x4000 != 0)
frame.bReadOnly = (frame.flags & 0x2000 != 0)
frame.bCompressed = (frame.flags & 0x0080 != 0)
if frame.bCompressed:
frame.decompressedSize = self._getInteger(self._readBytes(4, 'decompsize'))
cbData -= 4
#if _c: _coverage('compress')
frame.bEncrypted = (frame.flags & 0x0040 != 0)
if frame.bEncrypted:
frame.encryptionMethod = self._readBytes(1, 'encrmethod')
cbData -= 1
#if _c: _coverage('encrypt')
frame.bInGroup = (frame.flags & 0x0020 != 0)
if frame.bInGroup:
frame.groupid = self._readBytes(1, 'groupid')
cbData -= 1
#if _c: _coverage('groupid')
frame.rawData = self._readBytes(cbData, 'rev3data')
return frame
def _readFrame_rev4(self):
""" Read a frame for ID3v2.4: four-byte ids and lengths.
"""
if self.bytesLeft < 10:
return None
id = self._readBytes(4,'rev4id')
if len(id) < 4 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev4head'))
frame = _Frame()
frame.id = id
frame.size = self._getSyncSafeInt(hstuff[0:4])
cbData = frame.size
frame.flags = hstuff[4]
frame.bTagAlterPreserve = (frame.flags & 0x4000 != 0)
frame.bFileAlterPreserve = (frame.flags & 0x2000 != 0)
frame.bReadOnly = (frame.flags & 0x1000 != 0)
frame.bInGroup = (frame.flags & 0x0040 != 0)
if frame.bInGroup:
frame.groupid = self._readBytes(1, 'groupid')
cbData -= 1
#if _c: _coverage('groupid')
frame.bCompressed = (frame.flags & 0x0008 != 0)
if frame.bCompressed:
#if _c: _coverage('compress')
pass
frame.bEncrypted = (frame.flags & 0x0004 != 0)
if frame.bEncrypted:
frame.encryptionMethod = self._readBytes(1, 'encrmethod')
cbData -= 1
#if _c: _coverage('encrypt')
frame.bUnsynchronized = (frame.flags & 0x0002 != 0)
if frame.bUnsynchronized:
#if _c: _coverage('unsyncframe')
pass
if frame.flags & 0x0001:
frame.datalen = self._getSyncSafeInt(self._readBytes(4, 'datalen'))
cbData -= 4
#if _c: _coverage('datalenindic')
frame.rawData = self._readBytes(cbData, 'rev3data')
return frame
def getValue(self, id):
""" Return the value for an ID3 tag id, or for a
convenience label ('title', 'performer', ...),
or return None if there is no such value.
"""
if self.frames.has_key(id):
if hasattr(self.frames[id], 'value'):
return self.frames[id].value
if _simpleDataMapping.has_key(id):
for id2 in _simpleDataMapping[id]:
v = self.getValue(id2)
if v:
return v
return None
def getRawData(self, id):
if self.frames.has_key(id):
return self.frames[id].rawData
return None
def dump(self):
import pprint
print "Header:"
print self.header
print "Frames:"
for fr in self.allFrames:
if len(fr.rawData) > 30:
fr.rawData = fr.rawData[:30]
pprint.pprint(self.allFrames)
for fr in self.allFrames:
if hasattr(fr, 'value'):
print '%s: %s' % (fr.id, _safestr(fr.value))
else:
print '%s= %s' % (fr.id, _safestr(fr.rawData))
for label in _simpleDataMapping.keys():
v = self.getValue(label)
if v:
print 'Label %s: %s' % (label, _safestr(v))
def dumpCoverage(self):
feats = _features.keys()
feats.sort()
for feat in feats:
print "Feature %-12s: %d" % (feat, _features[feat])
# chars not allowed in filenames
illegal_chars = u'/\?=+<>:;"*|!@#$%^&*'
# http://code.activestate.com/recipes/65441/
def has_chars(raw, bad_chars):
try:
for c in bad_chars:
if c in raw: return True
return False
except UnicodeDecodeError:
return False
def replace_illegal_chars(raw):
return ''.join([c in illegal_chars and '_' or c for c in raw])
def asci(*args):
for arg in args:
print arg.encode('us-ascii','xmlcharrefreplace'),
print
def is_dupe(oldmp3, newmp3):
#return bool(re.search(u'^'+orig+ r'(\[\d+\])?$', new))
old=os.path.splitext(oldmp3)[0]
new=os.path.splitext(newmp3)[0]
return old.lower().startswith(new.lower())
def parse_index(f):
    # Match a trailing duplicate index such as u'title[2]'.
    rx = re.compile(r'(?P<name>.+)\[(?P<index>\d+?)\]$')
    mo = rx.search(f)
    if mo:
        # return the index as an int so callers can do arithmetic on it
        return mo.group('name'), int(mo.group('index'))
    else:
        return f, 0
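# Illustrative behaviour (after the int() fix above):
#   parse_index(u'01 - Chan chan[2]') -> (u'01 - Chan chan', 2)
#   parse_index(u'01 - Chan chan')    -> (u'01 - Chan chan', 0)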
if __name__ == '__main__':
import os
import optparse
dodirs = False
parser = optparse.OptionParser()
parser.add_option('-l','--list',
action="store_true",
help='List ID3 tags only with no renaming',
default=False
)
parser.add_option('-d', '--dirs',
action="store_true",
help="create album dirs",
default=False)
(opts, args) = parser.parse_args(sys.argv[1:])
if len(args):
mp3dir = unicode(args[0])
else:
mp3dir = u'.'
print
    if opts.list:
        # asci() already prints its arguments; wrapping it in a print
        # statement used to append a stray 'None' to the output
        asci('Listing ID3 tags in folder:', os.path.abspath(mp3dir))
    else:
        asci('Renaming MP3 files in folder:', os.path.abspath(mp3dir))
print
for fname in os.listdir(mp3dir):
# uncomment if you want to process only files with .mp3 extension
# if not fname.lower().endswith(MP3):
# continue
        absfname = os.path.join(mp3dir, fname)
        # check the full path; a bare name would be resolved against the CWD
        if os.path.isdir(absfname):
            continue
try:
id3r = Reader(absfname)
except (Id3Error, UnicodeDecodeError), e:
print e
continue
#id3r.dump()
album = id3r.getValue('album')
track = id3r.getValue('track')
artist = id3r.getValue('performer')
title = id3r.getValue('title')
year = id3r.getValue('year')
### move files to dirs according to artist, album
if opts.dirs and artist and album:
dodirs = True
mp3dir_full = os.path.join(mp3dir,
replace_illegal_chars(artist),
replace_illegal_chars(album))
if not os.path.exists(mp3dir_full):
try:
os.makedirs(mp3dir_full)
except (IOError,WindowsError), e :
print
else:
mp3dir_full = mp3dir
if not title:
continue
# replace tracks like '2/15' >> '02'
if track and u'/' in track:
track = track.split('/')[0]
if track:
track = track.zfill(2) # zero fill, i. e. '1' >> '01'
if not track:
track = ''
if has_chars(title, illegal_chars):
title = replace_illegal_chars(title)
try:
if isinstance(track, unicode) or isinstance(title, unicode):
new_fname = track + u' - ' + title + u'.' + MP3
## try to fix non-unicode strings, only trying 'latin1'
if isinstance(track, str) or isinstance(title, str):
new_fname = track + ' - ' + title.decode('latin1') + '.' + MP3
except UnicodeDecodeError:
print 'Encoding error while processing title/track'
continue
new_dir = dodirs and mp3dir_full or mp3dir
proposed_new_name = os.path.join(new_dir, new_fname)
maxwidth = 35
if opts.list:
print '>',
else:
if not is_dupe(absfname, proposed_new_name):
if os.path.exists(proposed_new_name):
                for i in range(1, 1000):  # allow up to 999 duplicate names
                    # work with the bare file name so the rename below does
                    # not join the destination directory onto itself
                    base = os.path.splitext(os.path.basename(proposed_new_name))[0]
                    parsed_name, idx = parse_index(base)
                    new_fname = parsed_name + u'[' + unicode(idx + i) + u'].' + MP3
                    if not os.path.exists(os.path.join(new_dir, new_fname)):
                        break
try:
os.rename(absfname, os.path.join(new_dir, new_fname))
except Exception, e:
asci( 'Error: ', absfname.ljust(maxwidth), '>>>', proposed_new_name, str(e) )
else:
maxwidth -= len('Skipping...') + 1
print 'Skipping...',
asci((len(fname) > maxwidth and fname[:maxwidth-3] or fname).ljust(maxwidth), ' >>> ', new_fname)
| [
"[email protected]"
] | |
be45bcb1e674793f5bb4889a3cdcada07a013a45 | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0100_auto_20210206_1820.py | 41f0df779972b165576ab9f2962e9261c1ec7a13 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 3.1.1 on 2021-02-06 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0099_debts_rate'),
]
operations = [
migrations.AlterField(
model_name='debts',
name='rate',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Tasa(Bs.)'),
),
]
| [
"[email protected]"
] | |
f22af6b6113dc3091f9553766e30977fce309d38 | db5264994305e8c926f89cb456f33bd3a4d64f76 | /Sklep zielarski/account/urls.py | 8f5e4dae0dd33bbbd640d540c02340a153233e68 | [] | no_license | marcinpelszyk/Django | 7842e20d5e8b213c4cd42c421c1db9ab7d5f01d5 | aff2b9bd20e978a22a4a98994bf8424892d3c82f | refs/heads/main | 2023-05-01T19:20:37.267010 | 2021-05-18T17:51:53 | 2021-05-18T17:51:53 | 356,532,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from django.contrib.auth import views as auth_views
from django.urls import path
from django.views.generic import TemplateView
from . import views
from .forms import PwdResetConfirmForm, PwdResetForm, UserLoginForm
app_name = 'account'
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name='account/login.html',
form_class=UserLoginForm), name='login'),
path('logout/', auth_views.LogoutView.as_view(next_page='/account/login/'), name='logout'),
path('register/', views.account_register, name='register'),
path('activate/<slug:uidb64>/<slug:token>)/', views.account_activate, name='activate'),
# Reset password
path('password_reset/', auth_views.PasswordResetView.as_view(template_name="account/password_reset/password_reset_form.html",
success_url='password_reset_email_confirm',
email_template_name='account/password_reset/password_reset_email.html',
form_class=PwdResetForm), name='pwdreset'),
path('password_reset_confirm/<uidb64>/<token>', auth_views.PasswordResetConfirmView.as_view(template_name='account/password_reset/password_reset_confirm.html',
success_url='password_reset_complete/',
form_class=PwdResetConfirmForm), name="password_reset_confirm"),
path('password_reset/password_reset_email_confirm/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_done'),
path('password_reset_confirm/MTU/password_reset_complete/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_complete'),
# User dashboard
path('dashboard/', views.dashboard, name='dashboard'),
path('profile/edit/', views.edit_details, name='edit_details'),
path('profile/delete_user/', views.delete_user, name='delete_user'),
path('profile/delete_confirm/', TemplateView.as_view(template_name="account/dashboard/delete_confirm.html"), name='delete_confirmation'),
# Addresses
path('addresses/', views.view_address, name='addresses'),
path("add_address/", views.add_address, name="add_address"),
path("addresses/edit/<slug:id>/", views.edit_address, name="edit_address"),
path("addresses/delete/<slug:id>/", views.delete_address, name="delete_address"),
path("addresses/set_default/<slug:id>/", views.set_default, name="set_default"),
path("user_orders/", views.user_orders, name="user_orders"),
#Favorite list
path('favoritelist/', views.favoritelist, name='favoritelist'),
path('favoritelist/add_to_favoritelist/<int:id>', views.add_to_favoritelist, name='user_favorite'),
]
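# Illustrative reverse() lookups, assuming this urlconf is included under an
# '/account/' prefix in the project's root urls.py:
#   reverse('account:dashboard')    -> '/account/dashboard/'
#   reverse('account:edit_details') -> '/account/profile/edit/'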
| [
"[email protected]"
] | |
88850f9c8b1aef4142ac6d51fb5ce192a8482057 | be1e8444482e40df5d02d57964f61cfbd9249f13 | /Django-0.90/django/core/db/backends/postgresql.py | b1b2d9cb52d964e5d1fd6012266dabed23eedd4c | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | tungvx/deploy | 9946d4350f5fbc5da25d45505b75384fd40e6088 | 9e1917c6c645b4ce0efe115b0da76027d4bc634c | refs/heads/master | 2021-01-02T09:08:45.691746 | 2011-11-12T19:44:48 | 2011-11-12T19:44:48 | 2,763,145 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,276 | py | """
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
from django.core.db import base, typecasts
import psycopg as Database
DatabaseError = Database.DatabaseError
class DatabaseWrapper:
def __init__(self):
self.connection = None
self.queries = []
def cursor(self):
from django.conf.settings import DATABASE_USER, DATABASE_NAME, DATABASE_HOST, DATABASE_PORT, DATABASE_PASSWORD, DEBUG, TIME_ZONE
if self.connection is None:
if DATABASE_NAME == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "You need to specify DATABASE_NAME in your Django settings file."
conn_string = "dbname=%s" % DATABASE_NAME
if DATABASE_USER:
conn_string = "user=%s %s" % (DATABASE_USER, conn_string)
if DATABASE_PASSWORD:
conn_string += " password='%s'" % DATABASE_PASSWORD
if DATABASE_HOST:
conn_string += " host=%s" % DATABASE_HOST
if DATABASE_PORT:
conn_string += " port=%s" % DATABASE_PORT
self.connection = Database.connect(conn_string)
self.connection.set_isolation_level(1) # make transactions transparent to all cursors
cursor = self.connection.cursor()
cursor.execute("SET TIME ZONE %s", [TIME_ZONE])
if DEBUG:
return base.CursorDebugWrapper(cursor, self)
return cursor
def commit(self):
return self.connection.commit()
def rollback(self):
if self.connection:
return self.connection.rollback()
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
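    # e.g. quote_name('auth_user') -> '"auth_user"', while an already-quoted
    # name such as '"auth_user"' passes through unchanged.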
def dictfetchone(cursor):
"Returns a row from the cursor as a dict"
return cursor.dictfetchone()
def dictfetchmany(cursor, number):
"Returns a certain number of rows from a cursor as a dict"
return cursor.dictfetchmany(number)
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
return cursor.dictfetchall()
def get_last_insert_id(cursor, table_name, pk_name):
cursor.execute("SELECT CURRVAL('%s_%s_seq')" % (table_name, pk_name))
return cursor.fetchone()[0]
def get_date_extract_sql(lookup_type, table_name):
# lookup_type is 'year', 'month', 'day'
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
return "EXTRACT('%s' FROM %s)" % (lookup_type, table_name)
def get_date_trunc_sql(lookup_type, field_name):
# lookup_type is 'year', 'month', 'day'
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def get_limit_offset_sql(limit, offset=None):
sql = "LIMIT %s" % limit
if offset and offset != 0:
sql += " OFFSET %s" % offset
return sql
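# e.g. get_limit_offset_sql(10) -> 'LIMIT 10', while
# get_limit_offset_sql(10, 20) -> 'LIMIT 10 OFFSET 20'.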
def get_random_function_sql():
return "RANDOM()"
def get_table_list(cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_relations(cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
try:
# row[0] and row[1] are like "{2}", so strip the curly braces.
relations[int(row[0][1:-1]) - 1] = (int(row[1][1:-1]) - 1, row[2])
except ValueError:
continue
return relations
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# use mx.DateTime by default.
try:
Database.register_type(Database.new_type((1082,), "DATE", typecasts.typecast_date))
except AttributeError:
raise Exception, "You appear to be using psycopg version 2, which isn't supported yet, because it's still in beta. Use psycopg version 1 instead: http://initd.org/projects/psycopg1"
Database.register_type(Database.new_type((1083,1266), "TIME", typecasts.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", typecasts.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", typecasts.typecast_boolean))
OPERATOR_MAPPING = {
'exact': '=',
'iexact': 'ILIKE',
'contains': 'LIKE',
'icontains': 'ILIKE',
'ne': '!=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
'startswith': 'LIKE',
'endswith': 'LIKE',
'istartswith': 'ILIKE',
'iendswith': 'ILIKE',
}
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
DATA_TYPES = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(maxlength)s)',
'CommaSeparatedIntegerField': 'varchar(%(maxlength)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'EmailField': 'varchar(75)',
'FileField': 'varchar(100)',
'FilePathField': 'varchar(100)',
'FloatField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'ImageField': 'varchar(100)',
'IntegerField': 'integer',
'IPAddressField': 'inet',
'ManyToManyField': None,
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PhoneNumberField': 'varchar(20)',
'PositiveIntegerField': 'integer CHECK (%(column)s >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK (%(column)s >= 0)',
'SlugField': 'varchar(50)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'URLField': 'varchar(200)',
'USStateField': 'varchar(2)',
}
# Maps type codes to Django Field types.
DATA_TYPES_REVERSE = {
16: 'BooleanField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
869: 'IPAddressField',
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'FloatField',
}
| [
"[email protected]"
] | |
c3a204b93156cbcd8e27787d9c7665ae8196a3c3 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/instance/DistributedTeleportMgr.py | b9c0736e2218759b8f8a6859c8f75acb8541aa1b | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,217 | py | from pandac.PandaModules import *
from direct.task import Task
from direct.distributed import DistributedObject
from pirates.piratesbase import PiratesGlobals
from pirates.world import ZoneLOD
from direct.showbase.PythonUtil import report
from otp.otpbase import OTPLocalizer
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PDialog
from otp.otpgui import OTPDialog
from pirates.quest import QuestDB, QuestLadderDB
'''
DistributedTeleportMgr drives every client-side teleport: local moves within
a world, cross-instance and cross-shard teleports, and teleports to friends
and ships. The control flow is convoluted -- state is spread across stored
callbacks, distributed updates, and interest-change events -- so read the
whole sequence before modifying any single method.
'''
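# Rough teleport sequence, reconstructed from the methods below (not an
# official spec): initiateTeleport() sends a request to the AI, which answers
# by driving forceTeleportStart(); interest is then shifted in
# teleportAddInterestTZ(), and teleportZoneExists()/teleportHandlerExists()
# hand the stored callbacks off to the teleport handler that finishes the move.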
class DistributedTeleportMgr(DistributedObject.DistributedObject):
notify = directNotify.newCategory('DistributedTeleportMgr')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.instanceType = None
self.fromInstanceType = None
self.lastLocalTeleportLoc = None
self.teleportQueryId = None
self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
self.instanceName = 'mainWorld'
self.doneCallback = None
self.startedCallback = None
self.oldWorld = None
self.requestData = None
self.localTeleportId = None
self.localTeleportingObj = None
self.localTeleportCallback = None
self.localTeleportDestPos = None
self.popupDialog = None
self.doEffect = False
self.stowawayEffect = False
self.miniLog = None
self.teleportQueue = []
self.teleportQueueProcess = None
def generate(self):
DistributedObject.DistributedObject.generate(self)
base.cr.teleportMgr = self
self.localTeleportingObj = localAvatar
self.__pendingGoNow = [True]
localAvatar.readyToTeleport(self)
self.accept('localAvTeleportFinishedRequest', self.localAvTeleportFinishedRequest)
def requestLocalTeleport(self, locationName = None):
self.requestData = ((), {
'locationName': locationName })
localAvatar.confirmTeleport(self.localTeleportConfirmation, feedback = True)
def localTeleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
self.localTeleport(*requestData[0], **requestData[1])
            # requestData is an (args, kwargs) pair; the uid, if any, lives in
            # the kwargs dict.
            locationUid = requestData[1].get('locationUid')
base.cr.loadingScreen.showTarget(locationUid)
base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
def localTeleportEffect(self, teleportPosHpr, parent=None, smooth=False, goNow=False):
if localAvatar.testTeleportFlag(PiratesGlobals.TFInWater) or goNow:
self.localTeleportPos(teleportPosHpr, parent, smooth)
else:
localAvatar.b_setGameState('TeleportOut')
taskMgr.doMethodLater(5, self.localTeleportPos, self.uniqueName('localTeleportPos'), extraArgs = [
teleportPosHpr,
parent,
smooth])
def localTeleportPos(self, teleportPosHpr, parent = None, smooth = False):
localAvatar.b_setGameState('TeleportOut', [
None,
False])
currParent = localAvatar.getParentObj()
if isinstance(currParent, ZoneLOD.ZoneLOD):
localAvatar.leaveZoneLOD(currParent)
        if parent is None:
parent = self.cr.activeWorld.worldGrid
messenger.send('islandPlayerBarrier', [
0])
teleportZone = parent.getZoneFromXYZ(teleportPosHpr[:3])
localAvatar.reparentTo(parent)
localAvatar.setPosHpr(*teleportPosHpr)
localAvatar.spawnWiggle()
localAvatar.b_setLocation(parent.getDoId(), teleportZone)
parent.addObjectToGrid(localAvatar)
parent.setPlayerBarrier(1)
currParent = localAvatar.getParentObj()
if isinstance(currParent, ZoneLOD.ZoneLOD):
localAvatar.enterZoneLOD(currParent)
parent.processVisibility(None)
if base.cr._completeEventCount.num > 0:
self.acceptOnce(base.cr.getAllInterestsCompleteEvent(), localAvatar.b_setGameState, extraArgs = [
'TeleportIn'])
else:
localAvatar.b_setGameState('TeleportIn')
def localTeleport(self, locationName=None, goNow=False, locationUid=None):
if locationName and locationUid:
locationName = None
for currIsle in base.cr.doId2do.values():
if not (hasattr(currIsle, 'getName') and hasattr(currIsle, 'getUniqueId')):
continue
if currIsle.getName() == locationName:
break
elif currIsle.getUniqueId() == locationUid:
break
else:
self.notify.error('not found: (%s, %s)' % (locationName, locationUid))
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
questStateSpawnIdx = QuestLadderDB.getPreferredAreaSpawnNode(currIsle.getUniqueId(), localAvatar)
teleportPos = base.cr.activeWorld.getPlayerSpawnPt(currIsle.getDoId(), index = questStateSpawnIdx)
        if teleportPos is None:
teleportPos = (0, 0, 0, 0, 0, 0)
self.localTeleportEffect(teleportPos, currIsle, goNow=goNow)
self.lastLocalTeleportLoc = currIsle.getDoId()
def requestTeleportToFishingShip(self):
        self.notify.debug('requestTeleportToFishingShip')
self.cr.teleportMgr.sendUpdate('requestTeleportToFishingShip')
def teleportToFishingShipResponse(self, shipId):
        self.notify.debug('teleportToFishingShipResponse shipId=%s' % (shipId, ))
self.cr.teleportMgr.localTeleportToId(shipId, localAvatar, showLoadingScreen = False)
def localTeleportToId(self, locationId, teleportingObj = None, destPos = None, callback = None, objectLocation = None, showLoadingScreen = True):
if showLoadingScreen:
self.cr.loadingScreen.show(waitForLocation = True)
if locationId in base.cr.doId2do and base.cr.doId2do[locationId].dclass.getName() == 'DistributedOceanGrid':
logBlock(1, 'localTeleportToId(%s,%s,%s,%s,%s,%s) to ocean grid\n\n' % (locationId, teleportingObj, destPos, callback, objectLocation, showLoadingScreen) + str(StackTrace()))
self.localTeleportId = locationId
self.localTeleportingObj = teleportingObj
self.localTeleportCallback = callback
self.localTeleportDestPos = destPos
destObj = self.cr.doId2do.get(locationId)
if destObj:
self._localTeleportToIdInterestComplete()
self.notify.debug('destination object %s found, teleporting to there now' % locationId)
elif objectLocation:
self._localTeleportToIdResponse(objectLocation[0], objectLocation[1])
self.notify.debug('destination object %s not found, but location %s given' % (locationId, objectLocation))
else:
self.sendUpdate('requestTargetsLocation', [
int(locationId)])
self.notify.debug('destination object %s not found, querying AI for its location' % locationId)
def _localTeleportToIdResponse(self, objectId, parentId, zoneId):
self.localTeleportId = objectId
if parentId != 0 and zoneId != 0:
if self.cr.doId2do.get(parentId):
localAvatar.setInterest(parentId, zoneId, [
'localTeleportToId'], 'localTeleportToIdInterestAddComplete')
self.acceptOnce('localTeleportToIdInterestAddComplete', self._localTeleportToIdInterestComplete)
self.notify.debug('parent %s of destination object found, setting up interest' % parentId)
else:
self.notify.warning('parent %s of destination object not found, teleport failure' % parentId)
else:
self.failTeleport(parentId, zoneId)
def failTeleport(self, parentId = None, zoneId = None, message = PLocalizer.TeleportToPlayerFailMessage):
self.sendUpdate('requestClearPreventDamage')
fallbackAreaId = localAvatar.getReturnLocation()
if fallbackAreaId != '':
areaDoId = base.cr.uidMgr.getDoId(fallbackAreaId)
self.clearAmInTeleport()
if areaDoId:
destPos = base.cr.activeWorld.getPlayerSpawnPt(areaDoId)
if destPos and self.localTeleportingObj:
self.localTeleportToId(areaDoId, self.localTeleportingObj, destPos)
else:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
else:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
            self.__createDialog(message)
else:
self.notify.warning(" teleport to object (%s %s) AND 'return location' %s failed" % (parentId, zoneId, fallbackAreaId))
    def __cleanupDialog(self, value = None):
        if self.popupDialog:
            self.popupDialog.destroy()
            del self.popupDialog
            self.popupDialog = None
    def __createDialog(self, message):
        popupDialogText = message
        if self.popupDialog:
            self.__cleanupDialog()
        self.popupDialog = PDialog.PDialog(text = popupDialogText, style = OTPDialog.Acknowledge, command = self.__cleanupDialog)
def _localTeleportToIdInterestComplete(self):
teleportToObj = self.cr.doId2do.get(self.localTeleportId)
if not teleportToObj:
self.sendUpdate('requestTargetsLocation', [
self.localTeleportId])
return None
curParent = localAvatar.getParentObj()
parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
if parentIsZoneLOD:
localAvatar.leaveZoneLOD(curParent)
try:
isAShip = teleportToObj._isShip()
except AttributeError:
isAShip = False
if isAShip:
if not teleportToObj.isSailable():
self.failTeleport(0, 0, PLocalizer.TeleportToGoneShipFailMessage)
return None
elif teleportToObj.gameFSM.getCurrentOrNextState() in ('InBoardingPosition', 'OtherShipBoarded'):
self.failTeleport(0, 0, PLocalizer.TeleportToBoardingShipFailMessage)
return None
teleportToObj.setZoneLevel(3)
teleportToObj.registerMainBuiltFunction(localAvatar.placeOnShip, [
teleportToObj])
teleportToObj.registerBuildCompleteFunction(teleportToObj.enableOnDeckInteractions)
teleportToObj.registerBuildCompleteFunction(self._localTeleportToIdDone)
base.setLocationCode('Ship')
else:
self.__pendingGoNow.append(False)
goNow = self.__pendingGoNow.pop(0)
self.localTeleport(locationUid=teleportToObj.getUniqueId(), goNow=goNow)
def _localTeleportToIdDone(self):
self.cr.loadingScreen.scheduleHide(base.cr.getAllInterestsCompleteEvent())
curParent = localAvatar.getParentObj()
if isinstance(curParent, ZoneLOD.ZoneLOD):
localAvatar.enterZoneLOD(curParent)
if self.localTeleportCallback:
self.localTeleportCallback()
self.localTeleportId = None
self.localTeleportingObj = None
self.localTeleportCallback = None
self.localTeleportDestPos = None
localAvatar.guiMgr.socialPanel.updateAll()
def disable(self):
DistributedObject.DistributedObject.disable(self)
messenger.send('destroyCrewMatchInvite')
taskMgr.removeTasksMatching('teleportRemoveInterest')
taskMgr.removeTasksMatching('teleportAddInterest')
taskMgr.removeTasksMatching(self.uniqueName('localTeleportPos'))
taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
self.requestData = None
self.ignoreAll()
if base.cr.teleportMgr == self:
base.cr.teleportMgr = None
if self.teleportQueueProcess:
taskMgr.remove(self.teleportQueueProcess)
def requestTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True):
self.requestData = ((instanceType, instanceName), {
'shardId': shardId,
'locationUid': locationUid,
'instanceDoId': instanceDoId,
'doneCallback': doneCallback,
'startedCallback': startedCallback,
'gameType': gameType,
'friendDoId': friendDoId,
'friendAreaDoId': friendAreaDoId,
'doEffect': doEffect })
localAvatar.confirmTeleport(self.teleportConfirmation, feedback = True)
def teleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
            self.initiateTeleport(*requestData[0], **requestData[1])
locationUid = requestData[1]['locationUid']
base.cr.loadingScreen.showTarget(locationUid)
base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
def requestTeleportToAvatar(self, shardId, instanceDoId, avatarId, avatarParentId):
self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = avatarId, friendAreaDoId = avatarParentId)
def teleportToObjectResp(self, shardId, instanceId, objId, parentId):
self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceId, friendDoId = objId, friendAreaDoId = parentId)
def requestTeleportToShip(self, shardId, instanceDoId, shipId):
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = 0, friendAreaDoId = shipId)
def requestTeleportToIsland(self, islandUid):
def teleportConfirmation(confirmed, islandUid = islandUid):
self.islandTeleportConfirmation(confirmed, islandUid)
localAvatar.setTeleportFlag(PiratesGlobals.TFNoIslandToken, localAvatar.confirmIslandTokenTeleport, [
islandUid])
localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleport, [
islandUid])
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
localAvatar.clearTeleportFlag(PiratesGlobals.TFNoIslandToken)
localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)
def islandTeleportConfirmation(self, confirmed, islandUid):
if confirmed:
islandDoId = self.cr.uidMgr.getDoId(islandUid)
island = self.cr.getDo(islandDoId)
if island and island.getParentObj() is self.cr.activeWorld:
self.localTeleport(locationName = island.getName())
else:
self.sendUpdate('requestTeleportToIsland', [
islandUid])
base.cr.loadingScreen.showTarget(islandUid)
base.cr.loadingScreen.showHint(islandUid)
def teleportToIslandResponse(self, instanceDoId, islandDoId):
if instanceDoId and islandDoId:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId)
def stowawayTeleportResponse(self, instanceDoId, islandDoId):
if instanceDoId and islandDoId:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId, doEffect = False, stowawayEffect = True)
base.cr.loadingScreen.showTarget(base.cr.doId2do[islandDoId].getUniqueId())
def queryAvatarForTeleport(self, avId):
self.setTeleportQueryId(avId)
def teleportConfirmation(confirmed, avId = avId):
if confirmed:
handle = self.cr.identifyAvatar(avId)
if handle:
shardId = self.cr.distributedDistrict.doId
                    # Reconstructed from decompiler residue: fall back to
                    # (0, 0) when the avatar is not in a band.
                    (bandMgr, bandId) = localAvatar.getBandId() or (0, 0)
guildId = localAvatar.getGuildId()
handle.sendTeleportQuery(avId, bandMgr, bandId, guildId, shardId)
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
def handleAvatarTeleportQuery(self, requesterId, requesterBandMgrId, requesterBandId, requesterGuildId, requesterShardId):
handle = self.cr.identifyAvatar(requesterId)
if not handle:
return None
if self.cr.identifyFriend(requesterId):
if requesterId in localAvatar.ignoreList or self.cr.avatarFriendsManager.checkIgnored(requesterId):
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFIgnore), 0, 0, 0, sendToId = requesterId)
return None
avName = handle.getName()
def confirmed(canTeleportTo, avId, failedFlag, avName = avName):
if canTeleportTo:
if self.cr.getActiveWorld() and self.cr.distributedDistrict and localAvatar.getParentObj():
handle.sendTeleportResponse(PiratesGlobals.TAAvailable, self.cr.distributedDistrict.doId, self.cr.getActiveWorld().doId, localAvatar.getParentObj().doId, sendToId = requesterId)
else:
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFUnavailable), 0, 0, 0, sendToId = requesterId)
elif localAvatar.failedTeleportMessageOk(requesterId):
localAvatar.setSystemMessage(requesterId, OTPLocalizer.WhisperFailedVisit % avName)
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(failedFlag), 0, 0, 0, sendToId = requesterId)
localAvatar.confirmTeleportTo(confirmed, requesterId, avName, requesterBandMgrId, requesterBandId, requesterGuildId)
def handleAvatarTeleportResponse(self, avId, available, shardId, instanceDoId, areaDoId):
        if avId != self.teleportQueryId:
self.clearTeleportQueryId()
return None
self.clearTeleportQueryId()
handle = self.cr.identifyAvatar(avId)
if handle:
avName = handle.getName()
else:
return None
if available == PiratesGlobals.TAAvailable:
            def teleportConfirmation(confirmed, shardId = shardId, instanceDoId = instanceDoId, avId = avId, avatarParentId = areaDoId):
if confirmed:
self.requestTeleportToAvatar(shardId, instanceDoId, avatarId = avId, avatarParentId = areaDoId)
localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleportToPlayer, [
areaDoId])
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)
else:
flag = PiratesGlobals.decodeTeleportFlag(available)
            if flag == PiratesGlobals.TAIgnore:
                # Ignored requests fail silently.
                pass
if flag in PiratesGlobals.TFNoTeleportToReasons:
localAvatar.guiMgr.createWarning(PiratesGlobals.TFNoTeleportToReasons[flag] % avName, duration = 10)
def initiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, queue = False, stowawayEffect = False):
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
if self.cr.activeWorld:
fromInstanceType = self.cr.activeWorld.getType()
else:
fromInstanceType = PiratesGlobals.INSTANCE_NONE
if instanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_WELCOME] and fromInstanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_GENERIC,
PiratesGlobals.INSTANCE_NONE]:
if not base.config.GetBool('can-break-teleport-rules', 0):
                import pdb
pdb.set_trace()
return None
if self.amInTeleport():
if queue:
self.queueInitiateTeleport(instanceType, instanceName, shardId, locationUid, instanceDoId, doneCallback, startedCallback, gameType, friendDoId, friendAreaDoId, doEffect, stowawayEffect)
return None
return None
self.setAmInTeleport()
if instanceType == PiratesGlobals.INSTANCE_MAIN and not locationUid:
locationUid = localAvatar.returnLocation
localAvatar.teleportFriendDoId = friendDoId
self.doEffect = doEffect
self.stowawayEffect = stowawayEffect
self.sendUpdate('initiateTeleport', [
instanceType,
fromInstanceType,
shardId,
locationUid,
instanceDoId,
instanceName,
gameType,
friendDoId,
friendAreaDoId])
self.doneCallback = doneCallback
self.startedCallback = startedCallback
self.teleportInit(instanceType, fromInstanceType, instanceName)
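    # The doneCallback/startedCallback stored above are not invoked here: the
    # AI round trip leads to forceTeleportStart(), and teleportHandlerExists()
    # eventually hands doneCallback to the teleport handler that completes it.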
def queueInitiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, stowawayEffect = False):
teleInfo = [
instanceType,
instanceName,
shardId,
locationUid,
instanceDoId,
doneCallback,
startedCallback,
gameType,
friendDoId,
friendAreaDoId,
doEffect,
stowawayEffect]
self.teleportQueue.append(teleInfo)
def processTeleportQueue(task = None):
if self.amInTeleport():
return Task.again
if not self.teleportQueue:
return Task.done
teleportInfo = self.teleportQueue.pop(0)
self.initiateTeleport(*teleportInfo)
if self.teleportQueue:
return Task.again
return Task.done
self.teleportQueueProcess = taskMgr.doMethodLater(1, processTeleportQueue, 'processTeleportQueue')
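    # The queued-teleport task above re-fires once per second (Task.again)
    # while a teleport is still in flight, then drains the queue one entry
    # at a time.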
def amInTeleport(self):
return localAvatar.testTeleportFlag(PiratesGlobals.TFInTeleport)
def setAmInTeleport(self):
localAvatar.b_setTeleportFlag(PiratesGlobals.TFInTeleport)
localAvatar.b_clearTeleportFlag(PiratesGlobals.TFLookoutJoined)
def clearAmInTeleport(self):
localAvatar.clearTeleportFlag(PiratesGlobals.TFInInitTeleport)
localAvatar.b_clearTeleportFlag(PiratesGlobals.TFInTeleport)
def setTeleportQueryId(self, avId):
self.teleportQueryId = avId
def clearTeleportQueryId(self):
self.teleportQueryId = 0
def initiateTeleportAI(self, instanceType, instanceName):
self.teleportInit(instanceType, instanceName)
def teleportInit(self, instanceType, fromInstanceType, instanceName, gameType = None):
self.clearTeleportQueryId()
self.oldWorld = base.cr.activeWorld
self.instanceType = instanceType
self.fromInstanceType = fromInstanceType
self.instanceName = instanceName
self.gameType = gameType
self.miniLog = MiniLog('TeleportLog')
        s = MiniLogSentry(self.miniLog, 'teleportInit', instanceType, fromInstanceType, instanceName, gameType)
def teleportHasBegun(self, instanceType, fromInstanceType, instanceName, gameType):
if not self.miniLog:
self.miniLog = MiniLog('TeleportLog')
s = MiniLogSentry(self.miniLog, 'teleportHasBegun', instanceType, fromInstanceType, instanceName, gameType)
if self.startedCallback:
self.startedCallback()
self.startedCallback = None
        if self.oldWorld is None or self.oldWorld.isEmpty():
self.teleportInit(instanceType, fromInstanceType, instanceName, gameType)
def getRemoveInterestEventName(self):
return self.uniqueName('teleportRemoveInterest')
def getAddInterestEventName(self):
return self.uniqueName('teleportAddInterest')
def forceTeleportStart(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
s = MiniLogSentry(self.miniLog, 'forceTeleportStart', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
self.setAmInTeleport()
localAvatar.guiMgr.request('Cutscene')
if not base.transitions.fadeOutActive():
base.transitions.fadeOut()
if self.fromInstanceType == PiratesGlobals.INSTANCE_MAIN:
self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
else:
self.inInstanceType = self.instanceType
if self.fromInstanceType == PiratesGlobals.INSTANCE_PVP:
localAvatar.clearTeleportFlag(PiratesGlobals.TFInPVP)
elif self.fromInstanceType == PiratesGlobals.INSTANCE_TUTORIAL:
localAvatar.clearTeleportFlag(PiratesGlobals.TFInTutorial)
def fadeDone():
base.cr.loadingScreen.show()
s = MiniLogSentry(self.miniLog, 'fadeDone')
curParent = localAvatar.getParentObj()
parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
if parentIsZoneLOD:
localAvatar.leaveZoneLOD(curParent)
curParent.turnOff()
            if self.cr.doId2do.get(tzParent) is None:
self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
else:
self.teleportAddInterestTZ(instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
localAvatar.guiMgr.request('Interactive')
taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
taskMgr.doMethodLater(1, fadeDone, self.uniqueName('fadeDone'), extraArgs = [])
def teleportAddInterestTZ(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
s = MiniLogSentry(self.miniLog, 'teleportAddInterestTZ', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
addEvent = self.getAddInterestEventName()
self.accept(addEvent, self.teleportAddInterestCompleteTZ, extraArgs = [
tzDoId,
thDoId,
worldGridDoId])
localAvatar.setInterest(tzParent, tzZone, [
'TZInterest'], addEvent)
self.instanceName = instanceName
def teleportAddInterestCompleteTZ(self, tzDoId, thDoId, worldGridDoId):
s = MiniLogSentry(self.miniLog, 'teleportAddInterestCompleteTZ', tzDoId, thDoId, worldGridDoId)
base.cr.relatedObjectMgr.requestObjects([
tzDoId], eachCallback = lambda param1, param2 = thDoId: self.teleportZoneExists(param1, param2))
def teleportZoneExists(self, teleportZone, thDoId):
s = MiniLogSentry(self.miniLog, 'teleportZoneExists', teleportZone, thDoId)
base.cr.relatedObjectMgr.requestObjects([
thDoId], eachCallback = lambda param1, param2 = teleportZone: self.teleportHandlerExists(param1, param2))
def teleportHandlerExists(self, teleportHandler, teleportZone):
s = MiniLogSentry(self.miniLog, 'teleportHandlerExists', teleportHandler, teleportZone)
teleportHandler.instanceName = self.instanceName
teleportHandler.instanceType = self.instanceType
teleportHandler.doneCallback = self.doneCallback
self.doneCallback = None
teleportHandler.oldWorld = self.oldWorld
self.oldWorld = None
teleportHandler.miniLog = self.miniLog
self.miniLog = None
teleportHandler.startTeleport()
def localAvTeleportFinishedRequest(self, task = None):
if not self.amInTeleport():
messenger.send('localAvTeleportFinished')
def createSpawnInterests(self, parents, callback, destGrid, teleportingObj):
s = MiniLogSentry(self.miniLog, 'createSpawnInterests', parents, callback.__name__, destGrid, teleportingObj)
parentsLen = len(parents)
if self.miniLog:
self.miniLog.appendLine('parents - %s' % (parents,))
self.miniLog.appendLine('destGrid - %s' % (destGrid,))
if parentsLen == 0:
logBlock(2, self.miniLog)
callback(destGrid, teleportingObj)
else:
parentObj = base.cr.doId2do.get(parents[0])
if parentObj:
callback(parentObj, teleportingObj)
elif parentsLen > 2 and parents[2] in base.cr.doId2do:
base.cr.relatedObjectMgr.requestObjects([
parents[0]], eachCallback = lambda param1 = None, param2 = teleportingObj: callback(param1, param2))
localAvatar.setInterest(parents[2], parents[1], [
'instanceInterest'])
elif parentsLen > 2:
parentParentId = parents[2]
parentParentZone = parents[1]
else:
parentParentId = '<None Given>'
parentParentZone = '<None Given>'
parentId = parents[0]
self.notify.warning(('createSpawnInterests: parent %s of parent %s in zone %s ' + 'does not exist locally, aborting teleport') % (parentParentId, parentId, parentParentZone))
self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
def initiateCrossShardDeploy(self, shardId = 0, islandUid = '', shipId = 0, doneCallback = None, startedCallback = None, doEffect = True):
if not islandUid or not shipId:
return None
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
if self.cr.activeWorld:
fromInstanceType = self.cr.activeWorld.getType()
else:
fromInstanceType = PiratesGlobals.INSTANCE_NONE
if self.amInTeleport():
return None
self.setAmInTeleport()
self.doEffect = doEffect
self.sendUpdate('requestCrossShardDeploy', [
shardId,
islandUid,
shipId])
self.doneCallback = doneCallback
self.startedCallback = startedCallback
self.teleportInit(PiratesGlobals.INSTANCE_MAIN, fromInstanceType, 'Main World')
def notifyFriendVisit(self, avId):
av = base.cr.identifyAvatar(avId)
if av:
avName = av.getName()
else:
avName = PLocalizer.Someone
localAvatar.setSystemMessage(avId, OTPLocalizer.WhisperComingToVisit % avName)
localAvatar.guiMgr.messageStack.addTextMessage(OTPLocalizer.WhisperComingToVisit % avName, icon = ('friends', None))
| [
"[email protected]"
] | |
3ea0bb442577424dd93a06877b4cb480971dc827 | d7f4e330f5d803c8cd495729fd86da61b89565f3 | /torch/_meta_registrations.py | 0511b5188fbea63e9c0427f06428dc9859aa3885 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | awf/pytorch | 55ff84549c17579a1f62910ef2ac7b1dcd6fa897 | 0dceaf07cd1236859953b6f85a61dc4411d10f87 | refs/heads/master | 2023-02-08T13:19:22.073279 | 2023-01-29T10:36:40 | 2023-01-29T10:36:43 | 239,372,903 | 0 | 0 | NOASSERTION | 2020-02-09T20:55:23 | 2020-02-09T20:55:22 | null | UTF-8 | Python | false | false | 82,649 | py | import math
from typing import List, Optional, Union
import torch
import torch._prims_common as utils
from torch import Tensor
from torch._decomp import _add_op_to_registry, global_decomposition_table, meta_table
from torch._ops import OpOverload
from torch._prims import _elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
from torch._prims_common import (
check,
corresponding_complex_dtype,
corresponding_real_dtype,
elementwise_dtypes,
ELEMENTWISE_TYPE_PROMOTION_KIND,
FloatLike,
IntLike,
make_contiguous_strides_for,
)
from torch._prims_common.wrappers import out_wrapper
from torch._refs import _broadcast_shapes
from torch._subclasses.fake_tensor import check_no_bool_index_tensors
from torch.utils._pytree import tree_map
aten = torch.ops.aten
_meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta")
def register_meta(op):
def wrapper(fn):
def register(op):
_add_op_to_registry(meta_table, op, fn)
tree_map(register, op)
return fn
return wrapper
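# Usage, as seen throughout this file: decorate the meta kernel with the
# OpOverload(s) it implements, e.g. @register_meta([aten.mm.default]); tree_map
# lets `op` be a single overload or any pytree of overloads.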
def toRealValueType(dtype):
from_complex = {
torch.complex32: torch.half,
torch.cfloat: torch.float,
torch.cdouble: torch.double,
}
return from_complex.get(dtype, dtype)
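# e.g. toRealValueType(torch.cfloat) -> torch.float; non-complex dtypes such
# as torch.float64 pass through unchanged.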
@register_meta([aten._fft_c2c.default, aten._fft_c2c.out])
@out_wrapper()
def meta_fft_c2c(self, dim, normalization, forward):
assert self.dtype.is_complex
return self.new_empty(self.size())
@register_meta([aten._fft_r2c.default, aten._fft_r2c.out])
@out_wrapper()
def meta_fft_r2c(self, dim, normalization, onesided):
assert self.dtype.is_floating_point
output_sizes = list(self.size())
if onesided:
last_dim = dim[-1]
last_dim_halfsize = (output_sizes[last_dim] // 2) + 1
output_sizes[last_dim] = last_dim_halfsize
return self.new_empty(
output_sizes, dtype=utils.corresponding_complex_dtype(self.dtype)
)
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
assert out.ndim == 1 and out.size(0) == n
return out
@register_meta(aten.randint.default)
def meta_randint(
high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.randint.low)
def meta_randint_low(
low, high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.rand.default)
def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
assert self.dtype.is_complex
output_sizes = list(self.size())
output_sizes[dim[-1]] = lastdim
return self.new_empty(output_sizes, dtype=toRealValueType(self.dtype))
@register_meta(aten.copy_.default)
def meta_copy_(self, src, non_blocking=False):
return self
def inferUnsqueezeGeometry(tensor, dim):
result_sizes = list(tensor.size())
result_strides = list(tensor.stride())
new_stride = 1 if dim >= tensor.dim() else result_sizes[dim] * result_strides[dim]
result_sizes.insert(dim, 1)
result_strides.insert(dim, new_stride)
return result_sizes, result_strides
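# Worked example: a contiguous (2, 3) tensor has sizes [2, 3] and strides
# [3, 1]; inferUnsqueezeGeometry(t, 1) returns sizes [2, 1, 3] and strides
# [3, 3, 1], since the inserted stride is result_sizes[dim] * result_strides[dim].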
@register_meta(aten.unsqueeze_.default)
def meta_unsqueeze_(self, dim):
    # NB: maybe_wrap_dim is not in scope in this file; canonicalize_dim from
    # torch._prims_common (imported as `utils`) performs the same wrapping
    # for rank self.dim() + 1.
    dim = utils.canonicalize_dim(self.dim() + 1, dim)
g_sizes, g_strides = inferUnsqueezeGeometry(self, dim)
self.as_strided_(g_sizes, g_strides)
return self
# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
result_size = list(self.size())
if self.dim() > 0:
result_size[dim] = index.numel()
return self.new_empty(result_size)
@register_meta(aten.index_select.out)
def meta_index_select_out(self, dim, index, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.index_select(self, dim, index))
@register_meta([aten.max.default, aten.max.unary_out])
@out_wrapper()
def meta_max(self):
return self.new_empty(())
@register_meta(aten.max.dim)
def meta_max_dim(self, dim, keepdim=False):
dim = utils.reduction_dims(self.shape, (dim,))
output_shape = _compute_reduction_shape(self, dim, keepdim)
return (
self.new_empty(output_shape),
self.new_empty(output_shape, dtype=torch.long),
)
@register_meta([aten.min.default])
def meta_min(self):
return self.new_empty(())
@register_meta(aten.angle.default)
def meta_angle(self):
if self.is_complex():
result_dtype = corresponding_real_dtype(self.dtype)
else:
_, result_dtype = elementwise_dtypes(
self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
return torch.empty_like(self, dtype=result_dtype)
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.angle(self))
# From aten/src/ATen/native/LinearAlgebraUtils.h
def squareCheckInputs(self: Tensor, f_name: str):
assert (
self.dim() >= 2
), f"{f_name}: The input tensor must have at least 2 dimensions."
assert self.size(-1) == self.size(
-2
), f"{f_name}: A must be batches of square matrices, but they are {self.size(-2)} by {self.size(-1)} matrices"
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkFloatingOrComplex(
t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True
):
dtype = t.dtype
check(
t.is_floating_point() or t.is_complex(),
lambda: f"{f_name}, : Expected a floating point or complex tensor as input. Got , {dtype}",
)
if allow_low_precision_dtypes:
check(
dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble),
lambda: f"{f_name} : Low precision dtypes not supported. Got {dtype}",
)
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"):
check(
A.dim() >= 2,
lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
)
def checkUplo(uplo: str):
uplo_uppercase = uplo.upper()
assert (
        len(uplo) == 1 and uplo_uppercase in ("U", "L")
), f"Expected UPLO argument to be 'L' or 'U', but got {uplo}"
# @register_meta(aten.linalg_eigh.default)
def meta_linalg_eigh(self, uplo="L"):
    squareCheckInputs(self, "linalg_eigh")
    checkUplo(uplo)
    real_dtype = toRealValueType(self.dtype)
    assert self.dim() >= 2
    # Eigenvalues are real and drop the last matrix dimension; eigenvectors
    # keep the full batched shape, transposed to the column-major layout
    # LAPACK produces.
    values = self.new_empty(self.shape[:-1], dtype=real_dtype)
    vectors = self.new_empty(self.shape)
    vectors.transpose_(-2, -1)
    return (values, vectors)
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_cholesky_ex.default)
def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False):
squareCheckInputs(A, "linalg.cholesky")
checkFloatingOrComplex(A, "linalg.cholesky")
A_shape = A.shape
ndim = len(A_shape)
# L
L_strides = make_contiguous_strides_for(A_shape, False)
L = A.new_empty(A_shape)
L.as_strided_(A_shape, L_strides)
# infos
infos = A.new_empty(A_shape[0 : ndim - 2], dtype=torch.int32)
return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_inv_ex.default)
def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False):
squareCheckInputs(A, "linalg.inv_ex")
checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False)
L = A.new_empty(A.shape)
L.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
infos = A.new_empty(A.shape[:-2], dtype=torch.int32)
return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
# NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml
@register_meta(aten._linalg_svd.default)
def _linalg_svd_meta(
    A: Tensor, full_matrices: bool = False, compute_uv: bool = True, driver: Optional[str] = None
):
checkIsMatrix(A, "linalg.svd")
checkFloatingOrComplex(A, "linalg.svd")
batch_dims = list(A.shape[:-2])
m = A.shape[-2]
n = A.shape[-1]
k = min(m, n)
if compute_uv:
U_shape = batch_dims + [m, m if full_matrices else k]
U = A.new_empty(U_shape)
U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False))
V_shape = batch_dims + [n if full_matrices else k, n]
V = A.new_empty(V_shape)
# TODO: need to distinguish cuSOLVER case? (see original code)
V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=False))
else:
# doesn't matter
U = A.new_empty([0])
V = A.new_empty([0])
# S is always real, even when A is complex.
S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype))
return U, S, V
# From aten/src/ATen/native/LinearAlgebra.cpp
@register_meta(aten._linalg_det.default)
def _linalg_det_meta(A):
squareCheckInputs(A, "linalg.det")
checkFloatingOrComplex(A, "linalg.det")
det = A.new_empty(A.shape[:-2])
LU = A.new_empty(A.shape)
LU.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
pivots = A.new_empty(A.shape[:-1], dtype=torch.int32)
return det, LU, pivots
# From aten/src/ATen/native/ReflectionPad.cpp
@register_meta(
[aten.reflection_pad2d_backward.default, aten.replication_pad2d_backward.default]
)
def meta_pad2d_backward(grad_output, self, padding):
dim_w = 2
dim_h = 1
dim_plane = 0
nbatch = 1
self_shape = self.shape
if self.dim() == 4:
nbatch = self_shape[0]
dim_w += 1
dim_h += 1
dim_plane += 1
pad_l = padding[0]
pad_r = padding[1]
pad_t = padding[2]
pad_b = padding[3]
nplane = self_shape[dim_plane]
input_h = self_shape[dim_h]
input_w = self_shape[dim_w]
output_h = input_h + pad_t + pad_b
output_w = input_w + pad_l + pad_r
check(
output_w == grad_output.shape[dim_w],
lambda: f"gradOutput width unexpected. Expected: {output_w}, Got: {grad_output.shape[dim_w]}",
)
check(
output_h == grad_output.shape[dim_h],
lambda: f"gradOutput height unexpected. Expected: {output_h}, Got: {grad_output.shape[dim_h]}",
)
return self.new_empty(self.shape)
@register_meta(aten.reflection_pad2d.default)
def meta_pad2d(self, padding):
valid_dims = self.size(1) != 0 and self.size(2) != 0
check(
(self.ndim == 3 and valid_dims)
or (self.ndim == 4 and valid_dims and self.size(3) != 0),
lambda: f"3D or 4D (batch mode) tensor expected for input, but got: {self}",
)
if self.ndim == 4:
nbatch, nplane, input_h, input_w = self.shape
else:
nbatch = 1
nplane, input_h, input_w = self.shape
pad_l, pad_r, pad_t, pad_b = padding
output_h = input_h + pad_t + pad_b
output_w = input_w + pad_l + pad_r
if self.ndim == 3:
return self.new_empty((nplane, output_h, output_w))
else:
return self.new_empty((nbatch, nplane, output_h, output_w))
@register_meta([aten.bernoulli.default, aten.bernoulli.out])
@out_wrapper()
def meta_bernoulli(self, *, generator=None):
# https://github.com/pytorch/pytorch/issues/88612
return torch.empty_like(self).contiguous()
@register_meta(aten.bernoulli_.float)
def meta_bernoulli_(self, p=0.5, generator=None):
return self
@register_meta(aten.bernoulli.p)
def meta_bernoulli_p(self, p=0.5, generator=None):
# https://github.com/pytorch/pytorch/issues/88612
return torch.empty_like(self).contiguous()
@register_meta(aten._fused_moving_avg_obs_fq_helper.default)
def meta__fused_moving_avg_obs_fq_helper(
self,
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant=False,
symmetric_quant=False,
):
check(
ch_axis < self.dim(),
lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()",
)
mask = torch.empty_like(self, dtype=torch.bool)
return (torch.empty_like(self), mask)
def dot_check(self, other):
check(
self.dim() == 1 and other.dim() == 1,
lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
)
@register_meta(aten.dot.default)
def meta_dot(self, tensor):
dot_check(self, tensor)
return self.new_empty(())
@register_meta([aten.mm.default])
def meta_mm(a, b):
check(a.dim() == 2, lambda: "a must be 2D")
check(b.dim() == 2, lambda: "b must be 2D")
N, M1 = a.shape
M2, P = b.shape
check(M1 == M2, lambda: "a and b must have same reduction dim")
return a.new_empty(N, P)
def _compute_reduction_shape(self, dims, keepdim):
if keepdim:
return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim))
return utils.compute_reduction_output_shape(self.shape, dims)
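# e.g. reducing shape (4, 5, 6) over dims (1,) gives (4, 1, 6) with
# keepdim=True and (4, 6) with keepdim=False.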
# FakeTensors (meta tensors with a device) will report device as meta
# when running meta kernels. Here, access the "fake device" of FakeTensor if it
# exists so meta kernels which have diverge per device will be more
# accurate when run with FakeTensors
def device_hint(tensor) -> "str":
if isinstance(tensor, torch._subclasses.FakeTensor):
return tensor.fake_device.type
else:
return "cuda" # default to cuda
def calc_conv_nd_return_shape(
input_tensor: torch.Tensor,
weight: torch.Tensor,
stride: Union[List[int], int],
padding: Union[List[int], int],
dilation: Union[List[int], int],
is_transposed: bool,
groups: int,
output_padding: Optional[Union[List[int], int]] = None,
):
def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
Returns:
The output length
"""
return (ln + 2 * p - d * (k - 1) - 1) // s + 1
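    # Sanity check of the formula: ln=5, p=1, d=1, k=3, s=1 gives
    # (5 + 2 - 2 - 1) // 1 + 1 = 5, i.e. a "same"-padded 3x3 convolution.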
def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
if transposed convolution is used.
See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
op: output padding in that dim
Returns:
The output length
"""
return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1
kernel_size = weight.shape[2:]
dims = input_tensor.shape[2:]
if is_transposed:
out_channels = groups * weight.shape[1]
else:
out_channels = weight.shape[0]
if weight.shape[1] * groups != input_tensor.shape[1]:
raise RuntimeError("Invalid channel dimensions")
ret_shape = [input_tensor.shape[0], out_channels]
if isinstance(stride, IntLike):
stride = [stride] * len(dims)
elif len(stride) == 1:
stride = [stride[0]] * len(dims)
if isinstance(padding, IntLike):
padding = [padding] * len(dims)
elif len(padding) == 1:
padding = [padding[0]] * len(dims)
if isinstance(dilation, IntLike):
dilation = [dilation] * len(dims)
elif len(dilation) == 1:
dilation = [dilation[0]] * len(dims)
output_padding_list: Optional[List[int]] = None
if output_padding:
if isinstance(output_padding, IntLike):
output_padding_list = [output_padding] * len(dims)
elif len(output_padding) == 1:
output_padding_list = [output_padding[0]] * len(dims)
else:
output_padding_list = output_padding
for i in range(len(dims)):
# If output_padding is present, we are dealing with a transposed convolution
if output_padding_list:
ret_shape.append(
_formula_transposed(
dims[i],
padding[i],
dilation[i],
kernel_size[i],
stride[i],
output_padding_list[i],
)
)
else:
ret_shape.append(
_formula(dims[i], padding[i], dilation[i], kernel_size[i], stride[i])
)
return ret_shape
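# Illustrative call (values made up): a (1, 3, 8, 8) input with a (16, 3, 3, 3)
# weight, stride=1, padding=1, dilation=1, groups=1, is_transposed=False
# returns [1, 16, 8, 8].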
def is_channels_last(ten):
return torch._prims_common.suggest_memory_format(ten) == torch.channels_last
@register_meta(aten.convolution.default)
def meta_conv(
input_tensor: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
stride: List[int],
padding: List[int],
dilation: List[int],
is_transposed: bool,
output_padding: List[int],
groups: int,
):
def pick_memory_format():
if device_hint(input_tensor) == "cuda":
if is_channels_last(input_tensor) or is_channels_last(weight):
return torch.channels_last
else:
if is_channels_last(input_tensor):
return torch.channels_last
if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
shape_out = calc_conv_nd_return_shape(
input_tensor,
weight,
stride,
padding,
dilation,
is_transposed,
groups,
output_padding if is_transposed else None,
)
out = input_tensor.new_empty(shape_out)
out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload]
return out
if torch._C.has_mkldnn:
_meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library(
"mkldnn", "IMPL", "Meta"
)
def pick_mkldnn_conv_memory_format(input_tensor, weight):
if weight.is_mkldnn:
return torch.channels_last
if is_channels_last(input_tensor) or is_channels_last(weight):
return torch.channels_last
if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
@register_meta(torch.ops.mkldnn._convolution_pointwise.default)
def meta_mkldnn_convolution_default(
input_tensor,
weight,
bias,
padding,
stride,
dilation,
groups,
attr,
scalars,
algorithm,
):
shape_out = calc_conv_nd_return_shape(
input_tensor, weight, stride, padding, dilation, False, groups, []
)
out = input_tensor.new_empty(shape_out)
out_memory_format = torch.channels_last
out = out.to(memory_format=out_memory_format) # type: ignore[call-overload]
return out
@register_meta(torch.ops.mkldnn._convolution_pointwise.binary)
def meta_mkldnn_convolution_binary(
input_tensor,
other,
weight,
bias,
padding,
stride,
dilation,
groups,
binary_attr,
alpha,
unary_attr,
unary_scalars,
unary_algorithm,
):
out = input_tensor.new_empty(other.size())
out = out.to(memory_format=torch.channels_last) # type: ignore[call-overload]
return out
@register_meta(torch.ops.mkldnn._convolution_pointwise_.binary)
def meta_mkldnn_convolution_binary_inplace(
input_tensor,
other,
weight,
bias,
padding,
stride,
dilation,
groups,
binary_attr,
alpha,
unary_attr,
unary_scalars,
unary_algorithm,
):
return other
@register_meta(torch.ops.mkldnn._linear_pointwise.default)
def meta_linear_pointwise_default(
input_tensor, weight, bias, attr, scalars, algorithm
):
return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0]))
@register_meta(torch.ops.mkldnn._linear_pointwise.binary)
def meta_linear_pointwise_binary(input_tensor, other, weight, bias, attr):
out = input_tensor.new_empty(other.size())
return out
if torch._C.has_mkl:
_meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library(
"mkl", "IMPL", "Meta"
)
@register_meta(torch.ops.mkl._mkl_linear)
def meta_mkl_linear(
input_tensor,
packed_weight,
orig_weight,
bias,
batch_size,
):
return input_tensor.new_empty(
(*input_tensor.shape[:-1], orig_weight.shape[0])
)
# from check_dim_size() in aten/src/ATen/TensorUtils.cpp.
def check_dim_size(tensor, dim, dim_size, size):
check(
tensor.dim() == dim and tensor.shape[dim_size] == size,
lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, "
+ f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}",
)
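# e.g. check_dim_size(grad, 4, 1, 16) asserts grad is 4-D and grad.shape[1] == 16.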
@register_meta(aten.avg_pool2d.default)
def meta_avg_pool2d(
input,
kernel_size,
stride=(),
padding=(0,),
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
):
def unpack(name, val):
check(
len(val) in [1, 2],
lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints",
)
H = val[0]
W = H if len(val) == 1 else val[1]
return H, W
kH, kW = unpack("kernel_size", kernel_size)
check(
len(stride) in [0, 1, 2],
lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
if len(stride) == 0:
dH, dW = kH, kW
elif len(stride) == 1:
dH, dW = stride[0], stride[0]
else:
dH, dW = unpack("stride", stride)
padH, padW = unpack("padding", padding)
check(
divisor_override is None or divisor_override != 0,
lambda: "divisor must be not zero",
)
nbatch = input.size(-4) if input.dim() == 4 else 1
nInputPlane = input.size(-3)
inputHeight = input.size(-2)
inputWidth = input.size(-1)
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
memory_format = utils.suggest_memory_format(input)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
1,
1,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
)
if input.dim() == 3:
size = [nInputPlane, outputHeight, outputWidth]
else:
size = [nbatch, nInputPlane, outputHeight, outputWidth]
return torch.empty(
size, dtype=input.dtype, device=input.device, memory_format=memory_format
)
# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h.
def avg_pool2d_backward_shape_check(
input,
gradOutput,
nbatch,
kH,
kW,
dH,
dW,
padH,
padW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
):
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
1,
1,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
)
ndim = input.dim()
nOutputPlane = nInputPlane
check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane)
check_dim_size(gradOutput, ndim, ndim - 2, outputHeight)
check_dim_size(gradOutput, ndim, ndim - 1, outputWidth)
# Don't override the C++ registration.
@register_meta(aten.avg_pool2d_backward.default)
def meta_avg_pool2d_backward(
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
):
# From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func.
check(
len(kernel_size) == 1 or len(kernel_size) == 2,
lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints",
)
kH = kernel_size[0]
kW = kH if len(kernel_size) == 1 else kernel_size[1]
check(
len(stride) == 0 or len(stride) == 1 or len(stride) == 2,
lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
dH = kH if len(stride) == 0 else stride[0]
dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1]
check(
len(padding) == 1 or len(padding) == 2,
lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints",
)
padH = padding[0]
padW = padH if len(padding) == 1 else padding[1]
check(
divisor_override is None or divisor_override != 0,
lambda: "divisor must be not zero",
)
input_size = input.shape
nbatch = input_size[-4] if input.dim() == 4 else 1
nInputPlane = input_size[-3]
inputHeight = input_size[-2]
inputWidth = input_size[-1]
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
mem_format = utils.suggest_memory_format(input)
avg_pool2d_backward_shape_check(
input,
gradOutput_,
nbatch,
kH,
kW,
dH,
dW,
padH,
padW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
)
return torch.empty(
input_size, dtype=input.dtype, device=input.device, memory_format=mem_format
)
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
check(
self.ndim == 3 or self.ndim == 4,
lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
)
output_shape = self.shape[:-2] + tuple(output_size)
memory_format = utils.suggest_memory_format(self)
# need to set memory_format to preserve the memory format of the input
# channel last input should have channel last output
return torch.empty(
output_shape, dtype=self.dtype, device=self.device, memory_format=memory_format
)
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
check(
self.ndim == 4 or self.ndim == 5,
lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
)
return self.new_empty(self.shape[:-3] + tuple(output_size))
@register_meta(aten._adaptive_avg_pool2d_backward.default)
def meta__adaptive_avg_pool2d_backward(grad_out, self):
ndim = grad_out.ndim
for i in range(1, ndim):
check(
grad_out.size(i) > 0,
lambda: f"adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero \
size for non-batch dimensions, {grad_out.shape} with dimension {i} being empty",
)
check(
ndim == 3 or ndim == 4,
lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}",
)
check(
self.dtype == grad_out.dtype,
lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}",
)
return self.new_empty(self.shape)
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
if output_size is None:
raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
return repeats.new_empty(output_size)
@register_meta([aten.complex.default, aten.complex.out])
@out_wrapper()
def meta_complex(real, imag):
assert real.dtype.is_floating_point
assert imag.dtype.is_floating_point
out_shape = _broadcast_shapes(real.shape, imag.shape)
return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype))
@register_meta(aten.vdot.default)
def vdot(self, other):
    if not self.is_complex():
return torch.dot(self, other)
if self.is_conj():
if other.is_conj():
return torch.vdot(other.conj(), self.conj())
else:
return torch.dot(self.conj(), other)
elif other.is_conj():
return torch.dot(self, other.conj()).conj()
dot_check(self, other)
return self.new_empty(())
# Leaving this function around because a python implementation
# of indexing shape inference is useful,
# but not registering it to the dispatcher because we already
# get shape inference through structured kernels
@register_meta(aten.index.Tensor)
def meta_index_Tensor(self, indices):
check_no_bool_index_tensors(aten.index.Tensor, self, indices)
check(indices, lambda: "at least one index must be provided")
# aten::index is the internal advanced indexing implementation
# checkIndexTensorTypes and expandTensors
result: List[Optional[Tensor]] = []
for i, index in enumerate(indices):
if index is not None:
check(
index.dtype in [torch.long, torch.int, torch.int8, torch.bool],
lambda: "tensors used as indices must be long, int, byte or bool tensors",
)
if index.dtype in [torch.int8, torch.bool]:
nonzero = index.nonzero()
k = len(result)
check(
k + index.ndim <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim}",
IndexError,
)
for j in range(index.ndim):
check(
index.shape[j] == self.shape[k + j],
lambda: f"The shape of the mask {index.shape} at index {i} "
f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
IndexError,
)
result.append(nonzero.select(1, j))
else:
result.append(index)
else:
result.append(index)
indices = result
check(
len(indices) <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
)
# expand_outplace
import torch._refs as refs # avoid import cycle in mypy
indices = list(refs._maybe_broadcast(*indices))
# add missing null tensors
while len(indices) < self.ndim:
indices.append(None)
# hasContiguousSubspace
# true if all non-null tensors are adjacent
# See:
# https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
# https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
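    # Illustrative example: [None, idx0, idx1, None] has a contiguous subspace
    # (the advanced indices are adjacent); [idx0, None, idx1] does not, so its
    # advanced dims get transposed to the front below.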
state = 0
has_contiguous_subspace = False
for index in indices:
if state == 0:
if index is not None:
state = 1
elif state == 1:
if index is None:
state = 2
else:
if index is not None:
break
else:
has_contiguous_subspace = True
# transposeToFront
# This is the logic that causes the newly inserted dimensions to show up
# at the beginning of the tensor, if they're not contiguous
if not has_contiguous_subspace:
dims = []
transposed_indices = []
for i, index in enumerate(indices):
if index is not None:
dims.append(i)
transposed_indices.append(index)
for i, index in enumerate(indices):
if index is None:
dims.append(i)
transposed_indices.append(index)
self = self.permute(dims)
indices = transposed_indices
# AdvancedIndex::AdvancedIndex
# Now we can assume the indices have contiguous subspace
# This is simplified from AdvancedIndex which goes to more effort
# to put the input and indices in a form so that TensorIterator can
# take them. If we write a ref for this, probably that logic should
# get implemented
before_shape: List[int] = []
after_shape: List[int] = []
replacement_shape: List[int] = []
for dim, index in enumerate(indices):
if index is None:
if replacement_shape:
after_shape.append(self.shape[dim])
else:
before_shape.append(self.shape[dim])
else:
replacement_shape = list(index.shape)
return self.new_empty(before_shape + replacement_shape + after_shape)
@register_meta([aten.convolution_backward.default])
def meta_convolution_backward(
grad_output_,
input_,
weight_,
bias_sizes_opt,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
output_mask,
):
# High level logic taken from slow_conv3d_backward_cpu which should
# be representative of all convolution_backward impls
backend_grad_input = None
backend_grad_weight = None
backend_grad_bias = None
if output_mask[0]:
backend_grad_input = grad_output_.new_empty(input_.size())
if output_mask[1]:
backend_grad_weight = grad_output_.new_empty(weight_.size())
if output_mask[2]:
backend_grad_bias = grad_output_.new_empty(bias_sizes_opt)
return (backend_grad_input, backend_grad_weight, backend_grad_bias)
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
dim1 = batch1.size(1)
dim2 = batch2.size(2)
self = self.expand((dim1, dim2))
check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
check(
batch1.size(0) == batch2.size(0),
lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
)
check(
batch1.size(2) == batch2.size(1),
lambda: (
f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
f"and {batch2.size(1)}x{batch2.size(2)})"
),
)
check(
self.size(0) == dim1 and self.size(1) == dim2,
lambda: "self tensor does not match matmul output shape",
)
return self.new_empty(self.size())
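# Example (illustrative): addbmm with batch1 of shape (10, 3, 4) and batch2 of
# shape (10, 4, 5) reduces the 10 batched products into one (3, 5) result.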
@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
check(
x1.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
)
check(
x2.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
)
check(
x1.size(-1) == x2.size(-1),
lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
)
check(
utils.is_float_dtype(x1.dtype),
lambda: "cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
)
check(
utils.is_float_dtype(x2.dtype),
lambda: "cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
)
check(p >= 0, lambda: "cdist only supports non-negative p values")
check(
compute_mode in (None, 1, 2),
lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
)
r1 = x1.size(-2)
r2 = x2.size(-2)
batch_tensor1 = x1.shape[:-2]
batch_tensor2 = x2.shape[:-2]
output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
output_shape.extend([r1, r2])
return x1.new_empty(output_shape)
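# Example (illustrative): x1 of shape (5, 3, 2) and x2 of shape (4, 2) have
# batch shapes (5,) and (), which broadcast to (5,), so cdist returns (5, 3, 4).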
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
weight,
indices,
offsets,
scale_grad_by_freq=False,
mode=0,
sparse=False,
per_sample_weights=None,
include_last_offset=False,
padding_idx=-1,
):
check(
indices.dtype in (torch.long, torch.int),
lambda: f"expected indices to be long or int, got {indices.dtype}",
)
check(
offsets.dtype in (torch.long, torch.int),
lambda: f"expected offsets to be long or int, got {offsets.dtype}",
)
check(
utils.is_float_dtype(weight.dtype),
lambda: f"expected weight to be floating point type, got {weight.dtype}",
)
num_bags = offsets.size(0)
if include_last_offset:
check(
num_bags >= 1, lambda: "include_last_offset: numBags should be at least 1"
)
num_bags -= 1
output = weight.new_empty(num_bags, weight.size(1))
MODE_SUM, MODE_MEAN, MODE_MAX = range(3)
if per_sample_weights is not None:
check(
mode == MODE_SUM,
lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
)
check(
per_sample_weights.dtype == weight.dtype,
lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
)
check(
per_sample_weights.ndim == 1,
lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
)
check(
per_sample_weights.numel() == indices.numel(),
lambda: (
f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
f"to be the same as indices.numel() ({indices.numel()})"
),
)
def is_fast_path_index_select_scale(src, scale, output, padding_idx):
return (
is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
)
def is_fast_path_index_select(src, output, padding_idx):
return (
(src.dtype == torch.float or src.dtype == torch.half)
and src.stride(1) == 1
and output.stride(1) == 1
and padding_idx < 0
)
def is_fast_path(src, scale, output, padding_idx):
if scale is not None:
return is_fast_path_index_select_scale(src, scale, output, padding_idx)
else:
return is_fast_path_index_select(src, output, padding_idx)
if device_hint(offsets) != "cpu":
offset2bag = indices.new_empty(indices.size(0))
bag_size = indices.new_empty(offsets.size())
if mode == MODE_MAX:
max_indices = indices.new_empty(num_bags, weight.size(1))
else:
max_indices = indices.new_empty(0)
else:
fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
if mode == MODE_MEAN or mode == MODE_MAX or not fast_path_sum:
offset2bag = offsets.new_empty(indices.size(0))
else:
offset2bag = offsets.new_empty(0)
bag_size = offsets.new_empty(num_bags)
# This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
numBags = offsets.shape[0]
if mode == MODE_MAX:
if include_last_offset:
check(
numBags >= 1,
lambda: "include_last_offset: numBags should be at least 1",
)
numBags -= 1
max_indices = offsets.new_empty(numBags, weight.shape[1])
else:
max_indices = offsets.new_empty(bag_size.size())
return output, offset2bag, bag_size, max_indices
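# Example (illustrative): weight of shape (100, 16) with offsets of size 2 and
# include_last_offset=False pools into two bags, giving an output of (2, 16).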
@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
output, offset2bag, bag_size, max_indices = meta_embedding_bag(
weight, indices, offsets, *args
)
if device_hint(offsets) == "cpu":
bag_size = offsets.new_empty(offsets.size())
return output, offset2bag, bag_size, max_indices
def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
# if specified, dtype takes precedence
if dtype:
return dtype
if input.dtype.is_floating_point or input.dtype.is_complex:
return input.dtype
elif promote_int_to_long:
return torch.long
return input.dtype
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
output_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
dims = utils.reduction_dims(input.shape, dims)
output_shape = _compute_reduction_shape(input, dims, keepdim)
return input.new_empty(output_shape, dtype=output_dtype)
@register_meta(aten.nanmedian.default)
def meta_nanmedian(input):
output_shape = utils.compute_reduction_output_shape(
input.shape, tuple(range(input.dim()))
)
return input.new_empty(output_shape)
@register_meta([aten.nanmedian.dim, aten.nanmedian.dim_values])
@out_wrapper("values", "indices")
def meta_nanmedian_dim(input, dim=-1, keepdim=False):
dim = utils.reduction_dims(input.shape, (dim,))
output_shape = _compute_reduction_shape(input, dim, keepdim)
return (
input.new_empty(output_shape),
input.new_empty(output_shape, dtype=torch.long),
)
@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
return self
@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
check(
len(repeats) >= self.dim(),
lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
)
# Add new leading dimensions to the tensor if the
# number of target dimensions is larger than the
# number of source dimensions.
num_new_dimensions = len(repeats) - self.dim()
padded_size = (1,) * num_new_dimensions + tuple(self.shape)
target_size = [padded_size[i] * repeats[i] for i in range(len(repeats))]
return self.new_empty(target_size)
@register_meta(aten.zero_.default)
def meta_zero_(self):
return self
@register_meta(
[
aten.mul_.Scalar,
aten.div_.Scalar,
aten.mul_.Tensor,
aten.div_.Tensor,
aten.logical_and_.default,
aten.logical_or_.default,
aten.logical_xor_.default,
],
)
def meta_binop_inplace(self, other):
return self
@register_meta(
[
aten.add_.Scalar,
aten.sub_.Scalar,
aten.add_.Tensor,
aten.sub_.Tensor,
],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
return self
@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
return _elementwise_meta(
self, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
)
@register_meta(aten.zero.default)
def meta_zero(self):
return self.new_empty(self.shape)
@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
return self
@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
return torch.empty_like(self)
@register_meta(aten.relu_.default)
def meta_relu_(self):
return self
@register_meta(aten.index_put.default)
def meta_index_put(self, indices, values, accumulate=False):
return torch.empty_like(self)
@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
return self
@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
return self
@register_meta(aten.alias.default)
def meta_alias(self):
return self.view(self.shape)
def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
batch1_sizes = batch1.size()
batch2_sizes = batch2.size()
bs = batch1_sizes[0]
contraction_size = batch1_sizes[2]
res_rows = batch1_sizes[1]
res_cols = batch2_sizes[2]
output_size = (bs, res_rows, res_cols)
check(
batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
)
# TODO: handle out
output = batch2.new_empty(output_size)
if not is_bmm and self_baddbmm is not None:
check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
check(
self_baddbmm.size() == output_size,
lambda: "Expected an input tensor shape with shape {output_size} but got shape: {self.size()}",
)
return output
@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
return common_meta_baddbmm_bmm(self, mat2, True)
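# div_rtn is a port of ATen's helper of the same name: integer division that
# rounds toward negative infinity.  For plain Python ints the correction below
# never fires (// already floors), but it is spelled out for the symbolic
# integer types this file also has to handle.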
def div_rtn(x, y):
q = x // y
r = x % y
# WARNING: explicit bool conversion here is necessary;
# would be fixed by SymBool
if r != 0 and (bool(r < 0) != bool(y < 0)):
q -= 1
return q
def pooling_output_shape_pad_lr(
inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
outputSize = (
div_rtn(
inputSize
+ pad_l
+ pad_r
- dilation * (kernelSize - 1)
- 1
+ (stride - 1 if ceil_mode else 0),
stride,
)
+ 1
)
if ceil_mode:
if (outputSize - 1) * stride >= inputSize + pad_l:
outputSize -= 1
return outputSize
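# Example (illustrative): inputSize=7, kernelSize=3, pad_l=pad_r=1, stride=2,
# dilation=1, ceil_mode=False gives div_rtn(7 + 2 - 2 - 1, 2) + 1 = 4.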
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
check(stride != 0, lambda: "stride should not be zero")
check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
check(
pad <= kernelSize // 2,
lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
)
return pooling_output_shape_pad_lr(
inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
)
def pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
):
ndim = input.dim()
nOutputPlane = nInputPlane
check(
kW > 0 and kH > 0,
lambda: "kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
)
check(
dW > 0 and dH > 0,
lambda: "stride should be greater than zero, but got dH: {dH}, dW: {dW}",
)
check(
dilationH > 0 and dilationW > 0,
lambda: "dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
)
valid_dims = input.size(1) != 0 and input.size(2) != 0
if memory_format == torch.channels_last:
check(
ndim == 4 and valid_dims and input.size(3) != 0,
lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
" with optional 0 dim batch size for input, but got: {input.size()}",
)
else:
check(
(ndim == 3 and input.size(0) != 0 and valid_dims)
or (ndim == 4 and valid_dims and input.size(3) != 0),
lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
)
check(
kW // 2 >= padW and kH // 2 >= padH,
lambda: "pad should be smaller than or equal to half of kernel size, but got "
f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
)
check(
outputWidth >= 1 and outputHeight >= 1,
lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
"Output size is too small",
)
def max_pool2d_checks_and_compute_shape(
input, kernel_size, stride, padding, dilation, ceil_mode
):
# Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
def unpack(name, val):
check(
len(val) in [1, 2],
lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
)
H = val[0]
W = H if len(val) == 1 else val[1]
return H, W
kH, kW = unpack("kernel_size", kernel_size)
check(
len(stride) in [0, 1, 2],
lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
if len(stride) == 0:
dH, dW = kH, kW
else:
dH, dW = unpack("stride", stride)
padH, padW = unpack("padding", padding)
dilationH, dilationW = unpack("dilation", dilation)
nInputPlane = input.size(-3)
inputHeight = input.size(-2)
inputWidth = input.size(-1)
memory_format = utils.suggest_memory_format(input)
if memory_format == torch.channels_last:
check(
input.dim() == 4,
lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
)
elif memory_format == torch.contiguous_format:
check(
input.dim() in [3, 4],
lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
)
else:
check(
False,
lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
)
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
)
return nInputPlane, outputHeight, outputWidth
@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices
):
nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
self, kernel_size, stride, padding, dilation, ceil_mode
)
check(
self.dtype == grad_output.dtype,
lambda: "expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
)
nOutputPlane = nInputPlane
ndim = self.ndim
def _check_dim_size(t):
check_dim_size(t, ndim, ndim - 3, nOutputPlane)
check_dim_size(t, ndim, ndim - 2, outputHeight)
check_dim_size(t, ndim, ndim - 1, outputWidth)
_check_dim_size(grad_output)
_check_dim_size(indices)
memory_format = utils.suggest_memory_format(self)
return torch.empty(
self.shape, dtype=self.dtype, device=self.device, memory_format=memory_format
)
@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
input, kernel_size, stride, padding, dilation, ceil_mode
)
nbatch = input.size(-4) if input.dim() == 4 else 1
memory_format = utils.suggest_memory_format(input)
if input.dim() == 3:
size = [nInputPlane, outputHeight, outputWidth]
else:
size = [nbatch, nInputPlane, outputHeight, outputWidth]
return (
torch.empty(
size, dtype=input.dtype, device=input.device, memory_format=memory_format
),
torch.empty(
size, dtype=torch.int64, device=input.device, memory_format=memory_format
),
)
@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
grad_output,
input,
grid,
interpolation_mode,
padding_mode,
align_corners,
output_mask,
):
input_requires_grad = output_mask[0]
if input_requires_grad:
grad_input = torch.zeros_like(input, memory_format=torch.contiguous_format)
else:
grad_input = None
grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
return (grad_input, grad_grid)
@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
return torch.empty(size, *args, **kwargs)
@register_meta(
[
aten.randint_like.default,
aten.randint_like.low_dtype,
aten.randn_like.default,
aten.rand_like.default,
aten.full_like.default,
aten.ones_like.default,
]
)
def meta_like(self, *args, **kwargs):
return aten.empty_like.default(self, **kwargs)
# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
self, dtype=None, layout=None, device=None, pin_memory=None, memory_format=None
):
if layout == torch.sparse_coo:
check(
memory_format is None,
lambda: "memory format option is only supported by strided tensors",
)
res = torch.empty(
0,
dtype=self.dtype if dtype is None else dtype,
layout=layout,
device=self.device if device is None else device,
pin_memory=pin_memory,
)
if self.is_sparse:
res.sparse_resize_and_clear_(
self.size(), self.sparse_dim(), self.dense_dim()
)
else:
res.sparse_resize_and_clear_(self.size(), self.dim(), 0)
res._coalesced_(True)
return res
return aten.empty_like.default(
self,
dtype=dtype,
layout=layout,
device=device,
pin_memory=pin_memory,
memory_format=memory_format,
)
# hacky: Please remove after math.ceil works with arange
@register_meta(aten.arange.default)
def arange(end, **kwargs):
if isinstance(end, FloatLike):
end = math.ceil(end) # type: ignore[arg-type]
def is_integral(x):
return isinstance(x, IntLike) or isinstance(x, bool)
set_to_integral_dtype = kwargs.get("dtype", None) is None and is_integral(end)
if set_to_integral_dtype:
kwargs["dtype"] = torch.int64
return aten.empty([end], **kwargs)
@register_meta(aten.arange.start)
def arange_start(start, end, **kwargs):
return aten.arange(end - start, **kwargs)
@register_meta(aten.select.int)
def meta_select(self, dim, index):
ndim = self.dim()
check(
ndim != 0, lambda: "select() cannot be applied to a 0-dim tensor.", IndexError
)
dim = dim if dim >= 0 else dim + ndim
size = self.size(dim)
check(
not (-index > size or index >= size),
lambda: f"select(): index {index} out of range for tensor of size "
f"{self.size()} at dimension {dim}",
IndexError,
)
index = index if index >= 0 else index + size
new_size = list(self.size())
new_stride = list(self.stride())
new_storage_offset = self.storage_offset() + index * new_stride[dim]
del new_size[dim]
del new_stride[dim]
return self.as_strided(new_size, new_stride, new_storage_offset)
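# Example (illustrative): select(dim=1, index=2) on a contiguous (3, 4) tensor
# with strides (4, 1) yields shape (3,), stride (4,), storage_offset 2.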
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
return utils.clone_preserve_strides(self)
@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
return utils.clone_preserve_strides(self)
# TODO: Deduplicate this with canonicalize_dim
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
if dim_post_expr <= 0:
assert wrap_scalar
dim_post_expr = 1
min = -dim_post_expr
max = dim_post_expr - 1
assert not (dim < min or dim > max), f"dim {dim} out of bounds ({min}, {max})"
if dim < 0:
dim += dim_post_expr
return dim
def ensure_nonempty_size(t, dim):
return 1 if t.dim() == 0 else t.shape[dim]
# From aten/src/ATen/native/ScatterGatherChecks.h
def gather_shape_check(self, dim, index):
self_dims = max(self.dim(), 1)
index_dims = max(index.dim(), 1)
check(
self_dims == index_dims,
lambda: "Index tensor must have the same number of dimensions as input tensor",
)
for i in range(self_dims):
if i != dim:
check(
ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
lambda: f"Size does not match at dimension {i} expected index {index.shape}"
+ f" to be smaller than self {self.shape} apart from dimension {dim}",
)
@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
wrapped_dim = maybe_wrap_dim(dim, self.dim())
is_index_empty = index.numel() == 0
if not is_index_empty:
check(
index.dtype == torch.long,
lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}",
)
gather_shape_check(self, wrapped_dim, index)
return self.new_empty(index.shape)
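# Example (illustrative): gather along dim=1 with self of shape (2, 5) and an
# int64 index of shape (2, 3) returns a tensor of index's shape, (2, 3).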
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def get_operator_enum(reduce_, use_new_options=False):
if use_new_options:
if reduce_ == "sum":
return "REDUCE_ADD"
elif reduce_ == "prod":
return "REDUCE_MULTIPLY"
elif reduce_ == "mean":
return "REDUCE_MEAN"
elif reduce_ == "amax":
return "REDUCE_MAXIMUM"
elif reduce_ == "amin":
return "REDUCE_MINIMUM"
check(
False,
lambda: "reduce argument must be either sum, prod, mean, amax or amin.",
)
return
else:
if reduce_ == "add":
return "REDUCE_ADD"
elif reduce_ == "multiply":
return "REDUCE_MULTIPLY"
check(False, lambda: "reduce argument must be either add or multiply.")
return
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
if index.numel() != 0:
check(
index.dtype == torch.long,
lambda: f"{method_name}(): Expected dtype int64 for index",
)
if src_opt is not None:
check(
self.dtype == src_opt.dtype,
lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
)
def ensure_nonempty_dim(dim):
return max(dim, 1)
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_shape_check(self, dim, index, src_opt=None):
if index.numel() == 0:
return
check(
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
lambda: "Index tensor must have the same number of dimensions as self tensor",
)
is_wrong_shape = False
self_dims = ensure_nonempty_dim(self.dim())
# Check: index.size(d) <= self.size(d) for all d != dim
for d in range(self_dims):
index_d_size = ensure_nonempty_size(index, d)
if d == dim:
continue
if index_d_size > ensure_nonempty_size(self, d):
is_wrong_shape = True
break
# Check: index.size(d) <= src.size(d) for all d if src is Tensor
if not is_wrong_shape and src_opt is not None:
for d in range(self_dims):
index_d_size = ensure_nonempty_size(index, d)
if index_d_size > ensure_nonempty_size(src_opt, d):
is_wrong_shape = True
break
if src_opt is not None:
check(
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
lambda: "Index tensor must have the same number of dimensions as self tensor",
)
check(
not is_wrong_shape,
lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
+ f" apart from dimension {dim} and to be smaller than src {src_opt.shape}",
)
else:
check(
not is_wrong_shape,
lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
+ f" apart from dimension {dim}",
)
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False):
wrapped_dim = maybe_wrap_dim(dim, self.dim())
scatter_gather_dtype_check("scatter", self, index, src)
scatter_shape_check(self, wrapped_dim, index, src)
if reduce_ is not None:
# Check if we have a valid reduce operator.
get_operator_enum(reduce_, use_new_options)
@register_meta(aten.scatter_add.default)
def meta_scatter_add(self, dim, index, src):
scatter_meta_impl(self, dim, index, src, "add")
return self.new_empty(self.shape)
@register_meta(aten.scatter_add_)
def meta_scatter_add_(self, dim, index, src):
scatter_meta_impl(self, dim, index, src, "add")
return self
@register_meta(
[
aten.scatter.src,
aten.scatter.value,
aten.scatter.reduce,
aten.scatter.value_reduce,
]
)
@out_wrapper()
def meta_scatter(self, dim, index, src_or_value, reduce=None):
src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
scatter_meta_impl(self, dim, index, src, reduce)
return self.new_empty(self.shape)
@register_meta(
[
aten.scatter_.src,
aten.scatter_.value,
aten.scatter_.reduce,
aten.scatter_.value_reduce,
]
)
def meta_scatter_(self, dim, index, src_or_value, reduce=None):
src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
scatter_meta_impl(self, dim, index, src, reduce)
return self
@register_meta(
[
aten._scaled_dot_product_flash_attention,
]
)
def meta__scaled_dot_product_flash(
query: Tensor,
key: Tensor,
value: Tensor,
dropout_p: float = 0.0,
is_causal: bool = False,
):
batch_size = query.size(0)
num_heads = query.size(1)
max_seqlen_batch_q = query.size(2)
head_dim = query.size(3)
max_seqlen_batch_k = key.size(2)
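    # Inputs arrive as (batch, num_heads, seq_len, head_dim); the transposes
    # below move them to (batch, seq_len, num_heads, head_dim), the layout the
    # flash kernel reports its output in.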
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
Nnz_q = batch_size * max_seqlen_batch_q
output = torch.empty(
(Nnz_q, num_heads, head_dim), dtype=query.dtype, device=query.device
)
    output = output.view(batch_size, max_seqlen_batch_q, num_heads, head_dim).transpose(
        1, 2
    )
max_seqlen_q = math.ceil(max_seqlen_batch_q / 16) * 16
logsumexp = torch.empty(
(batch_size, num_heads, max_seqlen_q),
dtype=torch.float,
device=query.device,
)
is_sm80 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
is_sm75 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)
head_size_rounded = 64 if head_dim <= 64 else 128
blocksize_c = (
128
if (head_size_rounded == 128 and (dropout_p != 0.0 or not is_sm80))
or (is_sm75 and head_size_rounded == 64 and dropout_p != 0.0)
else 256
)
max_seqlen_k = math.ceil(max_seqlen_batch_k / blocksize_c) * blocksize_c
if max_seqlen_k <= 128:
max_seqlen_k = 128
elif max_seqlen_k <= 256:
max_seqlen_k = 256
    return output, logsumexp
@register_meta(
[
aten._scaled_dot_product_efficient_attention,
]
)
def meta__scaled_dot_product_efficient(
query: Tensor,
key: Tensor,
value: Tensor,
compute_log_sumexp: bool,
is_causal: bool = False,
):
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
B = query.size(0)
M = query.size(1)
N = key.size(1)
num_heads = query.size(-2)
K = query.size(-1)
Kv = value.size(-1)
res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)
logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
logsum_exp = torch.empty(
(B, num_heads, logsumexp_dim),
dtype=torch.float,
device=query.device,
)
res = res.transpose(1, 2)
return res, logsum_exp
@register_meta(
[
aten._scaled_dot_product_efficient_attention_backward,
]
)
def meta__scaled_dot_product_efficient_backward(
grad_out: Tensor,
query: Tensor,
key: Tensor,
value: Tensor,
out: Tensor,
logsumexp: Tensor,
is_causal: bool = False,
chunk_grad_outputs=False,
):
grad_out = grad_out.transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
B = query.size(0)
M = query.size(1)
N = key.size(1)
nH = query.size(2)
K = query.size(3)
grad_kv_needs_init = is_causal and N > M
if chunk_grad_outputs:
chunk = torch.empty((B, M, 3, nH, K), dtype=query.dtype, device=query.device)
grad_q = chunk.select(2, 0)
grad_k = chunk.select(2, 1)
grad_v = chunk.select(2, 2)
else:
grad_q = torch.empty(query.shape, dtype=query.dtype, device=query.device)
grad_k = (
torch.zeros(key.shape, dtype=key.dtype, device=key.device)
if grad_kv_needs_init
else torch.empty(key.shape, dtype=key.dtype, device=key.device)
)
grad_v = (
torch.zeros(value.shape, dtype=value.dtype, device=value.device)
if grad_kv_needs_init
else torch.empty(value.shape, dtype=value.dtype, device=value.device)
)
return grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2)
@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out])
@out_wrapper()
def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True):
scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
return self.new_empty(self.shape)
@register_meta(aten.scatter_reduce_.two)
def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True):
scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
return self
def multiply_integers(vs):
r = 1
for v in vs:
r *= v
return r
def upsample_common_check(input_size, output_size, num_spatial_dims):
check(
len(output_size) == num_spatial_dims,
lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}",
)
expected_input_dims = num_spatial_dims + 2 # N, C, ...
check(
len(input_size) == expected_input_dims,
lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}",
)
check(
all([s > 0 for s in input_size[2:]]) and all([s > 0 for s in output_size]),
lambda: f"Input and output sizes should be greater than 0, but got "
f"input size {input_size} and output size {output_size}",
)
nbatch, channels = input_size[:2]
return (nbatch, channels, *output_size)
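# Example (illustrative): input_size=(2, 3, 8, 8) with output_size=(16, 16)
# and num_spatial_dims=2 checks out to a full output size of (2, 3, 16, 16).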
@register_meta(aten.upsample_nearest1d.default)
def upsample_nearest1d(input, output_size, scales=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=1
)
return input.new_empty(full_output_size).to(
memory_format=utils.suggest_memory_format(input)
)
@register_meta(aten.upsample_nearest2d.default)
def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=2
)
output = input.new_empty(full_output_size)
# convert output to correct memory format, if necessary
memory_format = utils.suggest_memory_format(input)
# following "heuristic: only use channels_last path when it's faster than the contiguous path"
_, n_channels, _, _ = input.shape
if input.device.type == "cuda" and n_channels < 4:
memory_format = torch.contiguous_format
output = output.contiguous(memory_format=memory_format)
return output
@register_meta(aten.upsample_nearest3d.default)
def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=3
)
return input.new_empty(full_output_size).to(
memory_format=utils.suggest_memory_format(input)
)
@register_meta([aten.sort.default, aten.sort.stable])
def meta_sort(self, stable=None, dim=-1, descending=False):
return torch.empty_like(self), torch.empty_like(self, dtype=torch.int64)
def rnn_cell_checkSizes(
input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden
):
check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
check(
input_gates.shape == hidden_gates.shape,
lambda: f"{input_gates.shape} != {hidden_gates.shape}",
)
gates_size = input_gates.size(1)
if input_bias is not None:
check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1")
check(
input_bias.numel() == gates_size,
lambda: f"{input_bias.numel()} != {gates_size}",
)
check(
input_bias.shape == hidden_bias.shape,
lambda: f"{input_bias.shape} != {hidden_bias.shape}",
)
check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2")
expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
check(
prev_hidden.numel() == expected_prev_hidden_numel,
lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})",
)
check(
all(
x.device == input_gates.device
for x in [hidden_gates, input_bias, hidden_bias, prev_hidden]
),
lambda: "expected all inputs to be same device",
)
@register_meta(aten._thnn_fused_lstm_cell.default)
def _thnn_fused_lstm_cell_meta(
input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None
):
rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
workspace = torch.empty_like(input_gates, memory_format=torch.contiguous_format)
hy = torch.empty_like(cx, memory_format=torch.contiguous_format)
cy = torch.empty_like(cx, memory_format=torch.contiguous_format)
return (hy, cy, workspace)
@register_meta(aten._cudnn_rnn.default)
def _cudnn_rnn(
input,
weight,
weight_stride0,
weight_buf,
hx,
cx,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
):
is_input_packed = len(batch_sizes) != 0
if is_input_packed:
seq_length = len(batch_sizes)
mini_batch = batch_sizes[0]
batch_sizes_sum = input.shape[0]
else:
seq_length = input.shape[1] if batch_first else input.shape[0]
mini_batch = input.shape[0] if batch_first else input.shape[1]
batch_sizes_sum = -1
num_directions = 2 if bidirectional else 1
out_size = proj_size if proj_size != 0 else hidden_size
if is_input_packed:
out_shape = [batch_sizes_sum, out_size * num_directions]
else:
out_shape = (
[mini_batch, seq_length, out_size * num_directions]
if batch_first
else [seq_length, mini_batch, out_size * num_directions]
)
output = input.new_empty(out_shape)
cell_shape = [num_layers * num_directions, mini_batch, hidden_size]
if cx is None:
cy = torch.empty(0, device=input.device)
else:
cy = cx.new_empty(cell_shape)
hy = hx.new_empty([num_layers * num_directions, mini_batch, out_size])
# TODO: Query cudnnGetRNNTrainingReserveSize (expose to python)
reserve_shape = 0 if train else 0
reserve = input.new_empty(reserve_shape, dtype=torch.uint8)
return output, hy, cy, reserve, weight_buf
@register_meta(aten.mkldnn_rnn_layer.default)
def mkldnn_rnn_layer(
input,
w0,
w1,
w2,
w3,
hx_,
cx_,
reverse,
batch_sizes,
mode,
hidden_size,
num_layers,
has_biases,
bidirectional,
batch_first,
train,
):
seq_length = input.shape[1] if batch_first else input.shape[0]
mini_batch = input.shape[0] if batch_first else input.shape[1]
    output_channels = hidden_size
    out_shape = (
        [mini_batch, seq_length, output_channels]
        if batch_first
        else [seq_length, mini_batch, output_channels]
    )
)
output = input.new_empty(out_shape)
if hx_ is None:
hy = torch.empty(0, device=input.device)
else:
hy = hx_.new_empty(hx_.shape)
if cx_ is None:
cy = torch.empty(0, device=input.device)
else:
cy = cx_.new_empty(cx_.shape)
workspace = torch.empty(0, device=input.device, dtype=torch.uint8)
return output, hy, cy, workspace
def zero_numel_check_dims(self, dim, fn_name):
if self.ndim == 0:
check(
dim == 0 or dim == -1,
lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}",
IndexError,
)
else:
check(
self.size(dim) != 0,
lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.",
IndexError,
)
# From aten/src/ATen/native/ReduceOps.cpp
def check_argmax_argmin(name, self, dim):
if dim is not None:
dim = maybe_wrap_dim(dim, self.dim())
zero_numel_check_dims(self, dim, name)
else:
check(
self.numel() != 0,
lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.",
)
@register_meta([aten.argmax.default, aten.argmin.default])
def argmax_argmin_meta(self, dim=None, keepdim=False):
check_argmax_argmin("argmax", self, dim)
dims = utils.reduction_dims(self.shape, (dim,) if dim is not None else None)
shape = _compute_reduction_shape(self, dims, keepdim)
return self.new_empty(shape, dtype=torch.int64)
@register_meta(aten.scalar_tensor.default)
def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None):
return torch.empty(
(), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.topk.default)
def topk_meta(self, k, dim=-1, largest=True, sorted=True):
# From aten/src/ATen/native/Sorting.cpp
dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True)
check(
k >= 0 and k <= (self.size(dim) if self.dim() > 0 else 1),
lambda: "selected index k out of range",
)
sliceSize = 1 if self.dim() == 0 else self.size(dim)
check(k >= 0 and k <= sliceSize, lambda: "k not in range for dimension")
topKSize = list(self.shape)
if len(topKSize) > 0:
topKSize[dim] = k
return self.new_empty(topKSize), self.new_empty(topKSize, dtype=torch.int64)
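# Example (illustrative): topk(k=3, dim=-1) on a (4, 10) tensor returns values
# and int64 indices, both of shape (4, 3).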
legacy_contiguous_memory_format = torch.contiguous_format
# From aten/src/ATen/native/cuda/RNN.cu
def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):
defined_grad = grad_hy if grad_hy is not None else grad_cy
check(defined_grad.dim() == 2, lambda: "")
exp_size = defined_grad.size()
if grad_hy is not None:
check(grad_hy.size() == exp_size, lambda: "")
if grad_cy is not None:
check(grad_cy.size() == exp_size, lambda: "")
check(cx.size() == exp_size, lambda: "")
check(cy.size() == exp_size, lambda: "")
check(workspace.dim() == 2, lambda: "")
check(workspace.numel() == exp_size[0] * exp_size[1] * 4, lambda: "")
# From aten/src/ATen/native/cuda/RNN.cu
@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default)
def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):
if grad_hy is None and grad_cy is None:
return None, None, None
checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
grad_gates = torch.empty_like(
workspace, memory_format=legacy_contiguous_memory_format
)
grad_cx = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format)
grad_bias = grad_gates.sum(0, keepdim=False) if has_bias else None
return grad_gates, grad_cx, grad_bias
@register_meta(aten.pixel_shuffle.default)
def meta_pixel_shuffle(self, upscale_factor):
assert (
len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0
), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}"
def is_channels_last(ten):
return torch._prims_common.suggest_memory_format(ten) == torch.channels_last
def pick_memory_format():
if is_channels_last(self):
if device_hint(self) == "cuda":
return torch.contiguous_format
else:
return torch.channels_last
elif self.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif self.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
C = self.shape[-3] // (upscale_factor * upscale_factor)
Hr = self.shape[-2] * upscale_factor
Wr = self.shape[-1] * upscale_factor
out_shape = (*self.shape[:-3], C, Hr, Wr)
out = self.new_empty(out_shape)
out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload]
return out
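# Example (illustrative): pixel_shuffle with upscale_factor=2 maps an input of
# shape (1, 12, 4, 4) to (1, 3, 8, 8): channels shrink by 4, spatial dims double.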
@register_meta(aten.mkldnn_rnn_layer_backward.default)
def mkldnn_rnn_layer_backward(
input,
weight0,
weight1,
weight2,
weight3,
hx_,
cx_tmp,
output,
hy_,
cy_,
grad_output_r_opt,
grad_hy_r_opt,
grad_cy_r_opt,
reverse,
mode,
hidden_size,
num_layers,
has_biases,
train,
bidirectional,
batch_sizes,
batch_first,
workspace,
):
diff_x = input.new_empty(input.shape)
diff_hx = hx_.new_empty(hx_.shape)
diff_cx = cx_tmp.new_empty(cx_tmp.shape)
diff_w1 = weight0.new_empty(weight0.shape)
diff_w2 = weight1.new_empty(weight1.shape)
diff_b = weight2.new_empty(weight2.shape)
return diff_x, diff_w1, diff_w2, diff_b, diff_b, diff_hx, diff_cx
# We must also trigger meta registrations from PrimTorch ref
# decompositions
import torch._refs
import torch._refs.nn.functional
import torch._refs.special
def activate_meta():
activate_meta_table = {}
# For a given op, we pick the most specific decomp function from
# global_decomp_table in the precedence order of meta > post_autograd > pre_autograd
for type in ["meta", "post_autograd", "pre_autograd"]:
registry = global_decomposition_table[type]
for opo in registry:
if opo not in activate_meta_table:
activate_meta_table[opo] = registry[opo]
for op_overload, fn in activate_meta_table.items():
assert isinstance(op_overload, OpOverload)
op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)
if torch._C._dispatch_has_kernel_for_dispatch_key(
op_overload.name(), "CompositeImplicitAutograd"
):
# Internally, we shouldn't be registering meta kernels for any operators that
# have CompositeImplicitAutograd kernels.
# Instead, we should be letting those decompositions run, and writing meta kernels
# only for the base operators.
if op_overload in global_decomposition_table["meta"]:
raise RuntimeError(
f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
"register meta function for it. Instead, we should let the decomposition run and write "
"meta kernels for the base operators."
)
pass
elif op_overload.is_view:
# Attempting to register a python meta kernel for a view operator.
# We shouldn't do this, because the output will report as not having aliased storages.
# All view ops have meta kernels in C++ today, so we should use those instead.
pass
elif op_overload.name() in {
"aten::empty_strided", # causing infinite recursion, test_meta.py
"aten::clone", # causing infinite recursion
"aten::_to_copy", # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite # noqa: B950
"aten::copy_", # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64 # noqa: B950
"aten::constant_pad_nd", # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32 # noqa: B950
"aten::rot90", # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32 # noqa: B950
"aten::as_strided_scatter", # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32 # noqa: B950
}:
pass
else:
if "mkldnn::" in op_overload.name():
_meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
elif "mkl::" in op_overload.name():
_meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
else:
_meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)
activate_meta()
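# With the registrations above active, shape and dtype inference runs on
# "meta" tensors without touching real data, e.g. (illustrative):
#   torch.empty(8, 128, device="meta") @ torch.empty(128, 64, device="meta")
# produces a meta tensor of shape (8, 64) without allocating any storage.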
# File: desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/project/ignored/this_wont_normally_be_here.py
# -*- coding: utf-8 -*-
# This file won't normally be in this directory.
# It IS only for tests
from gettext import ngettext
def foo():
# Note: This will have the TRANSLATOR: tag but shouldn't
# be included on the extracted stuff
print ngettext('FooBar', 'FooBars', 1)
# File: moodledata/vpl_data/85/usersdata/228/54502/submittedfiles/funcoes1.py
# -*- coding: utf-8 -*-
n = int(input('enter the number of elements: '))
lista1 = []
lista2 = []
lista3 = []
for i in range(0, n, 1):
    elemento1 = int(input('enter an element: '))
    lista1.append(elemento1)
    elemento2 = int(input('enter an element: '))
    lista2.append(elemento2)
    elemento3 = int(input('enter an element: '))
    lista3.append(elemento3)
def crescente(a):
    # True only if every element is strictly smaller than the next one.
    for i in range(0, len(a) - 1, 1):
        if a[i] >= a[i + 1]:
            return False
    return True

def decrescente(a):
    # True only if every element is strictly greater than the next one.
    for i in range(0, len(a) - 1, 1):
        if a[i] <= a[i + 1]:
            return False
    return True

def elementosiguais(a):
    # True only if all elements are equal.
    for i in range(0, len(a) - 1, 1):
        if a[i] != a[i + 1]:
            return False
    return True
for lista in (lista1, lista2, lista3):
    print('S' if crescente(lista) else 'N')
    print('S' if decrescente(lista) else 'N')
    print('S' if elementosiguais(lista) else 'N')
# File: QuestionTime/QuestionTime/urls.py
"""QuestionTime URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path, re_path
from django_registration.backends.one_step.views import RegistrationView
# See the django-registration documentation for the one-step backend.
from core.views import IndexTemplateView
from users.forms import CustomUserForm
urlpatterns = [
path('admin/', admin.site.urls),
path("accounts/register/",
RegistrationView.as_view(
form_class=CustomUserForm,
success_url="/",
), name="django_registration_register"),
path("accounts/",
include("django_registration.backends.one_step.urls")),
path("accounts/",
include("django.contrib.auth.urls")),
path("api/",
include("users.api.urls")),
path("api/",
include("questions.api.urls")),
path("api-auth/",
include("rest_framework.urls")),
path("api/rest-auth/",
include("rest_auth.urls")),
path("api/rest-auth/registration/",
include("rest_auth.registration.urls")),
re_path(r"^.*$", IndexTemplateView.as_view(), name="entry-point")
]
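# The catch-all re_path above must stay last in urlpatterns: it hands every
# URL that no API route matched to the single-page front end via
# IndexTemplateView.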
# File: Tools/ComputeTool.py
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera, microelly
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Core.Common import Direction
import FreeCADGui
from Qt import QtGui
from Qt.QtWidgets import QFileDialog
from nodeeditor.say import *
import sys
if sys.version_info[0] != 2:
from importlib import reload
import os
RESOURCES_DIR = os.path.dirname(os.path.realpath(__file__)) + "/res/"
class ComputeTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( ComputeTool, self).__init__()
@staticmethod
def toolTip():
return "call compute method for selected nodes"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "compute.png")
@staticmethod
def name():
return str("ComputeTool")
def do(self):
nodes=FreeCAD.PF.graphManager.get().getAllNodes()
nodes2 = sorted(nodes, key=lambda node: node.x)
say("selected Nodes ...")
for n in nodes2:
if n.getWrapper().isSelected():
say(n,n.x)
n.compute()
class DeleteTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( DeleteTool, self).__init__()
@staticmethod
def toolTip():
return "Delete the selected nodes"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "delete.png")
@staticmethod
def name():
return str("DeleteTool")
def do(self):
nodes=FreeCAD.PF.graphManager.get().getAllNodes()
nodes2 = sorted(nodes, key=lambda node: node.x)
say("selected Nodes ...")
for n in nodes2:
if n.getWrapper().isSelected():
say(n,n.x)
n.kill()
class ToyTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( ToyTool, self).__init__()
@staticmethod
def toolTip():
return "Toy for Developer"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "toy.png")
@staticmethod
def name():
return str("ToyTool")
def do(self):
import nodeeditor.dev
reload (nodeeditor.dev)
nodeeditor.dev.run_shelfToy(self)
class FreeCADTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( FreeCADTool, self).__init__()
@staticmethod
def toolTip():
return "FreeCAD mainWindow"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "freecad.png")
@staticmethod
def name():
return str("FreeCADTool")
def do(self):
mw=FreeCADGui.getMainWindow()
mw.hide()
mw.show()
def toollist():
return [
ComputeTool,
DeleteTool,
FreeCADTool,
ToyTool,
]
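# toollist() is presumably the hook the surrounding PyFlow package uses to
# discover these shelf tools; new tools would just be appended to this list.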
# File: mutual_sale_discount_total/__openerp__.py
{
'name': 'Sale Discount on Total Amount',
'version': '1.0',
'category': 'sale',
'sequence': 6,
'summary': "Discount on total in Sale and invoice with Discount limit and approval",
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': 'http://www.cybrosys.com',
'description': """
Sale Discount for Total Amount
=======================
Module to manage discount on total amount in Sale.
as an specific amount or percentage
""",
'depends': ['sale','mutual_sales', 'base', 'stock','mutual_inventory','mutual_reports','mutual_followups','mutual_project','mutual_mass_editing'],
'data': [
'views/sale_view.xml',
'views/account_invoice_view.xml',
'views/invoice_report.xml',
'views/sale_order_report.xml',
'views/sale_discount_approval_view.xml',
'views/sale_discount_approval_workflow.xml'
],
'demo': [
],
'installable': True,
'auto_install': False,
}
# File: venv/lib/python3.6/site-packages/pip/_vendor/requests/sessions.py
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from datetime import timedelta
from collections import OrderedDict
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
if sys.platform == 'win32':
try: # Python 3.4+
preferred_clock = time.perf_counter
except AttributeError: # Earlier than Python 3.
preferred_clock = time.clock
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
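# Example (illustrative): merge_setting({'b': None, 'c': 3}, {'a': 1, 'b': 2})
# returns {'a': 1, 'c': 3} -- the request-level None removes the session's 'b'.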
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
# Currently the underlying http module on py3 decode headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
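    # Example (illustrative): redirecting http://host/ -> https://host/ on the
    # default ports keeps the Authorization header, while any change of
    # hostname strips it.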
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = ':'.join([to_native_string(parsed_rurl.scheme), url])
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/psf/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/psf/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
headers.pop('Cookie', None)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and not bypass_proxy:
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
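    # Example (hypothetical environment): with trust_env set and HTTPS_PROXY
    # defined, a redirect to an https URL picks up that proxy unless the host
    # is covered by NO_PROXY, and Proxy-Authorization is rebuilt from any
    # credentials embedded in the proxy URL.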
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
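    # Net effect, mirroring browser behavior rather than a strict RFC reading:
    # 303 -> GET (unless HEAD), 302 -> GET (unless HEAD), and a 301 response
    # to a POST turns it into a GET.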
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
... s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
    def options(self, url, **kwargs):
        r"""Sends an OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Resolve redirects if allowed.
if allow_redirects:
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
history = [resp for resp in gen]
else:
history = []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for {!r}".format(url))
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
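# Minimal usage sketch (hypothetical endpoint and header):
#   with Session() as s:
#       s.headers.update({'x-demo': '1'})   # persisted across requests
#       r = s.get('https://httpbin.org/get', timeout=5)
#       r.raise_for_status()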
# === /machine_learning_机器学习/准确率(Accuracy)、精确率(Precision)、召回率(Recall)、F值(F-Measure)等评估指标的计算.py ===
#!/usr/bin/python3
# coding: utf-8
import numpy as np
from sklearn.metrics import (accuracy_score, classification_report, f1_score,
                             fbeta_score, precision_score, recall_score,
                             roc_auc_score)
# Ground-truth labels
y_true = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
# Model prediction scores (probabilities of the positive class)
y_test = [0.8453712241207609, 0.8365137845084419, 0.8396024690959464, 0.8690716625950063, 0.801398983655787, 0.8353417405844167, 0.8887589815396711, 0.8274617726584338, 0.8901324702288052, 0.8515827665762914, 0.8008748432690203, 0.9129143613344268, 0.8213637332093631, 0.7926672650384551, 0.8715962551942291, 0.865989576549353, 0.8487118383625984, 0.893722366823937, 0.8683798090835637, 0.8258107838161615, 0.9067962552630583, 0.8896577622207299, 0.8287242449131549, 0.862162050742874, 0.9145984088092137, 0.8195240228832353, 0.8627208683955114, 0.8667420865435141, 0.833175478131922, 0.8338735760735464, 0.8609573544733866, 0.8270040835455006, 0.8438342928159803, 0.9162216060491829, 0.8681943043237748, 0.825237777063406, 0.9309199493779501, 0.847918698600505, 0.885842165942269, 0.845606331185933, 0.8867428557974891, 0.8569372316111383, 0.8374900840504085, 0.8495098728280119, 0.8475137546498668, 0.8509974354378016, 0.8545542968912262, 0.8369359268265817, 0.8881628216627452, 0.8553054247582024, 0.8715475068300871, 0.8608489638331329, 0.7871896522021451, 0.7986180814516614, 0.8679817198115483, 0.8555312604259576, 0.8737131993516944, 0.8570307159808236, 0.86943760267903, 0.8155454038368009, 0.8284627670247386, 0.7440460226630737, 0.8383901711678877, 0.9176876584197461, 0.8867356968591616, 0.8800298236584221, 0.8534696245512979, 0.9166524864925935, 0.8205450625187547, 0.8235830983361883, 0.8610359125511253, 0.8534495672661243, 0.8343550724006359, 0.826657313239454, 0.8327557274202153, 0.8263809690050867, 0.8449533999089178, 0.7403854533869694, 0.8862881836134406, 0.80930312554624, 0.8390349727384677, 0.7812820207595776, 0.8405256568966404, 0.7208619973606759, 0.8237972236612818, 0.8652031422452744, 0.7788070757633151, 0.8795942431527423, 0.8603826742129177, 0.83330392945359, 0.8487413534443429, 0.8085704307615089, 0.8862416492592033, 0.8154708608934949, 0.8949611666064037, 0.8189329260750865, 0.8328395987596068, 0.9158502403398057, 0.8066900361300818, 0.9277331317048729]
thre = 0.874  # an arbitrarily chosen decision threshold
tp = 0  # true positives
tn = 0  # true negatives
fp = 0  # false positives
fn = 0  # false negatives
for t4, t5 in zip(y_true, y_test):
if t4 == 1 and t5 >= thre:
tp += 1
elif t4 == 1:
fn += 1
elif t4 == 0 and t5 < thre:
tn += 1
else:
fp += 1
data = {
    "TP": tp,
    "TN": tn,
    "FP": fp,
    "FN": fn
}
print("Confusion-matrix counts:", data)
p = tp / (tp + fp)  # precision: fraction of predicted positives that are truly positive
r = tp / (tp + fn)  # recall: fraction of actual positives that were recovered
acc = (tp + tn) / (tp + tn + fp + fn)  # accuracy: fraction of all samples classified correctly
f1 = 2 * p * r / (p + r)
beta = 2
# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R)
f2 = (1+beta*beta) * p * r / (beta*beta*p+r)
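# Worked check (hypothetical values): with P = 0.5, R = 0.25 and beta = 2,
# F2 = 5 * 0.5 * 0.25 / (4 * 0.5 + 0.25) = 0.625 / 2.25 ≈ 0.278,
# i.e. F2 weights recall more heavily than precision.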
data2 = {
    "accuracy": acc,
    "precision": p,
    "recall": r,
    "F1": f1,
    "F2": f2,
}
print('Metrics computed by hand from the confusion matrix:', data2)
# ROC AUC (computed from the raw scores, not the thresholded labels)
auc = roc_auc_score(y_true, y_test)
# precision
p = precision_score(y_true, np.array(y_test) > thre)
# recall
r = recall_score(y_true, np.array(y_test) > thre)
# accuracy
acc = accuracy_score(y_true, np.array(y_test) > thre)
f1 = f1_score(y_true, np.array(y_test) > thre)
f2 = fbeta_score(y_true, np.array(y_test) > thre, beta=2)
data3 = {
    "accuracy": acc,
    "ROC AUC": auc,
    "F1": f1,
    "F2": f2,
    "precision": p,
    "recall": r,
}
print('Metrics computed with sklearn:', data3)
y_true = [0, 1, 2, 2, 2]
y_test = [0, 0, 2, 2, 1]
target_names = ['class 0', 'class 1', 'class 2']
print(classification_report(y_true, y_test, target_names=target_names))
def main():
pass
if __name__ == '__main__':
    main()
# === /experiments/scripts/compare_throughput.py ===
"""This combines a bunch of learning curves for all the games.
For bar charts, see `combine_student_results.py`.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import offsetbox
from matplotlib.ticker import FuncFormatter
import argparse, csv, math, os, pickle, sys, inspect, json
from os.path import join
import numpy as np
import pandas as pd
from dqn import common_config as cfg
from collections import defaultdict
import utils as U
plt.style.use('seaborn-darkgrid')
sns.set_style("darkgrid")
np.set_printoptions(linewidth=180, edgeitems=10)
# ------------------------------------------------------------------------------
# matplotlib stuff
# ------------------------------------------------------------------------------
titlesize = 53
xsize = 42
ysize = 42
ticksize = 42
legendsize = 48
scolors = ['gold', 'red', 'blue', 'purple', 'silver', 'orange']
tcolor = 'black'
error_region_alpha = 0.25
bwidth = 0.3
slw = 7
# ------------------------------------------------------------------------------
CONST = 1e6
LEN_REWARDS = 596  # expected number of recorded reward entries per run
def scale_steps(x):
x = np.array(x) / CONST
return x
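# e.g., scale_steps([500000, 1000000]) -> array([0.5, 1.0]), i.e. steps in millions.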
def get_info(exp_path, w=100):
"""Gather information, in a similar manner as scripts/quick_student.py.
"""
title = U.get_title(exp_path)
summary_train = U.load_summary_data(exp_path, train=True)
s_steps, s_info = U.load_info_data(exp_path)
s_reward = s_info['true_avg_rew'].values
# Ah this is going to be a bit annoying but w/e, b/c one of Pong failed.
if '_pong_snapshot_2019-08-22-21-57_s64329' in exp_path:
print(' At the problematic: _pong_snapshot_2019-08-22-21-57_s64329')
print(' That one exited early due to Pong-specific stuff.')
s_steps = np.array([(x*10000+50000) for x in range(LEN_REWARDS)])
tmp = np.ones((LEN_REWARDS,)) * s_reward[-1]
for i in range(len(s_reward)):
tmp[i] = s_reward[i]
s_reward = tmp
s_steps = scale_steps(s_steps)
assert len(s_steps) == len(s_reward)
# Get the teacher info. Load teacher model, load path, then plot data. Be
# careful we are allowed to do this 'substitution' to get the expert data.
with open(join(exp_path,'params.txt'), 'r') as f:
params = json.load(f)
teacher_models = params['teacher']['models']
assert len(teacher_models) == 1, \
"assume len(teacher_models) = 1, {}".format(len(teacher_models))
s_last = os.path.basename(os.path.normpath(exp_path))
t_last = os.path.basename(os.path.normpath(teacher_models[0]))
teacher_path = exp_path.replace(s_last, t_last)
teacher_path = teacher_path.replace('students/', 'teachers/')
teacher_title = U.get_title(teacher_path)
# CANNOT DO THIS FOR EARLIER RUNS, dating back to before the summer, I think.
#t_steps, t_info = U.load_info_data(teacher_path)
# AH, we did not record 'true_avg_rew' in the teacher ... ugh. So for this
# just read the root file and parse like I do here. That gives us the same
# values that I use for the 'true_avg_rew' key.
t_steps = []
t_reward = []
teacher_root_file = join(teacher_path, 'root.log')
with open(teacher_root_file, 'r') as f:
for line in f:
if 'completed' in line and '**********' in line and 'steps' in line:
linesp = line.split()
assert linesp[0] == '**********', linesp
assert linesp[2] == 'steps', linesp
steps = int(linesp[1])
t_steps.append(steps)
if 'Last 100 results: avg' in line:
linesp = line.split()
assert linesp[0] == 'Last', linesp
assert linesp[1] == '100', linesp
assert linesp[2] == 'results:', linesp
assert linesp[3] == 'avg', linesp
assert ',' in linesp[4], linesp
rew = float(linesp[4].strip(','))
t_reward.append(rew)
t_steps = scale_steps(t_steps)
assert len(t_steps) == len(t_reward)
# More annoying stuff ...
if len(s_steps) > LEN_REWARDS:
print('for {}, len(s_steps) = {} so chopping to {}'.format(
exp_path, len(s_steps), LEN_REWARDS))
s_steps = s_steps[:LEN_REWARDS]
s_reward = s_reward[:LEN_REWARDS]
if len(t_steps) > LEN_REWARDS:
print('for {}, len(t_steps) = {} so chopping to {}'.format(
exp_path, len(t_steps), LEN_REWARDS))
t_steps = t_steps[:LEN_REWARDS]
t_reward = t_reward[:LEN_REWARDS]
assert len(s_steps) == LEN_REWARDS, len(s_steps)
assert len(s_reward) == LEN_REWARDS, len(s_reward)
assert len(t_steps) == LEN_REWARDS, len(t_steps)
assert len(t_reward) == LEN_REWARDS, len(t_reward)
t_lambda = params['teacher']['supervise_loss']['lambda']
t_condense = params['teacher']['condense_freq']
t_overlap_m = params['teacher']['overlap']['match_method']
if t_overlap_m == 'train_net':
t_overlap_p = params['teacher']['overlap']['overlap_target']
elif t_overlap_m == 'fixed_steps':
t_overlap_p = str(params['teacher']['num_snapshot_ahead']).zfill(2)
assert t_condense == 5, t_condense
else:
raise ValueError(t_overlap_m)
# For now
if 'beamrider' in s_last.lower() or 'pong' in s_last.lower() or \
'robotank' in s_last.lower():
assert t_lambda == 0.01, '{}, {}'.format(t_lambda, s_last)
else:
assert t_lambda == 0.1, '{}, {}'.format(t_lambda, s_last)
result = {
'game_name': U.get_game_name(s_last),
'overlap_param': t_overlap_p,
'match_method': t_overlap_m,
'supervise_lambda': t_lambda,
'student_rew': s_reward, # student reward every 10k steps (starts @ 50k)
'teacher_rew': t_reward, # teacher reward every 10k steps (starts @ 50k)
'student_steps': s_steps, # should be same among all trials but save anyway
'teacher_steps': t_steps, # should be same among all trials but save anyway
'mb_start': params['teacher']['blend']['start'],
'mb_end': params['teacher']['blend']['end'],
'train_freq': params['train']['train_freq_per_step'],
}
return result
def _get_array(list_of_items):
nb = len(list_of_items)
lengths = [len(x) for x in list_of_items]
if len(lengths) > 1 and np.std(lengths) > 0:
print('Error with lengths: {}'.format(lengths))
sys.exit()
return np.array(list_of_items)
def _info_for_plots(stats, t_stats, target_num_trials=2):
"""Go through and collect data for one experimental condition.
Calling this method several times means we should be able to compare many
different settings. Unlike earlier, game_info (and t_stats) needs to have
the x coordinates, since we're doing full learning curves.
Returns a list that has all the game stats we want. It should be a list
with ONE ITEM PER GAME, so a length 9 list here!
"""
all_game_stats = []
game_idx = 0
print('\n\n\t\tNEW GAME: {}'.format(U.GAMES[game_idx]))
game_info = {} # For each game, collect stats, put in `all_game_stats`.
for key in sorted(stats.keys()):
game = U.GAMES[game_idx]
if game.lower() not in key:
game_idx += 1
game = U.GAMES[game_idx]
print('\n\n\t\tNEW GAME: {}'.format(game))
# Add the previously accumulated states to the game_stats.
all_game_stats.append(game_info)
game_info = {}
num_trials = len(stats[key])
print('\n{} len(stats[key]): {}'.format(key, num_trials))
s_rews = _get_array([x['student_rew'] for x in stats[key]])
t_rews = _get_array([x['teacher_rew'] for x in stats[key]])
print('student/teacher rewards: {} {}'.format(s_rews.shape, t_rews.shape))
#print('std(student): {}'.format(np.std(s_rews, axis=0)))
#print('std(teacher): {}'.format(np.std(t_rews, axis=0)))
assert np.max( np.abs(np.std(t_rews,axis=0)) ) < 0.001, \
'We are using the same teacher, right? The StDev should be zero.'
assert num_trials == s_rews.shape[0] == t_rews.shape[0], num_trials
# Let's not do this in case we want to plot standard deviation
#s_rews = np.mean(s_rews, axis=0)
# Eh this could easily be a global list since all the games use the
# same number of steps (thus far) but this may give us flexibility later.
s_steps = np.mean(_get_array([x['student_steps'] for x in stats[key]]), axis=0)
t_steps = np.mean(_get_array([x['teacher_steps'] for x in stats[key]]), axis=0)
# Add teacher stats, should match for all in this loop so we just do once.
t_rews = np.mean(t_rews, axis=0)
if len(t_stats[game]) == 0:
t_stats[game].append( (t_steps,t_rews) )
# Only want student samples for statistics that we will actually be using.
info = key.split('__')
if info[1] == 'fixed_steps':
#assert num_trials == args.num_trials, num_trials
if num_trials != target_num_trials:
print('WARNING! we have {} trials, but should have {}'.format(
num_trials, target_num_trials))
num_ahead = info[2]
game_info[num_ahead] = (s_steps,s_rews)
elif info[1] == 'train_net':
continue
else:
raise ValueError(info)
# Add last game.
all_game_stats.append(game_info)
print('\n\nDone printing, len all games: {}'.format(len(all_game_stats)))
assert len(all_game_stats) == len(U.GAMES) == len(U.G_INDS_FAT)
return all_game_stats
def report_combined_stats(stats_3, stats_4, args, w=100):
"""Report combined stats, ideally for a plot.
:param stats: dict, with key --> list, where the list has one item per
random seed. This helps us combine results more easily.
"""
# Increase factor to `nrows` to make plot 'taller'.
nrows = 2
ncols = 5
fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharex=False,
figsize=(11*ncols,8*nrows))
#gridspec_kw={'height_ratios': [5,5,5,1]})
INDICES = U.G_INDS_FAT
# Teacher data for plots later.
t_stats_3 = defaultdict(list)
t_stats_4 = defaultdict(list)
# Do what I did earlier, except for BOTH of the stats here. Yeah !!
print('\n*************************************************')
print('COLLECTING DATA FROM FIRST EXPERIMENTAL CONDITION')
print('*************************************************\n')
all_game_stats_3 = _info_for_plots(stats=stats_3, t_stats=t_stats_3)
print('\n*************************************************')
    print('COLLECTING DATA FROM SECOND EXPERIMENTAL CONDITION')
print('*************************************************\n')
all_game_stats_4 = _info_for_plots(stats=stats_4, t_stats=t_stats_4)
# --------------------------------------------------------------------------
# Plot experiment condition 3 and 4 on the same plot. The shape of `s_y`
# here, i.e., the reward, is (num_trials, num_recorded) so we could do that
# as standard deviation, but might be noisy ... also these ALREADY include
# an implicit smoothing over the past 100 episodes.
# --------------------------------------------------------------------------
def _plot(r, c, key, s_stats_3, s_stats_4, color, label, force_color=False,
std_curves=False):
# Case 1, try to plot everything together w/same color codes:
if False:
s_x, s_y = s_stats_3[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, ls='--', lw=slw, color=color, label=label+', 4:1')
s_x, s_y = s_stats_4[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, lw=slw, color=color, label=label+', 2:1')
        # Case 2, plot the mean across trials, with optional +/- 1 std bands
        # (std taken across trials, before averaging):
        if True:
            cc = 'gold' if force_color else 'blue'
            s_x, s_ys = s_stats_3[key]
            s_mean = np.mean(s_ys, axis=0)
            s_std = np.std(s_ys, axis=0)
            ax[r,c].plot(s_x, s_mean, lw=slw, color=cc, label=label+', 4:1')
            if std_curves:
                ax[r,c].fill_between(s_x,
                        s_mean + s_std,
                        s_mean - s_std,
                        color=cc,
                        alpha=error_region_alpha)
            cc = 'orange' if force_color else 'red'
            s_x, s_ys = s_stats_4[key]
            s_mean = np.mean(s_ys, axis=0)
            s_std = np.std(s_ys, axis=0)
            ax[r,c].plot(s_x, s_mean, lw=slw, color=cc, label=label+', 2:1')
            if std_curves:
                ax[r,c].fill_between(s_x,
                        s_mean + s_std,
                        s_mean - s_std,
                        color=cc,
                        alpha=error_region_alpha)
# --------------------------------------------------------------------------
# Now go through this again, same logic, except plot. Alphabetical order
# from top row, w/one for legend to apply to subplots.
# --------------------------------------------------------------------------
for game, (r,c) in zip(U.GAMES, INDICES):
ax[r,c].set_title('{}'.format(game), fontsize=titlesize)
idx = U.GAMES.index(game)
# Keys: ['-1', '-2', '00', '02', '05', '10'] where -1 and -2 are BA and RA.
print('\nKeys for s_stats_3, and then s_stats_4:')
s_stats_3 = all_game_stats_3[idx]
print(game, ': ', sorted(s_stats_3.keys()))
s_stats_4 = all_game_stats_4[idx]
print(game, ': ', sorted(s_stats_4.keys()))
# Just take first one b/c teacher stats should be the same. Actually
# wait maybe we don't need the teacher here? Think about it ...
t_x, t_y = t_stats_3[game][0]
if True:
ax[r,c].plot(t_x, t_y, lw=10, ls='--', color=tcolor, label='DDQN Teacher')
_t_x, _t_y = t_stats_4[game][0]
assert np.allclose(t_x, _t_x), '{} {}'.format(t_x, _t_x)
assert np.allclose(t_y, _t_y), '{} {}'.format(t_y, _t_y)
# --------------------------------------------------------------------------
# NOTE: adjust based on how many of the student 'keys' I want to post.
# Toggle which ones we want on/off. SAME COLOR CODE AS PRIOR FIGURE, if
# we are using all select functions. But we prob. don't need best
# ahead. Honestly it seems best just to let ONE be used at a time.
# --------------------------------------------------------------------------
if True:
key = '-1'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[0], label='S, Best Ahead',
force_color=True)
if False:
key = '-2'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[1], label='S, Rand Ahead')
if False:
key = '00'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[2], label='S, 0 Ahead')
if False:
key = '02'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[3], label='S, 2 Ahead')
if False:
key = '05'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[4], label='S, 5 Ahead')
if True:
key = '10'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[5], label='S, 10 Ahead')
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Bells and whistles
for r in range(nrows):
for c in range(ncols):
#leg = ax[r,c].legend(loc="best", ncol=2, prop={'size':legendsize})
#for legobj in leg.legendHandles:
# legobj.set_linewidth(5.0)
ax[r,c].tick_params(axis='x', labelsize=ticksize)
ax[r,c].tick_params(axis='y', labelsize=ticksize)
# I think it's better to share axes in the x direction to be
# consistent with steps, but doing so removes the axis ticks. This
# reverts it so we get the ticks on all the axis.
#ax[r,c].xaxis.set_tick_params(which='both', labelbottom=True)
# Put this on r=0, c=0, then hide it, just to get legend to appear.
ax[0,0].set_visible(False)
handles, labels = ax[1,1].get_legend_handles_labels()
# Location (0,0) is bottom left. Doing (0,1) is upper left but the text
# isn't visible (because `loc` is the lower left part of the legend).
fig.legend(handles, labels, loc=(0.005,0.500), prop={'size':legendsize})
# Finally, save!! Can't do `.[...].png` since overleaf complains.
plt.tight_layout()
    figname = 'fig_throughput_student.png'
plt.savefig(figname)
print("Just saved: {}".format(figname))
if __name__ == "__main__":
# --------------------------------------------------------------------------
# NOW WE ASSUME WE'RE COMPARING EXP's 3 AND 4.
# --------------------------------------------------------------------------
EXP_PATH = cfg.SNAPS_STUDENT
pp = argparse.ArgumentParser()
args = pp.parse_args()
args.num_trials_exp_3 = 2
args.num_trials_exp_4 = 2
# Iterate through all the *student* models.
dirs = sorted( [join(EXP_PATH,x) for x in os.listdir(EXP_PATH) \
if U._criteria_for_experiments_throughput(x,args)] )
print("Currently plotting with these models, one trained agent per file:")
stats_3 = defaultdict(list)
stats_4 = defaultdict(list)
for dd in dirs:
last_part = os.path.basename(os.path.normpath(dd))
if last_part in U.STUFF_TO_SKIP:
print(" skipping {} due to STUFF_TO_SKIP".format(last_part))
continue
print("\nAnalyzing: {}".format(dd))
info = get_info(dd)
key = '{}__{}__{}'.format(info['game_name'], info['match_method'],
info['overlap_param'])
mb = info['mb_start']
tf = info['train_freq']
mm = info['match_method']
# We only want experiments 3 and 4.
if mb == 0.50 and tf == 4 and mm != 'train_net':
stats_3[key].append(info)
elif mb == 0.50 and tf == 2 and mm != 'train_net':
stats_4[key].append(info)
else:
print(' skipping {}, mm,tf,mm: {}, {}, {}'.format(key, mb,tf,mm))
continue
print('\nNow going to report on all these stats.')
print(' len stats 3, 4 dicts: {} and {}'.format(len(stats_3), len(stats_4)))
print('')
report_combined_stats(stats_3, stats_4, args)
# === /clif/pybind11/staging/virtual_funcs_basics_test.py (Apache-2.0) ===
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.pybind11.staging.virtual_funcs_basics.
This file is a copy of clif/testing/python/virtual_funcs_basics_test.py.
"""
import unittest
from clif.pybind11.staging import virtual_funcs_basics
class B(virtual_funcs_basics.B):
def __init__(self):
virtual_funcs_basics.B.__init__(self)
self.c = -1
def set_c(self, v):
self.c = v
class K(virtual_funcs_basics.K):
def inc(self, n):
self.i += n
class L(virtual_funcs_basics.Q):
def __init__(self, max_len):
virtual_funcs_basics.Q.__init__(self)
self._q = []
self._max = max_len
def data(self):
return list(self._q)
def PossiblyPush(self, data):
if len(self._q) < self._max:
self._q.append(data)
return True
return False
class AbstractClassNonDefConstImpl(
virtual_funcs_basics.AbstractClassNonDefConst):
def DoSomething(self):
return self.a * self.b
class ClassNonDefConstImpl(virtual_funcs_basics.ClassNonDefConst):
def __init__(self, a, b):
super().__init__(a, b)
self.c = [1, 2, 3] # Must have a non-trivial container to enable gc.
# Remove self.invalidated after gaining (limited) access to invalidated ptr.
self.invalidated = False
def DoSomething(self):
return -1 if self.invalidated else self.a * self.b
class VirtualFuncsTest(unittest.TestCase):
def testInitConcreteClassWithVirtualMethods(self):
b = virtual_funcs_basics.B()
b.set_c(2)
self.assertEqual(b.c, 2)
c = virtual_funcs_basics.ClassNonDefConst(1, 2)
self.assertEqual(c.DoSomething(), 3)
def testBasicCall(self):
b = B()
b.set_c(2)
self.assertEqual(b.c, 2)
virtual_funcs_basics.Bset(b, 4)
self.assertEqual(b.c, 4)
def testVirtual(self):
self.assertEqual(virtual_funcs_basics.seq(K(), 2, 6), [0, 2, 4, 6])
abc_non_def_impl = AbstractClassNonDefConstImpl(4, 5)
self.assertEqual(abc_non_def_impl.DoSomething(), 20)
self.assertEqual(virtual_funcs_basics.DoSomething1(abc_non_def_impl), 20)
non_def_impl = ClassNonDefConstImpl(4, 5)
self.assertEqual(non_def_impl.DoSomething(), 20)
self.assertEqual(virtual_funcs_basics.DoSomething2(non_def_impl), 20)
def testVirtual2(self):
q = L(3)
self.assertEqual(virtual_funcs_basics.add_seq(q, 2, 6), 3)
self.assertEqual(q.data(), [0, 2, 4])
def testVirtualProperty(self):
c = virtual_funcs_basics.D()
c.pos_c = -1
self.assertEqual(c.pos_c, 1)
if __name__ == '__main__':
unittest.main()
# === /tests/artificial/transf_pow3/trend_poly/cycle_7/ar_12/test_artificial_32_pow3_poly_7_12_100.py (BSD-3-Clause) ===
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 7, transform = "pow3", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset);
# === /Extract/common.py ===
# load yaml
import yaml
# module-level cache for the parsed config
__config = None
def config():
global __config
if not __config:
with open('config.yml', mode='r') as f:
__config = yaml.safe_load(f)
return __config
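# Minimal usage sketch (assumes config.yml defines, e.g., a 'news_sites' key):
#   from common import config
#   sites = config()['news_sites']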
# === /mcex/history/nphistory.py ===
'''
Created on Mar 15, 2011
@author: jsalvatier
'''
import numpy as np
class NpHistory(object):
"""
encapsulates the recording of a process chain
"""
def __init__(self, max_draws):
self.max_draws = max_draws
self.samples = {}
self.nsamples = 0
def record(self, point):
"""
records the position of a chain at a certain point in time
"""
if self.nsamples < self.max_draws:
            for var, value in point.items():
                try:
                    s = self.samples[var]
                except KeyError:
                    # lazily allocate storage the first time a variable is seen
                    s = np.empty((self.max_draws,) + value.shape)
                    self.samples[var] = s
                s[self.nsamples, ...] = value
            self.nsamples += 1
        else:
            raise ValueError('out of space!')
def __getitem__(self, key):
        return self.samples[key][0:self.nsamples, ...]
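# Minimal usage sketch (hypothetical shapes):
#   h = NpHistory(max_draws=1000)
#   h.record({'mu': np.zeros(3)})
#   h['mu'].shape  # -> (1, 3)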
# === /DSA/stack/balanced_parentheses.py (MIT) ===
def check_balanced_parentheses(string):
stack = list()
matches = [("(", ")"), ("{", "}"), ("[", "]")]
    if len(string) % 2:
        # Early exit: assuming the string contains only bracket characters,
        # an odd-length string can never be balanced.
        return False
for char in string:
if char in ['(', '{', '[']:
stack.append(char)
elif char in [')', '}', ']']:
if len(stack) == 0:
return False
last_opening = stack.pop()
if (last_opening, char) not in matches:
return False
# prev = stack.pop()
# if char == ')':
# if prev != "(":
# return False
# elif char == "}":
# if prev != "{":
# return False
# elif char == "]":
# if prev != "[":
            #         return False
            # (The commented block above is the explicit per-bracket variant of
            # the matches-based check already used at the top of this branch.)
return len(stack) == 0
if __name__ == '__main__':
exp = "([{}])"
print(check_balanced_parentheses(exp))
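    # A few more hypothetical checks: "([)]" is rejected for wrong nesting,
    # "" is vacuously balanced, and "(((" fails the odd-length base condition.
    print(check_balanced_parentheses("([)]"))  # False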
# === /Baekjoon/TreeDiameter.py ===
# The diameter of a tree is the longest distance between any two of its vertices.
# Write a program that computes the diameter of a tree.
#
# Input
# A tree is given as input.
# The first line contains the number of vertices V (2 <= V <= 100,000).
# Each of the next V lines describes the edges of one vertex
# (vertices are numbered 1 through V):
# first the vertex number, then pairs of integers, where each pair gives a
# connected vertex and the distance to it.
# For example, the fourth line says vertex 3 is connected to vertex 1 by an
# edge of length 2, and to vertex 4 by an edge of length 3.
# Each line ends with -1. All distances are natural numbers <= 10,000.
#
# Output
# Print the diameter of the tree on the first line.
import sys
sys.setrecursionlimit(10**6)
V = int(sys.stdin.readline().rstrip())
connected = [[] for _ in range(V + 1)]
visited = [False for _ in range(V + 1)]
# Parse the input
for i in range(1, V + 1):
    edges = list(map(int, sys.stdin.readline().rstrip().split()))
    for j in range(1, len(edges) - 1, 2):
        connected[edges[0]].append((edges[j], edges[j + 1]))
        # Careful: indexing with connected[i] here would be a bug; use edges[0].
# Recurse into the unvisited neighbours of v, accumulating distance in dist;
# returns the (vertex, distance) pair farthest from the start.
def dfs(v, dist):
ret = (v, dist)
visited[v] = True
for v_d in connected[v]:
if visited[v_d[0]]:
continue
next_search = dfs(v_d[0], dist + v_d[1])
if ret[1] < next_search[1]:
ret = next_search
return ret
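# Why two passes suffice: from any start vertex, the farthest vertex is an
# endpoint of some diameter (a standard exchange argument on trees), so a
# second DFS from it measures the true diameter.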
# First DFS: find the vertex farthest from an arbitrary start (vertex 1).
first_dfs = dfs(1, 0)
far_v = first_dfs[0]
# Reset visited before the second DFS.
visited = [False for _ in range(V + 1)]
# Second DFS: the farthest distance from that vertex is the diameter.
second_dfs = dfs(far_v, 0)
diameter = second_dfs[1]
print(diameter)
# === /employeerest/company/company/views.py ===
from django.views.generic import TemplateView
class TestPage(TemplateView):
template_name = 'firstapp/test.html'
class ThanksPage(TemplateView):
template_name = 'firstapp/thanks.html'
class HomePage(TemplateView):
template_name = 'firstapp/index.html'
# === /leetcode/day11_24.py ===
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @FILE : day11_24.py
# @Author : Pluto.
# @Time : 2020/11/24 16:06
"""
Problem 66: Plus One
Given a non-empty array of digits representing a non-negative integer,
add one to that integer.
The most significant digit is stored first, and each element holds a single digit.
You may assume the integer has no leading zeros, except for the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: the array represents the number 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: the array represents the number 4321.
Example 3:
Input: digits = [0]
Output: [1]
Constraints:
1 <= digits.length <= 100
0 <= digits[i] <= 9
"""
def plusOne(digits):
    s = "".join(str(x) for x in digits)        # digit list -> "123"
    ss = str(int(s) + 1)                       # add one as an integer
    r = [int(x) for x in ss]                   # back to a digit list
    return [0] * (len(digits) - len(r)) + r    # restore leading zeros, if any
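# Note: the int() round-trip leans on Python's arbitrary-precision integers;
# a digit-by-digit carry loop is the language-agnostic alternative.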
if __name__ == '__main__':
digits = [0, 0, 0]
    print(plusOne(digits))  # -> [0, 0, 1]
# === /base_ml/10.1.Iris_DecisionTree.py ===
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def iris_type(s):
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
# sepal length, sepal width, petal length, petal width
iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
    path = '../data/8.iris.data'  # path to the data file
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type}, encoding="utf-8")
x, y = np.split(data, (4,), axis=1)
    # For visualization, use only the first two features
x = x[:, :2]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
#ss = StandardScaler()
#ss = ss.fit(x_train)
    # Decision-tree hyperparameter notes:
    # min_samples_split = 10: a node may be split only if it contains more than 10 samples
    # min_samples_leaf = 10: a split is kept only if every child node gets more than 10 samples
model = Pipeline([
('ss', StandardScaler()),
('DTC', DecisionTreeClassifier(criterion='entropy', max_depth=3))])
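    # Note: StandardScaler is fitted inside the pipeline, so the scaling
    # parameters are learned from the training data only (no test-set leakage).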
# clf = DecisionTreeClassifier(criterion='entropy', max_depth=3)
model = model.fit(x_train, y_train)
    y_test_hat = model.predict(x_test)  # predictions on the test set
    print(model.score(x_test, y_test))  # mean accuracy on the test set
    # Export the fitted tree; render with: dot -Tpng -o 1.png 1.dot
    with open('.\\iris_tree.dot', 'w') as f:
        tree.export_graphviz(model.named_steps['DTC'], out_file=f)
    # Plot the decision regions
    N, M = 100, 100  # number of grid samples along each axis
    x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of feature 0
    x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of feature 1
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
    x1, x2 = np.meshgrid(t1, t2)  # grid sample points
    x_show = np.stack((x1.flat, x2.flat), axis=1)  # grid points to classify
    # # Filler values for the other two dimensions (meaningless, only to match shapes).
    # # Before uncommenting, make sure x = x[:, :2] above is commented out.
    # x3 = np.ones(x1.size) * np.average(x[:, 2])
    # x4 = np.ones(x1.size) * np.average(x[:, 3])
    # x_test = np.stack((x1.flat, x2.flat, x3, x4), axis=1)  # grid points to classify
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    y_show_hat = model.predict(x_show)  # predicted class for each grid point
    y_show_hat = y_show_hat.reshape(x1.shape)  # reshape to match the grid
    plt.figure(facecolor='w')
    plt.pcolormesh(x1, x2, y_show_hat, cmap=cm_light)  # decision regions
    plt.scatter(x_test[:, 0], x_test[:, 1], c=y_test.ravel(), edgecolors='k', s=100, cmap=cm_dark, marker='o')  # test data
    plt.scatter(x[:, 0], x[:, 1], c=y.ravel(), edgecolors='k', s=40, cmap=cm_dark)  # all data
plt.xlabel(iris_feature[0], fontsize=15)
plt.ylabel(iris_feature[1], fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid(True)
    plt.title('Decision-tree classification of the iris data', fontsize=17)
plt.show()
    # Predictions on the test set
    y_test = y_test.reshape(-1)
    # print(y_test_hat)
    # print(y_test)
    result = (y_test_hat == y_test)  # True where the prediction is correct
    acc = np.mean(result)
    # print('Accuracy: %.2f%%' % (100 * acc))
    # Overfitting: error rate as a function of tree depth
depth = np.arange(1, 15)
err_list = []
for d in depth:
clf = DecisionTreeClassifier(criterion='entropy', max_depth=d)
clf = clf.fit(x_train, y_train)
        y_test_hat = clf.predict(x_test)  # predictions on the test set
        result = (y_test_hat == y_test)  # True where the prediction is correct
        err = 1 - np.mean(result)
        err_list.append(err)
        # print(d, ' error rate: %.2f%%' % (100 * err))
plt.figure(facecolor='w')
plt.plot(depth, err_list, 'ro-', lw=2)
    plt.xlabel('tree depth', fontsize=15)
    plt.ylabel('error rate', fontsize=15)
    plt.title('Tree depth vs. overfitting', fontsize=17)
plt.grid(True)
plt.show()
# === /manage.py ===
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ayush_crowdbotics_375.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
# === /server/paiements/migrations/0003_auto__chg_field_transaction_extra_data.py (BSD-2-Clause) ===
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(default=''))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configs.config': {
'Meta': {'object_name': 'Config'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admin_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_api': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_ipn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_request': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'test_mode': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'url_back_err': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_back_ok': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_ipn': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'paiements.transaction': {
'Meta': {'object_name': 'Transaction'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configs.Config']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_status': ('django.db.models.fields.CharField', [], {'default': "'cr'", 'max_length': '2'}),
'ipn_needed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_postfinance_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_user_back_from_postfinance_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_userforwarded_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'postfinance_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postfinance_status': ('django.db.models.fields.CharField', [], {'default': "'??'", 'max_length': '2'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'paiements.transctionlog': {
'Meta': {'object_name': 'TransctionLog'},
'extra_data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paiements.Transaction']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['paiements'] | [
"[email protected]"
] | |
1cb69e60aa615509cf524ab1fb086168647ae432 | 7dc80048f72e106f977b49ea882c63cc9623e3ef | /notebooks/other/Y2017M07D28_RH_python27setup_v01.py | 250e214bbfc2fd21afe44797cb7e69bbeb700a16 | [] | no_license | YanCheng-go/Aqueduct30Docker | 8400fdea23bfd788f9c6de71901e6f61530bde38 | 6606fa03d145338d48101fc53ab4a5fccf3ebab2 | refs/heads/master | 2022-12-16T03:36:25.704103 | 2020-09-09T14:38:28 | 2020-09-09T14:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py |
# coding: utf-8
# # Test Python 27 setup
#
# * Purpose of script: test python 27 environement against several libraries
# * Author: Rutger Hofste
# * Kernel used: python27
# * Date created: 20170728
#
#
# In[3]:
packages = {"earth engine":-1,"gdal":-1,"geopandas":-1,"arcgis":-1}
# In[6]:
try:
import ee
packages["earth engine"]=1
except:
packages["earth engine"]=0
# In[4]:
try:
from osgeo import gdal
packages["gdal"]=1
except:
packages["gdal"]=0
# In[10]:
try:
import geopandas
packages["geopandas"]=1
except:
packages["geopandas"]=0
# In[11]:
try:
import arcgis.gis
packages["arcgis"]=1
except:
packages["arcgis"]=0
# In[12]:
print(packages)
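

# In[13]:


# Optional summary cell (assumes the import-check cells above have all run):
# list any packages that failed to import so the result reads at a glance.
failed = [name for name, ok in packages.items() if ok == 0]
if failed:
    print("missing packages: " + ", ".join(failed))
else:
    print("all imports OK")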
# In[ ]:
| [
"[email protected]"
] | |
fa831199505226547d9cfa53b8caf0ccbd1afd58 | fa7e75212e9f536eed7a78237a5fa9a4021a206b | /OLD_ROOT/Backend/SMQTK_Backend/utils/jsmin/test.py | 7aba6993dc941efa2e6ea9557fd99d5a9b43b720 | [] | no_license | kod3r/SMQTK | 3d40730c956220a3d9bb02aef65edc8493bbf527 | c128e8ca38c679ee37901551f4cc021cc43d00e6 | refs/heads/master | 2020-12-03T09:12:41.163643 | 2015-10-19T14:56:55 | 2015-10-19T14:56:55 | 44,916,678 | 1 | 0 | null | 2015-10-25T15:47:35 | 2015-10-25T15:47:35 | null | UTF-8 | Python | false | false | 8,702 | py | import unittest
import sys
# modified path since this is now being embeded in another project.
from SMQTK_Backend.utils import jsmin
class JsTests(unittest.TestCase):
def _minify(self, js):
return jsmin.jsmin(js)
def assertEqual(self, thing1, thing2):
if thing1 != thing2:
            print("%r != %r" % (thing1, thing2))
raise AssertionError
return True
def assertMinified(self, js_input, expected):
minified = jsmin.jsmin(js_input)
assert minified == expected, "%r != %r" % (minified, expected)
def testQuoted(self):
js = r'''
Object.extend(String, {
interpret: function(value) {
return value == null ? '' : String(value);
},
specialChar: {
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'\\': '\\\\'
}
});
'''
expected = r"""Object.extend(String,{interpret:function(value){return value==null?'':String(value);},specialChar:{'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','\\':'\\\\'}});"""
self.assertMinified(js, expected)
def testSingleComment(self):
js = r'''// use native browser JS 1.6 implementation if available
if (Object.isFunction(Array.prototype.forEach))
Array.prototype._each = Array.prototype.forEach;
if (!Array.prototype.indexOf) Array.prototype.indexOf = function(item, i) {
// hey there
function() {// testing comment
foo;
//something something
location = 'http://foo.com;'; // goodbye
}
//bye
'''
expected = r"""
if(Object.isFunction(Array.prototype.forEach))
Array.prototype._each=Array.prototype.forEach;if(!Array.prototype.indexOf)Array.prototype.indexOf=function(item,i){ function(){ foo; location='http://foo.com;';}"""
# print expected
self.assertMinified(js, expected)
def testEmpty(self):
self.assertMinified('', '')
self.assertMinified(' ', '')
self.assertMinified('\n', '')
self.assertMinified('\r\n', '')
self.assertMinified('\t', '')
def testMultiComment(self):
js = r"""
function foo() {
print('hey');
}
/*
if(this.options.zindex) {
this.originalZ = parseInt(Element.getStyle(this.element,'z-index') || 0);
this.element.style.zIndex = this.options.zindex;
}
*/
another thing;
"""
expected = r"""function foo(){print('hey');}
another thing;"""
self.assertMinified(js, expected)
def testLeadingComment(self):
js = r"""/* here is a comment at the top
it ends here */
function foo() {
alert('crud');
}
"""
expected = r"""function foo(){alert('crud');}"""
self.assertMinified(js, expected)
def testJustAComment(self):
self.assertMinified(' // a comment', '')
def testRe(self):
js = r'''
var str = this.replace(/\\./g, '@').replace(/"[^"\\\n\r]*"/g, '');
return (/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);
});'''
expected = r"""var str=this.replace(/\\./g,'@').replace(/"[^"\\\n\r]*"/g,'');return(/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);});"""
self.assertMinified(js, expected)
def testIgnoreComment(self):
js = r"""
var options_for_droppable = {
overlap: options.overlap,
containment: options.containment,
tree: options.tree,
hoverclass: options.hoverclass,
onHover: Sortable.onHover
}
var options_for_tree = {
onHover: Sortable.onEmptyHover,
overlap: options.overlap,
containment: options.containment,
hoverclass: options.hoverclass
}
// fix for gecko engine
Element.cleanWhitespace(element);
"""
expected = r"""var options_for_droppable={overlap:options.overlap,containment:options.containment,tree:options.tree,hoverclass:options.hoverclass,onHover:Sortable.onHover}
var options_for_tree={onHover:Sortable.onEmptyHover,overlap:options.overlap,containment:options.containment,hoverclass:options.hoverclass}
Element.cleanWhitespace(element);"""
self.assertMinified(js, expected)
def testHairyRe(self):
js = r"""
inspect: function(useDoubleQuotes) {
var escapedString = this.gsub(/[\x00-\x1f\\]/, function(match) {
var character = String.specialChar[match[0]];
return character ? character : '\\u00' + match[0].charCodeAt().toPaddedString(2, 16);
});
if (useDoubleQuotes) return '"' + escapedString.replace(/"/g, '\\"') + '"';
return "'" + escapedString.replace(/'/g, '\\\'') + "'";
},
toJSON: function() {
return this.inspect(true);
},
unfilterJSON: function(filter) {
return this.sub(filter || Prototype.JSONFilter, '#{1}');
},
"""
expected = r"""inspect:function(useDoubleQuotes){var escapedString=this.gsub(/[\x00-\x1f\\]/,function(match){var character=String.specialChar[match[0]];return character?character:'\\u00'+match[0].charCodeAt().toPaddedString(2,16);});if(useDoubleQuotes)return'"'+escapedString.replace(/"/g,'\\"')+'"';return"'"+escapedString.replace(/'/g,'\\\'')+"'";},toJSON:function(){return this.inspect(true);},unfilterJSON:function(filter){return this.sub(filter||Prototype.JSONFilter,'#{1}');},"""
self.assertMinified(js, expected)
def testNoBracesWithComment(self):
js = r"""
onSuccess: function(transport) {
var js = transport.responseText.strip();
if (!/^\[.*\]$/.test(js)) // TODO: improve sanity check
throw 'Server returned an invalid collection representation.';
this._collection = eval(js);
this.checkForExternalText();
}.bind(this),
onFailure: this.onFailure
});
"""
expected = r"""onSuccess:function(transport){var js=transport.responseText.strip();if(!/^\[.*\]$/.test(js))
throw'Server returned an invalid collection representation.';this._collection=eval(js);this.checkForExternalText();}.bind(this),onFailure:this.onFailure});"""
self.assertMinified(js, expected)
def testSpaceInRe(self):
js = r"""
num = num.replace(/ /g,'');
"""
self.assertMinified(js, "num=num.replace(/ /g,'');")
def testEmptyString(self):
js = r'''
function foo('') {
}
'''
self.assertMinified(js, "function foo(''){}")
def testDoubleSpace(self):
js = r'''
var foo = "hey";
'''
self.assertMinified(js, 'var foo="hey";')
def testLeadingRegex(self):
js = r'/[d]+/g '
self.assertMinified(js, js.strip())
def testLeadingString(self):
js = r"'a string in the middle of nowhere'; // and a comment"
self.assertMinified(js, "'a string in the middle of nowhere';")
def testSingleCommentEnd(self):
js = r'// a comment\n'
self.assertMinified(js, '')
def testInputStream(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
ins = StringIO(r'''
function foo('') {
}
''')
outs = StringIO()
m = jsmin.JavascriptMinify()
m.minify(ins, outs)
output = outs.getvalue()
assert output == "function foo(''){}"
def testUnicode(self):
instr = u'\u4000 //foo'
expected = u'\u4000'
output = jsmin.jsmin(instr)
self.assertEqual(output, expected)
def testCommentBeforeEOF(self):
self.assertMinified("//test\r\n", "")
def testCommentInObj(self):
self.assertMinified("""{
a: 1,//comment
}""", "{a:1,}")
def testCommentInObj2(self):
self.assertMinified("{a: 1//comment\r\n}", "{a:1\n}")
def testImplicitSemicolon(self):
# return \n 1 is equivalent with return; 1
# so best make sure jsmin retains the newline
self.assertMinified("return;//comment\r\na", "return;a")
def testImplicitSemicolon2(self):
self.assertMinified("return//comment...\r\na", "return\na")
def testSingleComment2(self):
        self.assertMinified(r'x.replace(/\//, "_")// slash to underscore',
                            r'x.replace(/\//,"_")')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6aa6cad3f09fd39c8de6b26302daf10e485cedb5 | 27ece9ab880a0bdba4b2c053eccda94602c716d5 | /.history/save_20181129231105.py | 50671059975cdfa4cf895b943b529349ae4d201e | [] | no_license | Symfomany/keras | 85e3ad0530837c00f63e14cee044b6a7d85c37b2 | 6cdb6e93dee86014346515a2017652c615bf9804 | refs/heads/master | 2020-04-08T20:21:35.991753 | 2018-11-30T08:23:36 | 2018-11-30T08:23:36 | 159,695,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | import os, argparse
import tensorflow as tf
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
all its variables into constant
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def
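

# Example invocation (a sketch - the checkpoint directory and output node name
# are placeholders; the node name must match a tensor in your graph):
#
#   python save.py --model_dir=models --output_node_names=output/Softmax
#
# The frozen graph is written next to the checkpoint as frozen_model.pb.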
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
freeze_graph(args.model_dir, args.output_node_names) | [
"[email protected]"
] | |
ff90cd1f1161c0d09ab2942b7f313e655ef548a0 | a6bd898302ffebe9066595b264f9e5e38e6fa8e6 | /settings_template.py | 069b2d192200ef4343a3508486203a989c2cb909 | [] | no_license | symroe/teamprime_retweets | 65e8ec57095b138be45496eb115fb4da1d1e1af0 | 08e817da6191a8058b3606b076ba9de6bd253b12 | refs/heads/master | 2021-01-10T22:04:16.968867 | 2013-09-20T13:32:03 | 2013-09-20T13:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN_KEY = ""
ACCESS_TOKEN_SECRET = ""
username = "TeamPrimeLtd"
TWEET_PATH = "tweets"
| [
"[email protected]"
] | |
a083a001d9f5a9559169c82b7ac70022a8d131c7 | c534fba89ff0462334cc724ff4010cbed829e294 | /web/myadmin/migrations/0012_auto_20191019_1638.py | 8bbaa46d7e85d15be38f10c54609829eb800d7f6 | [] | no_license | victorfengming/python_bookshop | 974f5f8ff3b53b024b573f0f256409204116e114 | c0a4757fc2031a015d4b198ba889be69a2a4a3c5 | refs/heads/master | 2020-09-02T18:02:07.547345 | 2019-11-04T15:10:44 | 2019-11-04T15:10:44 | 219,275,403 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 2.2.3 on 2019-10-19 16:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myadmin', '0011_auto_20191018_2225'),
]
operations = [
migrations.DeleteModel(
name='Booktype',
),
migrations.DeleteModel(
name='Users',
),
]
| [
"[email protected]"
] | |
6cd666cf9ad2d4f9fbbfd2c624ff106e65444172 | 7c70f3cbaecfa4d77928c784ae12f232c273112e | /api_client/test_helper.py | 92caf6ea0aa0d2d82ca5d95fe1af6896fce47376 | [
"MIT"
] | permissive | uktrade/lite-tests-common | d029298d9144a447404d38899ab35ff8e54bf53d | 8ae386e55f899d0ffd61cc0a9156cd4db340d6d1 | refs/heads/master | 2020-08-03T19:20:39.673522 | 2020-07-21T09:59:01 | 2020-07-21T09:59:01 | 211,858,651 | 1 | 0 | MIT | 2020-07-21T09:59:03 | 2019-09-30T12:49:33 | Python | UTF-8 | Python | false | false | 2,474 | py | from .sub_helpers.documents import Documents
from .sub_helpers.applications import Applications
from .sub_helpers.cases import Cases
from .sub_helpers.document_templates import DocumentTemplates
from .sub_helpers.ecju_queries import EcjuQueries
from .sub_helpers.flags import Flags
from .sub_helpers.goods import Goods
from .sub_helpers.goods_queries import GoodsQueries
from .sub_helpers.organisations import Organisations
from .sub_helpers.ogel import Ogel
from .sub_helpers.parties import Parties
from .sub_helpers.picklists import Picklists
from .sub_helpers.queues import Queues
from .sub_helpers.users import Users
class TestHelper:
"""
Contains a collection of test helper classes, grouped by functional area, with each class containing
required logic wrapping calls to various LITE API endpoints.
"""
def __init__(self, api):
self.api_client = api
self.context = self.api_client.context
request_data = self.api_client.request_data
self.documents = Documents(api_client=self.api_client, request_data=request_data)
self.users = Users(api_client=self.api_client, request_data=request_data)
self.organisations = Organisations(api_client=self.api_client, request_data=request_data)
self.goods = Goods(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.goods_queries = GoodsQueries(api_client=self.api_client, request_data=request_data)
self.parties = Parties(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.ecju_queries = EcjuQueries(api_client=self.api_client, request_data=request_data)
self.picklists = Picklists(api_client=self.api_client, request_data=request_data)
self.ogel = Ogel(api_client=self.api_client, request_data=request_data)
self.cases = Cases(api_client=self.api_client, request_data=request_data)
self.flags = Flags(api_client=self.api_client, request_data=request_data)
self.queues = Queues(api_client=self.api_client, request_data=request_data)
self.document_templates = DocumentTemplates(api_client=self.api_client, request_data=request_data)
self.applications = Applications(
parties=self.parties,
goods=self.goods,
api_client=self.api_client,
documents=self.documents,
request_data=request_data,
organisations=self.organisations,
)
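

# Minimal construction sketch (hypothetical: `api` stands in for an
# authenticated ApiClient exposing the `context` and `request_data`
# attributes that __init__ reads above):
#
#   helper = TestHelper(api)
#   helper.organisations  # organisation endpoint wrappers
#   helper.applications   # application wrappers composed from parties/goods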
| [
"[email protected]"
] | |
5aa68c22244a5396ea453095dedc1d96aba4aa72 | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/easy/p645_findErrorNums.py | 0b9b378910292d7af736c77ca60c91c415bce9a7 | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | class Solution:
def findErrorNums(self, nums):
helper = [0] * len(nums)
for i in nums:
helper[i - 1] += 1
for i, n in enumerate(helper):
if n == 0:
lack = i + 1
elif n == 2:
more = i + 1
return [more, lack]
slu = Solution()
print(slu.findErrorNums([1, 2, 2, 4]))
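# expected output: [2, 3] (2 appears twice, 3 is missing)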
| [
"[email protected]"
] | |
c571164d09a9dfe8ee2571e96a5c3e2bb982d580 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Google/Drive/Revisions/Delete.py | 21b2277599036d6311d9fc5895330b8646d5bce5 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,974 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# Delete
# Removes a revision.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Delete(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Delete Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Google/Drive/Revisions/Delete')
def new_input_set(self):
return DeleteInputSet()
def _make_result_set(self, result, path):
return DeleteResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteChoreographyExecution(session, exec_id, path)
class DeleteInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Delete
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientSecret', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
"""
InputSet._set_input(self, 'FileID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'RefreshToken', value)
def set_RevisionID(self, value):
"""
Set the value of the RevisionID input for this Choreo. ((required, string) The ID of the revision.)
"""
InputSet._set_input(self, 'RevisionID', value)
class DeleteResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Delete Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class DeleteChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteResultSet(response, path)
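

# Minimal usage sketch (not part of the generated module; credentials and
# input values are placeholders). The standard Temboo pattern is: create the
# Choreo with a session, fill an InputSet, execute, then read the ResultSet.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = Delete(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken("<oauth-access-token>")
#   inputs.set_FileID("<file-id>")
#   inputs.set_RevisionID("<revision-id>")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())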
| [
"[email protected]"
] | |
36d6fbac09d283afec24203a8c80c252d0e04c93 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_backup_protection_containers_operations.py | 977185266eeefe7c362fb3931a8a7fd029b3b0e0 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 6,974 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._backup_protection_containers_operations import build_list_request
from .._vendor import RecoveryServicesBackupClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupProtectionContainersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
:attr:`backup_protection_containers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, vault_name: str, resource_group_name: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.ProtectionContainerResource"]:
"""Lists the containers registered to Recovery Services Vault.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param filter: OData filter options. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProtectionContainerResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ProtectionContainerResourceList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProtectionContainerResourceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectionContainers"
}
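

# Minimal usage sketch (not part of the generated module; the subscription ID,
# vault name and resource group are placeholders, and azure-identity is
# assumed to be installed):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.recoveryservicesbackup.activestamp.aio import (
#       RecoveryServicesBackupClient,
#   )
#
#   async def dump_containers():
#       async with RecoveryServicesBackupClient(
#           credential=DefaultAzureCredential(),
#           subscription_id="<subscription-id>",
#       ) as client:
#           async for container in client.backup_protection_containers.list(
#               vault_name="<vault-name>", resource_group_name="<rg-name>"
#           ):
#               print(container.name)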
| [
"[email protected]"
] | |
4077ee7230fdd5fcb8bf27ad4eec1e47ecf60567 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/J/JasonSanford/great_american_beer_festival.py | ccc57650e41049eee111fa8bbfab0a4bd1f01ccf | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | import scraperwiki
import lxml.html
html = scraperwiki.scrape("http://www.greatamericanbeerfestival.com/at-the-festival/breweries-at-the-2012-festival")
root = lxml.html.fromstring(html)
i = 1
for tr in root.cssselect("#brewery_table tbody tr"):
tds = tr.cssselect("td")
data = {
'id' : i,
'name' : tds[0].text_content(),
'city' : tds[1].text_content(),
'state' : tds[2].text_content(),
}
scraperwiki.sqlite.save(unique_keys=['id'], data=data)
    i += 1 | [
"[email protected]"
] | |
4dc75a5c5ad9b9adc0eee92205b2a3ec96120685 | 1a220abd21c56728aa3368534506bfc9ced8ad46 | /프로그래머스/lv0/120862. 최댓값 만들기 (2)/최댓값 만들기 (2).py | 2150e823f28bad1d9f1692f23f12517ff6e88e54 | [] | no_license | JeonJe/Algorithm | 0ff0cbf47900e7877be077e1ffeee0c1cd50639a | 6f8da6dbeef350f71b7c297502a37f87eb7d0823 | refs/heads/main | 2023-08-23T11:08:17.781953 | 2023-08-23T08:31:41 | 2023-08-23T08:31:41 | 197,085,186 | 0 | 0 | null | 2023-02-21T03:26:41 | 2019-07-15T23:22:55 | Python | UTF-8 | Python | false | false | 630 | py | def solution(numbers):
answer = 0
negative = []
positive = []
for i in numbers:
if i < 0:
negative.append(i)
else:
positive.append(i)
negative.sort()
positive.sort()
max_positive, max_negative, mix = -1e9, -1e9, -1e9
if len(positive) == 1 and len(negative) == 1:
mix = positive[-1] * negative[0]
if len(positive) >= 2:
max_positive = positive[-1] * positive[-2]
if len(negative) >= 2:
max_negative = negative[0] * negative[1]
answer = max(max_positive, max_negative, mix)
    return answer


# Quick sanity checks (sample inputs): the best pair is either the two largest
# positives or, when negatives pair up, the two most negative values.
print(solution([1, 2, -3, 4, -5]))       # expected 15  ((-3) * (-5))
print(solution([0, -31, 24, 10, 1, 9]))  # expected 240 (24 * 10)
 | [
"[email protected]"
] | |
b1dc61b9b0266ed2642cd5bf9517f09540601de5 | 7abb3d309a011a36247e0b4dcda3759537c45b2c | /utils/vb-meta-to-json-topology.py | 031f8c9a3763b172b8281d83709ffc18311a4b0b | [
"BSD-3-Clause"
] | permissive | TomPlano/varbench | 7937a8a7221117e2d817549eb8ba22746c324869 | 83933380e1876da388dd07a78e554e65f388861b | refs/heads/master | 2020-04-02T14:34:11.376400 | 2018-10-27T19:10:09 | 2018-10-27T19:10:09 | 154,529,766 | 0 | 0 | BSD-3-Clause | 2018-10-24T16:01:55 | 2018-10-24T16:01:54 | null | UTF-8 | Python | false | false | 2,486 | py | #!/usr/bin/env python
import os
import sys
import getopt
import json
def usage(argv, exit=None):
print "Usage: %s [OPTIONS] <VB metadata file> <VB JSON topology file (output)>" % argv[0]
print " -h (--help) : print help and exit"
    print " -v (--vb-path=) : path to VB Stats python module"
if exit is not None:
sys.exit(exit)
def parse_cmd_line(argc, argv):
opts = []
args = []
cur_path = os.path.dirname(os.path.realpath(__file__))
vb_path = cur_path + "/../vb-stats/"
try:
opts, args = getopt.getopt(
argv[1:],
"hv:",
["help", "vb-path="]
)
except getopt.GetoptError, err:
print >> sys.stderr, err
usage(argv, exit=1)
for o, a in opts:
if o in ("-h", "--help"):
usage(argv, exit=0)
elif o in ("-v", "--vb-path"):
vb_path = a
else:
usage(argv, exit=1)
if len(args) != 2:
usage(argv, exit=1)
return vb_path, args[0], args[1]
def main(argc, argv, envp):
vb_path, meta, json_file = parse_cmd_line(argc, argv)
procs = []
# Try to import vb-path
try:
sys.path.insert(0, vb_path)
from vb_stats import VB_Stats as vbs
except ImportError:
print >> sys.stderr, "Could not import VB_Stats. Please specify path to VB_Stats with '--vbs-path'"
usage(argv, exit=2)
with vbs(meta, load_data=False) as vb:
with open(json_file, "w") as f:
num_processors = vb.num_sockets_per_node * vb.num_cores_per_socket * vb.num_hw_threads_per_core
json.dump({
"processor_info" : {
"num_processors" : num_processors,
"num_sockets" : vb.num_sockets_per_node,
"cores_per_socket" : vb.num_cores_per_socket,
"hw_threads_per_core" : vb.num_hw_threads_per_core
},
# The format of p: [socket, core, hw_thread, os_core]
"processor_list" : [
{
"os_core" : p[3],
"socket" : p[0],
"core" : p[1],
"hw_thread" : p[2]
} for p in vb.processor_map
]
}, f, indent=4)
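
# Example invocation (a sketch; file names are placeholders):
#
#   ./vb-meta-to-json-topology.py --vb-path=../vb-stats run.meta topology.json
#
# The output JSON holds "processor_info" totals plus one "processor_list"
# entry per hardware thread (socket, core, hw_thread, os_core).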
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
envp = os.environ
sys.exit(main(argc, argv, envp))
| [
"[email protected]"
] | |
7f3e63f22434cad4df3c5f31228f840cee385144 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/5259522/snippet.py | 530896846672f9f888ff87c34b403125582a7bbd | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/env python
import sys
files = []
if len(sys.argv) >= 2:
    for file in sys.argv[1:]:
        files.append(str(file))
else:
    print "Usage: Wordcount.py file1 file2 file3 ..."
    sys.exit(1)
words_to_ignore = ["that","what","with","this","would","from","your","which","while","these"]
things_to_strip = [".",",","?",")","(","\"",":",";","'s"]
words_min_size = 4
print_in_html = True
text = ""
for file in files:
f = open(file,"rU")
for line in f:
text += line
words = text.lower().split()
wordcount = {}
for word in words:
for thing in things_to_strip:
if thing in word:
word = word.replace(thing,"")
if word not in words_to_ignore and len(word) >= words_min_size:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
sortedbyfrequency = sorted(wordcount,key=wordcount.get,reverse=True)
def print_txt(sortedbyfrequency):
for word in sortedbyfrequency:
print word, wordcount[word]
def print_html(sortedbyfrequency):
print "<html><head><title>Wordcount.py Output</title></head><body><table>"
for word in sortedbyfrequency:
print "<tr><td>%s</td><td>%s</td></tr>" % (word,wordcount[word])
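
# Example invocation (a sketch; file names are placeholders). With
# print_in_html left True, the HTML table can be redirected to a file:
#
#   python Wordcount.py essay1.txt essay2.txt > counts.html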
print "</table></body></html>"
if print_in_html == True:
print_html(sortedbyfrequency)
else:
print_txt(sortedbyfrequency) | [
"[email protected]"
] |