blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k to 681M ⌀ | star_events_count int64 0 to 209k | fork_events_count int64 0 to 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 to 10.2M | extension stringclasses 188 values | content stringlengths 3 to 10.2M | authors sequencelengths 1-1 | author_id stringlengths 1-132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5fa8083c771b118d03d567b6878554f82c71120c | bd187ecc8a94460cd17d3c13aa469c25a3f3be3a | /mainsite/migrations/0006_auto_20191127_0915.py | 75e8efb19fdb91c2f6c2a2354e86433dc4c9a946 | [] | no_license | ozkilim/DinosaurDating | 3226acb3f4987534ce5ceed7649d76d47b51065e | 44bb4583f50e7a5c903040ab80a63ba390330d35 | refs/heads/master | 2021-11-06T16:18:25.192520 | 2019-12-10T14:47:22 | 2019-12-10T14:47:22 | 224,671,044 | 0 | 0 | null | 2021-09-08T01:28:17 | 2019-11-28T14:30:07 | Python | UTF-8 | Python | false | false | 389 | py | # Generated by Django 2.2.7 on 2019-11-27 09:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0005_atendee_event'),
]
operations = [
migrations.AlterField(
model_name='atendee',
name='looking_for',
field=models.CharField(max_length=100),
),
]
| [
"[email protected]"
] | |
7d201b6089d5c2d7d4f81efd39e2d8b13d4eb4b8 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/circularflyer.py | 9a0d228924714e4ed8bf49c022803e589020bcde | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 4,298 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/CircularFlyer.py
import math
import BigWorld
import AnimationSequence
from Math import Matrix, Vector3
from debug_utils import LOG_CURRENT_EXCEPTION
from vehicle_systems.stricted_loading import makeCallbackWeak
import SoundGroups
class CircularFlyer(BigWorld.UserDataObject):
def __init__(self):
BigWorld.UserDataObject.__init__(self)
self.__prevTime = BigWorld.time()
self.__angularVelocity = 2 * math.pi / self.rotationPeriod
if not self.rotateClockwise:
self.__angularVelocity *= -1
self.__currentAngle = 0.0
self.__updateCallbackId = None
self.__model = None
self.__modelMatrix = None
self.__sound = None
self.__animator = None
BigWorld.loadResourceListBG((self.modelName, self.pixieName), makeCallbackWeak(self.__onResourcesLoaded))
return
def __del__(self):
self.__clear()
def __clear(self):
if self.__updateCallbackId is not None:
BigWorld.cancelCallback(self.__updateCallbackId)
self.__updateCallbackId = None
if self.__sound is not None:
self.__sound.stop()
self.__sound.releaseMatrix()
self.__sound = None
if self.__model is not None:
self.__animator = None
BigWorld.delModel(self.__model)
self.__model = None
return
def __onResourcesLoaded(self, resourceRefs):
if self.guid not in BigWorld.userDataObjects:
return
else:
self.__clear()
if self.modelName in resourceRefs.failedIDs:
return
try:
self.__model = resourceRefs[self.modelName]
self.__modelMatrix = Matrix()
self.__modelMatrix.setIdentity()
servo = BigWorld.Servo(self.__modelMatrix)
self.__model.addMotor(servo)
BigWorld.addModel(self.__model)
if self.actionName != '':
clipResource = self.__model.deprecatedGetAnimationClipResource(self.actionName)
if clipResource:
spaceID = BigWorld.player().spaceID
loader = AnimationSequence.Loader(clipResource, spaceID)
animator = loader.loadSync()
animator.bindTo(AnimationSequence.ModelWrapperContainer(self.__model, spaceID))
animator.start()
self.__animator = animator
if self.pixieName != '' and self.pixieName not in resourceRefs.failedIDs:
pixieNode = self.__model.node(self.pixieHardPoint)
pixieNode.attach(resourceRefs[self.pixieName])
if self.soundName != '':
self.__sound = SoundGroups.g_instance.getSound3D(self.__modelMatrix, self.soundName)
except Exception:
LOG_CURRENT_EXCEPTION()
self.__model = None
return
self.__prevTime = BigWorld.time()
self.__update()
return
def __update(self):
self.__updateCallbackId = None
self.__updateCallbackId = BigWorld.callback(0.0, self.__update)
curTime = BigWorld.time()
dt = curTime - self.__prevTime
self.__prevTime = curTime
self.__currentAngle += self.__angularVelocity * dt
if self.__currentAngle > 2 * math.pi:
self.__currentAngle -= 2 * math.pi
elif self.__currentAngle < -2 * math.pi:
self.__currentAngle += 2 * math.pi
radialPosition = Vector3(self.radius * math.sin(self.__currentAngle), 0, self.radius * math.cos(self.__currentAngle))
modelYaw = self.__currentAngle
if self.rotateClockwise:
modelYaw += math.pi / 2
else:
modelYaw -= math.pi / 2
localMatrix = Matrix()
localMatrix.setRotateY(modelYaw)
localMatrix.translation = radialPosition
self.__modelMatrix.setRotateYPR((self.yaw, self.pitch, self.roll))
self.__modelMatrix.translation = self.position
self.__modelMatrix.preMultiply(localMatrix)
return
| [
"[email protected]"
] | |
48a73da0886034bf90716950527d561d32bbab82 | dce8531d0e9665a09205f70a909ac1424f7e09eb | /preprocess.py | d6a53884fddf97d075c2264cc801f41dff0d4b95 | [
"MIT"
] | permissive | keonlee9420/Comprehensive-Tacotron2 | 40a6e5fcecf55ee02a8523a7e2701b6124748bee | 1eff7f08c41a2127bbe300b6d66ce5c966422b25 | refs/heads/main | 2023-08-07T16:10:15.133301 | 2022-02-20T14:30:07 | 2022-02-20T14:44:36 | 388,990,172 | 39 | 17 | MIT | 2023-07-31T13:08:05 | 2021-07-24T03:36:08 | Python | UTF-8 | Python | false | false | 640 | py | import argparse
from utils.tools import get_configs_of
from preprocessor import ljspeech, vctk
def main(config):
if "LJSpeech" in config["dataset"]:
preprocessor = ljspeech.Preprocessor(config)
if "VCTK" in config["dataset"]:
preprocessor = vctk.Preprocessor(config)
preprocessor.build_from_path()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
required=True,
help="name of dataset",
)
args = parser.parse_args()
preprocess_config, *_ = get_configs_of(args.dataset)
main(preprocess_config)
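    # Typical invocation (illustrative; the dataset name must match a config known
    # to get_configs_of), e.g.:
    #   python preprocess.py --dataset LJSpeech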
| [
"[email protected]"
] | |
13ba32c6e2a103795a5cafba7f437334176ac67e | d5fbb40c8fa95970a6b1dd10920071a3330c6de8 | /src_d21c/in_theta.py | 428938d97f7d9d9e57abdd3f26b45e3ee98844aa | [] | no_license | Pooleyo/theta.py | 622000e04a7834a7b12d371337992f6063c3f332 | 7bdf96f7494db7fda8dbe8d1e8bb536a5b39e39d | refs/heads/master | 2021-06-18T06:03:47.176742 | 2019-09-18T16:02:02 | 2019-09-18T16:02:02 | 137,497,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | test_mode = True
image_files = ['3x3_pixel_value_1.tif'] # "PSL_plate4_s10257_BBXRD.tif"#"3x3_white_test_image.tif"##"Nb_test_image.png"
# #"PSL_plate4_s10257_BBXRD.tif"#"Nb_test_image.png"#
source_position = [[50.0, 0.0, 49.8]] # In mm
normal = [[-25.6, 0.0, 12.7078]] # [-10.0, 0.0, 10.0] # The normal to the plane of the image plate with units mm.
sample_normal = [[0.0, 0.0, 1.0]] # This is used to correct for attenuation in the diffracting sample.
offset = [[0.0, 12.0, 0.0]] # X offset (mm), Y offset (mm), rotation (degrees); note that rotation is not actually used
# in this code, it is included simply to indicate which sonOfHoward parameters are being reference here.
x_scale = [56] # In mm
y_scale = [44] # In mm
view_x = [[0.01, 1.0, 0.02]] # [-0.71, 0.0, -0.71] # "normalised"
view_y = [[0.44, -0.02, 0.90]] # [0.0, 1.0, 0.0] # "normalised"
wavelength = 1.378 # In Angstroms
a_lattice = 3.3 # In Angstroms
filter_thickness = [[10.0, 6.0]]
filter_attenuation_length = [[34.1, 109.7]] # The attenuation length(s) of filter(s) used, in microns. Enter a new list
# element for each filter; the order doesn't matter. Zn, at 9 keV, has attenuation length of 34.1 microns. Al, at 9 keV,
# has attenuation length of 109.7 microns.
phi_0_definer = [0.0, 0.0, 1.0]
phi_limit = [-180.0, 180.0]
gsqr_limit = [0.0, 18.0]
theta_phi_n_pixels_width = 1
theta_phi_n_pixels_height = 1
num_width_subpixels = 1
num_height_subpixels = 1
plot = True
debug = False
name_plot_integrated_intensity = 'integrated_intensity_vs_gsqr.png'
| [
"[email protected]"
] | |
377b54e77c82a1e6af952880cf1817eb944f7fe3 | ea7f0b643d6e43f432eb49ef7f0c62899ebdd026 | /conans/test/util/output_test.py | 65a29d32d1f4ccfd5bd5c286a3c2e77fba71b7c3 | [
"MIT"
] | permissive | jbaruch/conan | 0b43a4dd789547bd51e2387beed9095f4df195e4 | 263722b5284828c49774ffe18d314b24ee11e178 | refs/heads/develop | 2021-01-19T21:15:37.429340 | 2017-04-18T14:27:33 | 2017-04-18T14:27:33 | 88,635,196 | 0 | 1 | null | 2017-04-18T14:34:03 | 2017-04-18T14:34:02 | null | UTF-8 | Python | false | false | 2,625 | py | # -*- coding: utf-8 -*-
import unittest
from conans.client.output import ConanOutput
from six import StringIO
from conans.client.rest.uploader_downloader import print_progress
from conans.test.utils.test_files import temp_folder
from conans import tools
import zipfile
import os
from conans.util.files import save, load
import sys
from conans.test.utils.tools import TestClient
class OutputTest(unittest.TestCase):
def simple_output_test(self):
stream = StringIO()
output = ConanOutput(stream)
output.rewrite_line("This is a very long line that has to be truncated somewhere, "
"because it is so long it doesn't fit in the output terminal")
self.assertIn("This is a very long line that ha ... esn't fit in the output terminal",
stream.getvalue())
def error_test(self):
client = TestClient()
conanfile = """
# -*- coding: utf-8 -*-
from conans import ConanFile
from conans.errors import ConanException
class PkgConan(ConanFile):
def source(self):
self.output.info("TEXT ÑÜíóúéáàèòù абвгдежзийкл 做戏之说 ENDTEXT")
"""
client.save({"conanfile.py": conanfile})
client.run("source")
self.assertIn("TEXT", client.user_io.out)
self.assertIn("ENDTEXT", client.user_io.out)
def print_progress_test(self):
stream = StringIO()
output = ConanOutput(stream)
for units in range(50):
print_progress(output, units)
output_str = stream.getvalue()
self.assertNotIn("=", output_str)
self.assertNotIn("[", output_str)
self.assertNotIn("]", output_str)
def unzip_output_test(self):
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "example.txt")
save(file_path, "Hello world!")
zip_path = os.path.join(tmp_dir, 'example.zip')
zipf = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for root, _, files in os.walk(tmp_dir):
for f in files:
zipf.write(os.path.join(root, f), f)
zipf.close()
output_dir = os.path.join(tmp_dir, "output_dir")
new_out = StringIO()
old_out = sys.stdout
try:
sys.stdout = new_out
tools.unzip(zip_path, output_dir)
finally:
sys.stdout = old_out
output = new_out.getvalue()
self.assertRegexpMatches(output, "Unzipping [\d]+ bytes, this can take a while")
content = load(os.path.join(output_dir, "example.txt"))
self.assertEqual(content, "Hello world!")
| [
"[email protected]"
] | |
6938f0bc75372893e1b90af44297d7efdbdabe3c | 2d2ef049d450ef9ac6459bcdd1ea25fccc0305d5 | /loadTimeEstimator.py | b1f03e7172b7ba44449aeb4afa1aa99899599ebb | [] | no_license | iotmember/code-fights | fc119f53cc42f9fea8a40f43335d93d076c92e9d | e7f1fdb9d5068bd2ed67d9df07541f306097bd19 | refs/heads/master | 2021-05-31T06:03:41.497371 | 2016-04-08T05:40:39 | 2016-04-08T05:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import sys
def loadTimeEstimator(sizes, uploadingStart, V):
finish_time = [x for x in uploadingStart]
t = [0 for x in uploadingStart]
c = len(sizes)
time_ = 0
curr_time = uploadingStart[0]
while (c > 0):
index_of_uploading_start = [i for i, x in enumerate(uploadingStart) if x == curr_time + time_ and sizes[i] != 0 ]
if len(index_of_uploading_start):
speed = V/float(len(index_of_uploading_start))
else:
speed = V
#print index_of_uploading_start
for i in index_of_uploading_start:
if sizes[i] > 0:
sizes[i] = sizes[i] - speed
finish_time[i] = finish_time[i] + 1
t[i] = t[i] + 1
uploadingStart[i] = uploadingStart[i] + 1
time_ += 1
c = len([x for x in sizes if x > 0])
return finish_time
sizes = [21, 10]
uploadingStart = [100, 105]
V = 2
#print loadTimeEstimator(sizes, uploadingStart, V)
print loadTimeEstimator([20, 10], [1, 1], 1)
#print loadTimeEstimator([1, 1, 1], [10, 20, 30], 3)
| [
"[email protected]"
] | |
6251d68f138a9b14476759cfdce97fd773872ec8 | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py3/pgms/sec8/Extend/ctypes/convs.py | 616830fa1e5821bc2a04d9cde8ded2752fa0ab67 | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | #!/usr/bin/env python3
# convs.py - ctype conversions
from ctypes import *
# load shared library
mylib = CDLL("./mylib.so")
# double mult(double, double)
mylib.mult.argtypes = (c_double, c_double)
mylib.mult.restype = c_double
# call C mult() function
print(mylib.mult(2.5, 3.5))
#####################################
#
# $ convs.py
# 8.75
#
| [
"[email protected]"
] | |
c625c91f36b9023bdda7ad7f8346b9bde769ae1b | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC300~ABC399/ABC300/e.py | 9ba33d532fb2e724f862b0ad868328126a7e1249 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from functools import lru_cache
def modinv(a: int, m: int) -> int:
'''
    Modular inverse.
    Returns the solution x = a^(-1) of a*x mod m = 1.
Parameters
----------
a:int
m:int
'''
x, y, u, v = 1, 0, 0, 1
M = m
while m > 0:
k = a//m
x -= k*u
y -= k*v
x, u = u, x
y, v = v, y
a, m = m, a % m
assert a == 1, "a and m aren't relatively prime numbers"
if x < 0:
x += M
return x
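# Illustrative self-check of modinv (never called by the solution; the values are
# arbitrary): a modular inverse satisfies a * modinv(a, m) % m == 1 for a coprime to m.
def _demo_modinv():
    mod = 998244353
    for a in (2, 5, 123456789):
        assert a * modinv(a, mod) % mod == 1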
N = int(input())
MOD = 998244353
P = modinv(5, MOD)
@lru_cache(maxsize=None)
def f(n):
if n >= N:
return 1 if n == N else 0
res = 0
for i in range(2, 7):
res += f(i*n)
return res*P % MOD
ans = f(1)
print(ans)
| [
"[email protected]"
] | |
02cf011d3d9b1895c848c8f25e71c77dc301fdcf | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Simulador_20200708161311.py | cbf32fa7cf0f64a93631d4928bd7f10dc6a1caf5 | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,983 | py | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
class Simulador():
def __init__(
self,
        tamanho_matriz, #number of rows and columns of the spherical matrix
        percentual_inicial_tipo1, #initial percentage of the population infected as type 1
        percentual_inicial_tipo2, #initial percentage of the population infected as type 2
        chance_infeccao, #chance that a type 2 infected individual has of infecting a healthy individual
        chance_infeccao_tipo2, #chance of an infected individual becoming contagious
        chance_morte, #chance of a type 2 individual dying at the end of an update
        atualizacoes_cura): #number of updates needed for a type 1 or 2 individual to be cured
self.num_atualizacoes = 0
self.individuos_infectados_tipo_2 = []
self.individuos_infectados_tipo_1 = []
self.individuos_curados = []
self.individuos_mortos = []
self.lista_matrizes_posicionamento = []
self.matriz_status = np.zeros([tamanho_matriz,tamanho_matriz], dtype= int)
self.fabrica_individuo = Fabrica_individuo(
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura)
self.matriz_individuos = pd.DataFrame(columns=range(tamanho_matriz), index=range(tamanho_matriz))
self.matriz_individuos.loc[:] = self.fabrica_individuo.criar_individuo(Individuo.SADIO,(0,0))
self.matriz_status[:] = Individuo.SADIO
        #object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.popular(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        #dataframe that will store the results of each update
self.dataframe = pd.DataFrame(index = [0])
self.salvar_posicionamento()
def salvar_posicionamento(self):
self.lista_matrizes_posicionamento.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
        #iterate over the list of infecting individuals; each one performs its infection step
for indice in lista_infectantes:
            #get the neighbours of the current infecting individual
lista_vizinhos = self.matriz_esferica.get_vizinhos(indice[0], indice[1])
            #For each neighbour, if it is healthy, a random number is drawn to check whether it gets infected
for vizinho in lista_vizinhos:
x = vizinho[0]
y = vizinho[1]
                #check for HEALTHY (SADIO) status
if self.matriz_status[x,y] == Individuo.SADIO:
                    #determine the new status
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].infectar()
                    #if it became a type 1 infected
if novo_status == Individuo.INFECTADO_TIPO_1:
                        #add to the list of new type 1 cases
lista_novos_infectados_tipo1.append((x,y))
                        #update the status of the newly infected object
self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_1
                        #update the status in the status matrix
self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_1
if novo_status == Individuo.INFECTADO_TIPO_2:
                        #add to the list of new type 2 cases
lista_novos_infectados_tipo2.append((x,y))
                        #update the status of the newly infected object
self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_2
                        #update the status in the status matrix
self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_2
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def verificar_morte(self, lista_infectantes_tipo2):
lista_mortos = []
for indice in lista_infectantes_tipo2:
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_morte()
if novo_status == Individuo.MORTO:
lista_mortos.append(indice)
self.matriz_status[indice[0], indice[1]] = Individuo.MORTO
return lista_mortos
def verificar_cura(self, lista_infectantes):
lista_curados = []
for indice in lista_infectantes:
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_cura()
if novo_status == Individuo.CURADO:
lista_curados.append(indice)
self.matriz_status[indice[0], indice[1]] = Individuo.CURADO
return lista_curados
def iterar(self):
        #Determine the new infections produced by the current infecting individuals in the matrix
lista_novos_infectados_tipo1_1, lista_novos_infectados_tipo2_1 = self.verificar_infeccao(self.individuos_infectados_tipo_1)
lista_novos_infectados_tipo1_2, lista_novos_infectados_tipo2_2 = self.verificar_infeccao(self.individuos_infectados_tipo_2)
        #Check for deaths among type 2 infected
lista_mortos = self.verificar_morte(self.individuos_infectados_tipo_2)
        #remove this update's dead from the list of type 2 infected
self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_mortos]
        #add the new dead to the overall list of dead
self.individuos_mortos = self.individuos_mortos + lista_mortos
        #Check for cures
lista_curados_tipo1 = self.verificar_cura(self.individuos_infectados_tipo_1)
lista_curados_tipo2 = self.verificar_cura(self.individuos_infectados_tipo_2)
        #remove the cured from the lists of type 1 and type 2 infected
self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_curados_tipo2]
self.individuos_infectados_tipo_1 = [i for i in self.individuos_infectados_tipo_1 if i not in lista_curados_tipo1]
        #add the newly cured to the overall list of cured
self.individuos_curados = self.individuos_curados + lista_curados_tipo1 + lista_curados_tipo2
        # self. #move the infecting individuals:
for i in range(len(self.individuos_infectados_tipo_1)):
self.individuos_infectados_tipo_1[i] = self.mover_infectante(self.individuos_infectados_tipo_1[i])
for i in range(len(self.individuos_infectados_tipo_2)):
self.individuos_infectados_tipo_2[i] = self.mover_infectante(self.individuos_infectados_tipo_2[i])
        #add the new type 1 and type 2 infected to their respective lists
self.individuos_infectados_tipo_2 = self.individuos_infectados_tipo_2 + lista_novos_infectados_tipo2_1 + lista_novos_infectados_tipo2_2
self.individuos_infectados_tipo_1 = self.individuos_infectados_tipo_1 + lista_novos_infectados_tipo1_1 + lista_novos_infectados_tipo1_2
        #save the results of this update in the dataframe:
num_mortos = len(self.individuos_mortos)
num_curados = len(self.individuos_curados)
num_tipo_1 = len(self.individuos_infectados_tipo_1)
num_tipo_2 = len(self.individuos_infectados_tipo_2)
dict = {
'num_sadios':self.populacao_inicial - num_mortos - num_curados - num_tipo_1 - num_tipo_2 ,
'num_infect_t1':num_tipo_1,
'num_infect_t2':num_tipo_2,
'num_curados':num_curados,
'num_mortos':num_mortos}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
        #save the new status matrix
self.salvar_posicionamento()
        #add 1 to the number of updates performed on the matrix
self.num_atualizacoes +=1
def popular(self, tamanho_matriz):
        #list of possible index combinations of the data matrix
permutacoes = permutations(list(range(tamanho_matriz)),2)
        #convert to a list of (x,y) tuples
lista_indices = list(permutacoes)
        #shuffle the indices
random.shuffle(lista_indices)
        #create the first type 1 infected:
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
#self.matriz_individuos[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
self.individuos_infectados_tipo_1.append((ind_x,ind_y))
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        #create the remaining type 1 infected
for i in range(1,self.num_inicial_tipo1):
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
#self.matriz_individuos[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
self.individuos_infectados_tipo_1.append((ind_x,ind_y))
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        #create the remaining type 2 infected:
for indice in range(self.num_inicial_tipo2):
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(ind_x,ind_y))
#self.matriz_individuos[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
self.individuos_infectados_tipo_2.append((ind_x,ind_y))
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_2
def mover_infectante(self, indice):
pos_x, pos_y = indice[0], indice[1]
rng_posicao = random.random()
if rng_posicao <=0.25:
            #move up
pos_x -= 1
elif rng_posicao <=0.5:
            #move down
pos_x += 1
elif rng_posicao <=0.75:
            #move left
pos_y -= 1
else:
            #move right
pos_y += 1
novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        #swap the values in the dataframe
aux = self.matriz_individuos.loc[novo_x, novo_y]
self.matriz_individuos.loc[novo_x, novo_y] = self.matriz_individuos.loc[pos_x, pos_y]
self.matriz_individuos.loc[pos_x, pos_y] = aux
        #swap the values in the status matrix
aux = self.matriz_status[novo_x, novo_y]
self.matriz_status[novo_x, novo_y] = self.matriz_status[pos_x, pos_y]
self.matriz_status[pos_x, pos_y] = aux
return (novo_x, novo_y)
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.
percentual_inicial_tipo2 = 0.
sim = Simulador(
10,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.individuos_infectados_tipo_2)
#print(sim.individuos_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'yellow', 'red'])
for i in range(10):
plt.matshow(sim.lista_matrizes_posicionamento[1], cmap = cmap)
sim.iterar()
plt.show()
# plt.matshow(sim.lista_matrizes_posicionamento[1], cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[2], cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[3], cmap = cmap)
| [
"[email protected]"
] | |
1eb48a906c41d240228e260d96f74a91e308d423 | 2afb1095de2b03b05c8b96f98f38ddeca889fbff | /web_scrapping/try_steam_parse.py | f76504a9eb079147e18d2455c67b424ba847329a | [] | no_license | draganmoo/trypython | 187316f8823296b12e1df60ef92c54b7a04aa3e7 | 90cb0fc8626e333c6ea430e32aa21af7d189d975 | refs/heads/master | 2023-09-03T16:24:33.548172 | 2021-11-04T21:21:12 | 2021-11-04T21:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | import glob
import pandas as pd
from bs4 import BeautifulSoup
df = pd.DataFrame()
df1 = pd.DataFrame()
df2 = pd.DataFrame()
for one_file in glob.glob("steam_html_file/*.html"):
f = open(one_file, "r", encoding= "utf-8")
soup = BeautifulSoup(f.read(),"html.parser")
items = soup.find("div", id = "search_result_container")
for item in items.find_all("a"):
try:
            ### price changes add multiple child tags under the same parent tag, which breaks the overall tag layout
price_change = item.find("div",class_="col search_price discounted responsive_secondrow")
if not price_change:
                ### price of games whose price has not changed
original_price = item.find("div", class_="col search_price_discount_combined responsive_secondrow").get_text().strip()
else:
                ### Note: even when a price change is found, still output an empty value as a placeholder, to prepare for merging the data later!
original_price = ""
df1 = df1.append({
"3.original price": original_price
}, ignore_index=True)
if price_change:
                ## current price of games whose price has changed
changed_price = price_change.get_text().strip()
else:
changed_price = ""
df2 = df2.append({
"4.changed price":changed_price
},ignore_index=True)
# print(changed_price)
            ### price information extraction finished
            ###### ??? TODO: work out how to splice the changed prices into the column of unchanged prices; research direction: how to fill gaps in a column when merging multiple DataFrames
name = item.find("div", class_="col search_name ellipsis").find("span").get_text().strip()
release_time = item.find("div", class_="col search_released responsive_secondrow").get_text().strip()
df = df.append({
"1.name": name,
"2.release_time":release_time,
},ignore_index=True)
except:
pass
df2 = df1.join(df2)
df = df.join(df2)
print (df)
df.to_csv("steam_html_file/steam_fps_game.csv", encoding="utf-8-sig")
####
| [
"[email protected]"
] | |
c6b5593b63f105914856900aebbc5be8af1a513d | 7e90a1f8280618b97729d0b49b80c6814d0466e2 | /workspace_pc/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/generate_cached_setup.py | 4daf5fb9eb5f1b0d60987d0c7e079fc39de7971f | [] | no_license | IreneYIN7/Map-Tracer | 91909f4649a8b65afed56ae3803f0c0602dd89ff | cbbe9acf067757116ec74c3aebdd672fd3df62ed | refs/heads/master | 2022-04-02T09:53:15.650365 | 2019-12-19T07:31:31 | 2019-12-19T07:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/gse5/catkin_ws/cartographer_ws/install_isolated;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/gse5/catkin_ws/cartographer_ws/devel_isolated/jackal_navigation/env.sh')
output_filename = '/home/gse5/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
44b8b15712428540ec8bb8881ed03e41fb5bbabc | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2167.py | 0d743940a6f806b63db35149af9e8542d6728277 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.rx(-0.5686282702997527,input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=18
prog.h(input_qubit[3]) # number=26
prog.cz(input_qubit[0],input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=28
prog.x(input_qubit[3]) # number=21
prog.rx(0.4241150082346221,input_qubit[2]) # number=33
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=23
prog.cz(input_qubit[1],input_qubit[2]) # number=24
prog.h(input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[2],input_qubit[0]) # number=29
prog.z(input_qubit[2]) # number=30
prog.cx(input_qubit[2],input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[0]) # number=14
prog.y(input_qubit[0]) # number=15
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2167.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
a24704b833d23a859af9ec1629f1226377f8c7ea | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /bill_ws/build/bill_description/catkin_generated/pkg.develspace.context.pc.py | d846aac1c29db5d24fa24af24e101a5ae58bdccd | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "bill_description"
PROJECT_SPACE_DIR = "/home/bill/bill_ros/bill_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
96a2513a19ec5ef5b4cef589ef45c1624ee248cb | 117f066c80f3863ebef74463292bca6444f9758a | /data_pulling/crypto/do.py | 33e917485159521a9e506bfcba5606efbf76ad82 | [] | no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 1,908 | py | import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import os
import glob
import inspect
def get_pandas_read_csv_defaults():
# probably fragile
i = inspect.getfullargspec(pd.read_csv)
v = i.defaults
k = i.args[-len(v):]
kwargs = dict(zip(k, v))
return kwargs
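# The helper above pairs the trailing keyword arguments of pd.read_csv with their
# default values via inspect.getfullargspec; the result depends on the installed
# pandas version exposing those parameters as positional-or-keyword, hence the
# "probably fragile" note.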
_mydir = os.path.dirname(os.path.realpath('__file__'))
def load_raw():
# note manually removed some bad row
kwargs = get_pandas_read_csv_defaults()
kwargs['thousands'] = ',' # always do this
kwargs['parse_dates'] = ['Date']
kwargs['na_values'] = ['-']
kwargs['dtype'] = 'str'
dtype = {
'Close': 'float',
'High': 'float',
'Low': 'float',
'Market Cap': 'float',
'Open': 'float',
'Volume': 'float'
}
meta = pd.read_csv(os.path.join(_mydir, 'Top100Cryptos/data/100 List.csv'))
names = meta.Name.tolist()
files = [os.path.join(_mydir, 'Top100Cryptos/data/{}.csv'.format(x)) for x in names]
# files = glob.glob(os.path.join(_mydir, 'Top100Cryptos/data/*.csv'))
dfs = list()
datadir = os.path.join(_mydir, 'parsed')
if not os.path.exists(datadir):
os.makedirs(datadir)
for i, (name, f) in enumerate(zip(names, files)):
mtime = os.path.getmtime(f)
dirname = os.path.join(datadir, 'name={}/mtime={}'.format(name, mtime))
filename = os.path.join(dirname, 'data.parquet')
if not os.path.exists(filename):
df = pd.read_csv(f, **kwargs)
df = pa.Table.from_pandas(df)
if not os.path.exists(dirname):
os.makedirs(dirname)
print('writing {}'.format(filename))
pq.write_table(df, filename)
pq.read_table('./parsed') # test
else:
print('{} exists'.format(filename))
return pq.read_table('./parsed') # test
# id big ups big downs
df = load_raw().to_pandas()  # convert the pyarrow Table to a DataFrame before sorting
df = df.sort_values('Date')
| [
"[email protected]"
] | |
7c4ca5b5dfae96a3696b405eff6c615b26b86332 | 4c873560c66ce3b84268ad2abcd1ffcada32e458 | /examples/scripts/csc/gwnden_clr.py | d382225550d6ce93e6a690716a2697b9e384579a | [
"BSD-3-Clause"
] | permissive | wangjinjia1/sporco | d21bf6174365acce614248fcd2f24b72d5a5b07f | c6363b206fba6f440dd18de7a17dadeb47940911 | refs/heads/master | 2023-04-02T01:10:02.905490 | 2021-03-29T14:20:57 | 2021-03-29T14:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Gaussian White Noise Restoration via CSC
========================================
This example demonstrates the removal of Gaussian white noise from a colour image using convolutional sparse coding :cite:`wohlberg-2016-convolutional`,
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \sum_c \left\| \sum_m \mathbf{d}_{m} * \mathbf{x}_{c,m} -\mathbf{s}_c \right\|_2^2 + \lambda \sum_m \| \mathbf{x}_m \|_1 + \mu \| \{ \mathbf{x}_{c,m} \} \|_{2,1}$$
where $\mathbf{d}_m$ is the $m^{\text{th}}$ dictionary filter, $\mathbf{x}_{c,m}$ is the coefficient map corresponding to the $c^{\text{th}}$ colour band and $m^{\text{th}}$ dictionary filter, and $\mathbf{s}_c$ is colour band $c$ of the input image.
"""
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import signal
from sporco import fft
from sporco import metric
from sporco import plot
from sporco.cupy import (cupy_enabled, np2cp, cp2np, select_device_by_load,
gpu_info)
from sporco.cupy.admm import cbpdn
"""
Boundary artifacts are handled by performing a symmetric extension on the image to be denoised and then cropping the result to the original image support. This approach is simpler than the boundary handling strategies that involve the insertion of a spatial mask into the data fidelity term, and for many problems gives results of comparable quality. The functions defined here implement symmetric extension and cropping of images.
"""
def pad(x, n=8):
if x.ndim == 2:
return np.pad(x, n, mode='symmetric')
else:
return np.pad(x, ((n, n), (n, n), (0, 0)), mode='symmetric')
def crop(x, n=8):
return x[n:-n, n:-n]
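"""
A small round-trip check of the two helpers above (illustrative only; the array is an
arbitrary example): cropping a symmetrically padded array recovers the original support.
"""
def _check_pad_crop():
    x = np.ones((16, 16), dtype=np.float32)
    assert crop(pad(x)).shape == x.shape
    assert np.allclose(crop(pad(x)), x)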
"""
Load a reference image and corrupt it with Gaussian white noise with $\sigma = 0.1$. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.1, img.shape).astype(np.float32)
"""
Highpass filter test image.
"""
npd = 16
fltlmbd = 5.0
imgnl, imgnh = signal.tikhonov_filter(imgn, fltlmbd, npd)
"""
Load dictionary.
"""
D = util.convdicts()['G:8x8x128']
"""
Set solver options. See Section 8 of :cite:`wohlberg-2017-convolutional2` for details of construction of $\ell_1$ weighting matrix $W$.
"""
imgnpl, imgnph = signal.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = fft.irfftn(np.conj(fft.rfftn(D[..., np.newaxis, :], imgnph.shape[0:2],
(0, 1))) * fft.rfftn(imgnph[..., np.newaxis], None, (0, 1)),
imgnph.shape[0:2], (0, 1))
W = 1.0/(np.maximum(np.abs(W), 1e-8))
lmbda = 1.5e-2
mu = 2.7e-1
opt = cbpdn.ConvBPDNJoint.Options({'Verbose': True, 'MaxMainIter': 250,
'HighMemSolve': True, 'RelStopTol': 3e-3, 'AuxVarObj': False,
'L1Weight': cp2np(W), 'AutoRho': {'Enabled': False},
'rho': 1e3*lmbda})
"""
Initialise a ``sporco.cupy`` version of a :class:`.admm.cbpdn.ConvBPDNJoint` object and call the ``solve`` method.
"""
if not cupy_enabled():
print('CuPy/GPU device not available: running without GPU acceleration\n')
else:
id = select_device_by_load()
info = gpu_info()
if info:
print('Running on GPU %d (%s)\n' % (id, info[id].name))
b = cbpdn.ConvBPDNJoint(np2cp(D), np2cp(pad(imgnh)), lmbda, mu, opt, dimK=0)
X = cp2np(b.solve())
"""
The denoised estimate of the image is just the reconstruction from the coefficient maps.
"""
imgdp = cp2np(b.reconstruct().squeeze())
imgd = np.clip(crop(imgdp) + imgnl, 0, 1)
"""
Display solve time and denoising performance.
"""
print("ConvBPDNJoint solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgd))
"""
Display the reference, noisy, and denoised images.
"""
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Noisy', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgd, title='CSC Result', fig=fig)
fig.show()
"""
Plot functional evolution during ADMM iterations.
"""
its = b.getitstat()
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional')
"""
Plot evolution of ADMM residuals and ADMM penalty parameter.
"""
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'])
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter')
# Wait for enter on keyboard
input()
| [
"[email protected]"
] | |
d038c06c6c4f20653a17f5fb33b4d16d637fb9be | 66acbd1f601e00f311c53a9ce0659e5b56c87fef | /pre_analysis/observable_analysis/topc4mcintervalanalyser.py | 508e973104b53d9e4245701856db48d1f55c9b6c | [
"MIT"
] | permissive | hmvege/LatticeAnalyser | fad3d832190f4903642a588ed018f6cca3858193 | 6c3e69ab7af893f23934d1c3ce8355ac7514c0fe | refs/heads/master | 2021-05-25T11:46:30.278709 | 2019-04-11T14:14:23 | 2019-04-11T14:14:23 | 127,303,453 | 0 | 1 | null | 2018-10-12T21:09:58 | 2018-03-29T14:29:14 | Python | UTF-8 | Python | false | false | 601 | py | from pre_analysis.core.flowanalyser import FlowAnalyser
class Topc4MCIntervalAnalyser(FlowAnalyser):
"""Class for topological charge with quartic topological charge."""
observable_name = r"$\langle Q^4 \rangle$"
observable_name_compact = "topc4MC"
x_label = r"$\sqrt{8t_{f}}$ [fm]"
y_label = r"$\langle Q^4 \rangle$"
def __init__(self, *args, **kwargs):
super(Topc4MCIntervalAnalyser, self).__init__(*args, **kwargs)
self.y **= 4
def main():
exit("Module Topc4MCIntervalAnalyser not intended for standalone usage.")
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
37b0f73442e6b0db42d0419136e19faef5f2f973 | d272b041f84bbd18fd65a48b42e0158ef6cceb20 | /catch/datasets/tacaribe_mammarenavirus.py | 7f62021e43d0f87c81e26077042b3721995eee6d | [
"MIT"
] | permissive | jahanshah/catch | bbffeadd4113251cc2b2ec9893e3d014608896ce | 2fedca15f921116f580de8b2ae7ac9972932e59e | refs/heads/master | 2023-02-19T13:30:13.677960 | 2021-01-26T03:41:10 | 2021-01-26T03:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | """Dataset with 'Tacaribe mammarenavirus' sequences.
A dataset with 5 'Tacaribe mammarenavirus' sequences. The virus is
segmented and has 2 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 4 genomes. Many genomes
may have fewer than 2 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (L|S)\]')
m = c.search(header)
if not m:
raise Exception("Unknown or invalid segment in header %s" % header)
seg = m.group(1)
return "segment_" + seg
def seq_header_to_genome(header):
import re
c = re.compile(r'\[genome (.+)\]')
m = c.search(header)
if not m:
raise Exception("Unknown genome in header %s" % header)
return m.group(1)
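# Illustration of the header convention parsed above (this header string is made up;
# real headers come from the FASTA file registered below):
def _demo_header_parsing():
    header = "example [segment S] [genome ExampleGenome]"
    assert seq_header_to_chr(header) == "segment_S"
    assert seq_header_to_genome(header) == "ExampleGenome"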
chrs = ["segment_" + seg for seg in ['L', 'S']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr,
seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/tacaribe_mammarenavirus.fasta.gz", relative=True)
sys.modules[__name__] = ds
| [
"[email protected]"
] | |
47b0fad3467437ec0622fddde5ff65dbed7f685e | a306e621d15d6287f75c8e4f22329da810408605 | /tests/test_distance.py | 2500daf3774b7e965c5bd7d243e4f01b24e8e026 | [
"MIT"
] | permissive | moble/quaternionic | c6175a8e5ff57fbb9d2f2462bc761368f3b4fa66 | 074b626d0c63aa78479ff04ed41638931ca6693a | refs/heads/main | 2023-06-08T08:21:46.827232 | 2023-02-07T17:36:31 | 2023-02-07T17:36:38 | 286,745,519 | 73 | 7 | MIT | 2023-05-27T12:19:43 | 2020-08-11T13:00:26 | Python | UTF-8 | Python | false | false | 3,116 | py | import warnings
import numpy as np
import quaternionic
import pytest
@pytest.mark.parametrize("rotor,rotation,slow", [ # pragma: no branch
(quaternionic.distance.rotor, quaternionic.distance.rotation, True),
quaternionic.distance.CreateMetrics(lambda f: f, quaternionic.utilities.pyguvectorize) + (False,)
], ids=["jit metrics", "non-jit metrics"])
def test_metrics(Rs, array, rotor, rotation, slow):
metric_precision = 4.e-15
Rs = array(Rs.ndarray)
one = array(1, 0, 0, 0)
intrinsic_funcs = (rotor.intrinsic, rotation.intrinsic)
chordal_funcs = (rotor.chordal, rotation.chordal)
metric_funcs = intrinsic_funcs + chordal_funcs
rotor_funcs = (rotor.intrinsic, rotor.chordal)
rotation_funcs = (rotation.intrinsic, rotation.chordal)
distance_dict = {func: func(Rs, Rs[:, np.newaxis]) for func in metric_funcs}
# Check non-negativity
for mat in distance_dict.values():
assert np.all(mat >= 0.)
# Check discernibility
for func in metric_funcs:
if func in chordal_funcs:
eps = 0
else:
eps = 5.e-16
if func in rotor_funcs:
target = Rs != Rs[:, np.newaxis]
else:
target = np.logical_and(Rs != Rs[:, np.newaxis], Rs != - Rs[:, np.newaxis])
assert ((distance_dict[func] > eps) == target).all()
# Check symmetry
for mat in distance_dict.values():
assert np.allclose(mat, mat.T, atol=metric_precision, rtol=0)
# Check triangle inequality
for mat in distance_dict.values():
assert ((mat - metric_precision)[:, np.newaxis, :] <= mat[:, :, np.newaxis] + mat).all()
# Check distances from self or -self
for func in metric_funcs:
# All distances from self should be 0.0
if func in chordal_funcs:
eps = 0
else:
eps = 5.e-16
assert (np.diag(distance_dict[func]) <= eps).all()
# Chordal rotor distance from -self should be 2
assert (abs(rotor.chordal(Rs, -Rs) - 2.0) < metric_precision).all()
# Intrinsic rotor distance from -self should be 2pi
assert (abs(rotor.intrinsic(Rs, -Rs) - 2.0 * np.pi) < metric_precision).all()
# Rotation distances from -self should be 0
assert (rotation.chordal(Rs, -Rs) == 0.0).all()
assert (rotation.intrinsic(Rs, -Rs) < 5.e-16).all()
# We expect the chordal distance to be smaller than the intrinsic distance (or equal, if the distance is zero)
assert np.logical_or(rotor.chordal(one, Rs) < rotor.intrinsic(one, Rs), Rs == one).all()
if slow:
# Check invariance under overall rotations: d(R1, R2) = d(R3*R1, R3*R2) = d(R1*R3, R2*R3)
for func in rotor.chordal, rotation.intrinsic:
rotations = Rs[:, np.newaxis] * Rs
right_distances = func(rotations, rotations[:, np.newaxis])
assert (abs(distance_dict[func][:, :, np.newaxis] - right_distances) < metric_precision).all()
left_distances = func(rotations[:, :, np.newaxis], rotations[:, np.newaxis])
assert (abs(distance_dict[func] - left_distances) < metric_precision).all()
| [
"[email protected]"
] | |
58f3ad5187db0ba90a597d319ecd2fd4036de17e | fd74a044c0037796455ba4bd4fd44f11c3323599 | /Practice/ABC/Bcontest037_a.py | 25217aff10ee643818c607b06b0b3160e6edfb8b | [] | no_license | tegetege/tegetege_AtCoder | 5ac87e0a7a9acdd50d06227283aa7d95eebe2e2f | ba6c6472082e8255202f4f22a60953d0afe21591 | refs/heads/master | 2022-03-25T00:29:22.952078 | 2022-02-10T14:39:58 | 2022-02-10T14:39:58 | 193,516,879 | 0 | 0 | null | 2019-06-25T13:53:13 | 2019-06-24T14:02:05 | Python | UTF-8 | Python | false | false | 55 | py | A,B,C = map(int,input().split())
print(int(C/min(A,B))) | [
"[email protected]"
] | |
270c15670e030d0104c5c652e4fe7cb418d3d976 | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetOverviewRequest.py | 45fcc3ddbf67d6dc972d5537367c6ddd7257cc6e | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 1,556 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetOverviewRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetOverview','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region) | [
"[email protected]"
] | |
818fb09f8f5de94bdddf44acd471f366bfd04c70 | eb463217f001a8ff63243208dc2bb7e355793548 | /src/richie/plugins/section/migrations/0002_add_template_field.py | d3c83581c472881596481d657675e6d9f2744e84 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | phuoclhb/richie | 25020254b635c41648d65a30b3f2405007bd8a39 | 328167d02f9596c8b1d428655f0de1bed7fb277d | refs/heads/master | 2020-08-13T07:14:22.006472 | 2019-10-11T15:31:02 | 2019-10-11T15:58:48 | 214,930,515 | 1 | 0 | MIT | 2019-10-14T02:27:14 | 2019-10-14T02:27:13 | null | UTF-8 | Python | false | false | 631 | py | # Generated by Django 2.1.7 on 2019-02-22 01:57
from django.db import migrations, models
from ..defaults import SECTION_TEMPLATES
class Migration(migrations.Migration):
dependencies = [("section", "0001_initial")]
operations = [
migrations.AddField(
model_name="section",
name="template",
field=models.CharField(
choices=SECTION_TEMPLATES,
default=SECTION_TEMPLATES[0][0],
help_text="Optional template for custom look.",
max_length=150,
verbose_name="Template",
),
)
]
| [
"[email protected]"
] | |
d6758b1214e18affacc304004dfb23d732194dc0 | 07cf86733b110a13224ef91e94ea5862a8f5d0d5 | /taum_and_bday/taum_and_bday.py | 2dca00a8c687bce30c4615338d881eba6f673268 | [] | no_license | karsevar/Code_Challenge_Practice | 2d96964ed2601b3beb324d08dd3692c3d566b223 | 88d4587041a76cfd539c0698771420974ffaf60b | refs/heads/master | 2023-01-23T17:20:33.967020 | 2020-12-14T18:29:49 | 2020-12-14T18:29:49 | 261,813,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | def taumBday(b, w, bc, wc, z):
# Write your code here
# create a variable named black_cost
# create a variable named white_cost
# check if bc + z is less than wc:
# if so overwrite white_cost with b * (bc + z)
# overwrite black_cost with b * (bc)
# elif wc + z is less than bc:
# if so overwrite black_cost with w * (wc + z)
# overwrite white_cost with w * (wc)
# else
# overwrite black_cost with b * (bc + z)
# overwrite white_cost with w * (wc + z)
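    # Worked example (hand-computed): with b = w = 10, bc = wc = 1 and z = 1,
    # converting never helps, so the answer is simply 10*1 + 10*1 = 20.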
black_cost = 0
white_cost = 0
if (bc + z) < wc:
white_cost = w * (bc + z)
black_cost = b * bc
elif (wc + z) < bc:
white_cost = w * wc
black_cost = b * (wc + z)
else:
white_cost = w * wc
black_cost = b * bc
return white_cost + black_cost | [
"[email protected]"
] | |
1b0b4bc4e5b5b0bc77020ca601dd1f1dabbccc3a | 23e74e0d5bd42de514544917f7b33206e5acf84a | /alumnos/58003-Martin-Ruggeri/copia.py | eb8dc2774c2bf67e0fdfa336f443d85570aba882 | [] | no_license | Martin-Ruggeri-Bio/lab | 2e19015dae657bb9c9e86c55d8355a04db8f5804 | 9a1c1d8f99c90c28c3be62670a368838aa06988f | refs/heads/main | 2023-08-01T07:26:42.015115 | 2021-09-20T20:21:31 | 2021-09-20T20:21:31 | 350,102,381 | 0 | 0 | null | 2021-03-21T19:48:58 | 2021-03-21T19:48:58 | null | UTF-8 | Python | false | false | 279 | py | #!/bin/python3
def copiaArchivos():
archi_org = open(input("ingrese archivo de origen:\n"), "r")
archi_des = open(input("ingrese archivo de destino:\n"), "w")
with archi_org:
archi_des.write(archi_org.read())
if __name__ == '__main__':
copiaArchivos()
| [
"[email protected]"
] | |
85dfa9657bf5f1207e0b7cd837ff3661aa12b093 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/2. Add Two Numbers/solution1.py | 78df80bb3d9a1d82a8d444589710b5f138669603 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
dummy_head = curr_head = ListNode()
p, q = l1, l2
carry = 0
while p or q:
if p:
carry += p.val
p = p.next
if q:
carry += q.val
q = q.next
curr_head.next = ListNode(carry % 10)
curr_head = curr_head.next
carry //= 10
if carry > 0:
curr_head.next = ListNode(carry)
return dummy_head.next
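    # Walkthrough of the classic example: 342 + 465 = 807 is stored least-significant
    # digit first, so l1 = 2->4->3 and l2 = 5->6->4 yield 7->0->8; the final carry
    # check appends one more node when the sum gains an extra digit.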
| [
"[email protected]"
] | |
2fd7e86a0345548fe89a360c898f938f9227bdb2 | 5b38dd549d29322ae07ad0cc68a28761989ef93a | /cc_lib/_util/_logger.py | fc66aac68804a599831a0405e5eaf400e78fd1cb | [
"Apache-2.0"
] | permissive | SENERGY-Platform/client-connector-lib | d54ea800807892600cf08d3b2a4f00e8340ab69c | e365fc4bed949e84cde81fd4b5268bb8d4f53c12 | refs/heads/master | 2022-09-03T00:03:29.656511 | 2022-08-24T11:18:22 | 2022-08-24T11:18:22 | 159,316,125 | 1 | 2 | Apache-2.0 | 2020-05-27T07:47:14 | 2018-11-27T10:15:38 | Python | UTF-8 | Python | false | false | 784 | py | """
Copyright 2019 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ('get_logger',)
import logging
logger = logging.getLogger('connector')
logger.propagate = False
def get_logger(name: str) -> logging.Logger:
return logger.getChild(name)
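# Usage sketch (the module name below is illustrative): propagation to the root logger
# is disabled above, so attach a handler to the shared 'connector' logger or to a
# child returned by get_logger in order to see output, e.g.
#   log = get_logger("example")
#   log.addHandler(logging.StreamHandler())
#   log.info("message")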
| [
"[email protected]"
] | |
74b4ed23694523deb7002963f183afb60094dad0 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /decoding/GAD/fairseq/modules/scalar_bias.py | c96247c75914fabb8a2b7ff731bb82b588f72690 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 888 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
"""
Adds a vector of scalars, used in self-attention mechanism to allow
the model to optionally attend to this vector instead of the past
"""
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, size[dim] - 1).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
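# Minimal usage sketch (not part of the original fairseq module): prepend one
# bias slot along the time dimension so attention can optionally attend to it
# instead of past positions, as described in the ScalarBias docstring above.
if __name__ == "__main__":
    x = torch.randn(2, 4, 8)
    y = scalar_bias(x, dim=1)                # shape becomes (2, 5, 8)
    assert y.shape == (2, 5, 8)
    assert bool((y[:, 0, :] == 0).all())     # slot 0 is filled with bias_init
    assert bool((y[:, 1:, :] == x).all())    # original content is preserved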
| [
"[email protected]"
] | |
f5de930cd145d2474ed04f2b3d2d810ceba3e181 | f38db79439185ab6062294e1d82f6e909d2be81e | /test/test_update_timesheet_model.py | cbf81950c118167fd9c24c13db0647e8123c0e00 | [] | no_license | ContainerSolutions/avazacli | 3a37f8500ad1f1843acbdbb413d4949e00ec6f91 | 49618314f15d8cb2bda36e6019670fdfbed1524f | refs/heads/master | 2020-06-18T18:44:58.594385 | 2019-07-11T14:23:10 | 2019-07-11T14:23:10 | 196,406,206 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # coding: utf-8
"""
Avaza API Documentation
Welcome to the autogenerated documentation & test tool for Avaza's API. <br/><br/><strong>API Security & Authentication</strong><br/>Authentication options include OAuth2 Implicit and Authorization Code flows, and Personal Access Token. All connections should be encrypted over SSL/TLS <br/><br/>You can set up and manage your api authentication credentials from within your Avaza account. (requires Administrator permissions on your Avaza account).<br/><br/> OAuth2 Authorization endpoint: https://any.avaza.com/oauth2/authorize <br/>OAuth2 Token endpoint: https://any.avaza.com/oauth2/token<br/>Base URL for subsequent API Requests: https://api.avaza.com/ <br/><br/>Blogpost about authenticating with Avaza's API: https://www.avaza.com/avaza-api-oauth2-authentication/ <br/>Blogpost on using Avaza's webhooks: https://www.avaza.com/avaza-api-webhook-notifications/<br/>The OAuth flow currently issues Access Tokens that last 1 day, and Refresh tokens that last 180 days<br/>The Api respects the security Roles assigned to the authenticating Avaza user and filters the data return appropriately. <br/><br><strong>Support</strong><br/>For API Support, and to request access please contact Avaza Support Team via our support chat. <br/><br/><strong>User Contributed Libraries:</strong><br/>Graciously contributed by 3rd party users like you. <br/>Note these are not tested or endorsesd by Avaza. We encourage you to review before use, and use at own risk.<br/> <ul><li> - <a target='blank' href='https://packagist.org/packages/debiprasad/oauth2-avaza'>PHP OAuth Client Package for Azava API (by Debiprasad Sahoo)</a></li></ul> # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avazacli
from avazacli.models.update_timesheet_model import UpdateTimesheetModel # noqa: E501
from avazacli.rest import ApiException
class TestUpdateTimesheetModel(unittest.TestCase):
"""UpdateTimesheetModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateTimesheetModel(self):
"""Test UpdateTimesheetModel"""
# FIXME: construct object with mandatory attributes with example values
# model = avazacli.models.update_timesheet_model.UpdateTimesheetModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0726392c3e962800ab537f902236c9ddf78370f0 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/hinventory/account.py | 7b581f55213470c10ef7d664c6a48948edf9b960 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,184 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Account(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.hinventory.Account")
meta.moClassName = "hinventoryAccount"
meta.rnFormat = "account-[%(name)s]"
meta.category = MoCategory.REGULAR
meta.label = "AInventory"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x600c101
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.hinventory.Region")
meta.childClasses.add("cobra.model.hinventory.StaleResource")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.hcloud.RtSelectorToTagParent")
meta.childClasses.add("cobra.model.hinventory.ResourceGrp")
meta.childNamesAndRnPrefix.append(("cobra.model.hcloud.RtSelectorToTagParent", "rtselectorToTagParent-"))
meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.ResourceGrp", "resourcegrp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.Region", "region-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.StaleResource", "stale"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.hinventory.Provider")
meta.superClasses.add("cobra.model.hcloud.AResource")
meta.superClasses.add("cobra.model.hinventory.AInventory")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.hcloud.ACloudBase")
meta.superClasses.add("cobra.model.hinventory.AAInventory")
meta.rnPrefixes = [
('account-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cloudName", "cloudName", 53279, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("cloudName", prop)
prop = PropMeta("str", "cloudProviderId", "cloudProviderId", 54108, PropCategory.REGULAR)
prop.label = "Resource ID in Cloud Provider"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("cloudProviderId", prop)
prop = PropMeta("str", "configDn", "configDn", 54120, PropCategory.REGULAR)
prop.label = "DN of object that created the resource"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("configDn", prop)
prop = PropMeta("str", "delegateDn", "delegateDn", 53375, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("delegateDn", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "isStale", "isStale", 54109, PropCategory.REGULAR)
prop.label = "Resource out-of-sync with current configuration"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("isStale", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 50766, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 50279, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "resolvedObjDn", "resolvedObjDn", 50280, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("resolvedObjDn", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
getattr(meta.props, "name").needDelimiter = True
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
79331affbc571e2fd6380690621972ed904a93b2 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_11_15_5deg_FIP_db/check_db_symm_v3.py | 02760a80bd14513da9f994e3a337517bca50323a | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,154 | py | import numpy as np
import matplotlib.pyplot as plt
import euler_func as ef
import h5py
"""
check whether the database exhibits hexagonal-triclinic crystal
symmetry
first find 12 symmetric orientations in triclinic FZ
(0<=phi1<2*pi, 0<=Phi<=pi, 0<=phi2<2*pi)
for each deformation mode sample (theta), check if the value of
interest is the same for all symmetric orientations
"""
inc = 5 # degree increment for angular variables
np.random.seed() # generate seed for random
symhex = ef.symhex()
r2d = 180./np.pi
d2r = np.pi/180.
r2s = r2d/inc
n_th_max = 120/inc # number of theta samples in FOS
n_max = 360/inc # number of phi1, Phi and phi2 samples in FOS
n_hlf = 180/inc # half n_max
n_th = (60/inc)+1 # number of theta samples for FZ
n_p1 = 360/inc # number of phi1 samples for FZ
n_P = (90/inc)+1 # number of Phi samples for FZ
n_p2 = 60/inc # number of phi2 samples for FZ
print "angle space shape: %s" % str(np.array([n_th, n_p1, n_P, n_p2]))
# only look at last in series for value of interest
db = np.load("pre_fft.npy")[:n_th, ..., -1]
print "db shape: %s" % str(db.shape)
# n_FZ: total number of sampled orientations in FZ
n_FZ = n_p1*n_P*n_p2
# FZ_indx: vector of linear indices for sampled orientations in FZ
FZ_indx = np.arange(n_FZ)
print "FZ_indx shape: %s" % str(FZ_indx.shape)
# FZ_subs: array of subscripts of sampled orientations in FZ
FZ_subs = np.unravel_index(FZ_indx, (n_p1, n_P, n_p2))
FZ_subs = np.array(FZ_subs).transpose()
print "FZ_subs shape: %s" % str(FZ_subs.shape)
# FZ_euler: array of euler angles of sampled orientations in FZ
FZ_euler = np.float64(FZ_subs*inc*d2r)
# g: array of orientation matrices (sample to crystal frame rotation
# matrices) for orientations in fundamental zone
g = ef.bunge2g(FZ_euler[:, 0],
FZ_euler[:, 1],
FZ_euler[:, 2])
print "g shape: %s" % str(g.shape)
# FZ_euler_sym: array of euler angles of sampled orientations in
# FZ and their symmetric equivalents
FZ_euler_sym = np.zeros((12, n_FZ, 3))
# find the symmetric equivalents to the euler angle within the FZ
for sym in xrange(12):
op = symhex[sym, ...]
# g_sym: array of orientation matrices transformed with a
# hexagonal symmetry operator
g_sym = np.einsum('ik,...kj', op, g)
tmp = np.array(ef.g2bunge(g_sym)).transpose()
if sym == 0:
print "g_sym shape: %s" % str(g_sym.shape)
print "tmp shape: %s" % str(tmp.shape)
del g_sym
FZ_euler_sym[sym, ...] = tmp
del tmp
# convert euler angles to subscripts
FZ_subs_sym = np.int64(np.round(FZ_euler_sym*r2s))
# make sure all of the euler angles are within the appropriate
# ranges (e.g. not negative)
for ii in xrange(3):
lt = FZ_subs_sym[..., ii] < 0.0
FZ_subs_sym[..., ii] += n_max*lt
print np.sum(FZ_subs_sym < 0)
# determine the deviation from symmetry by finding the value of
# the function for symmetric locations and comparing these values
f = h5py.File('symm_check.hdf5', 'w')
error = f.create_dataset("error", (n_th, 12, n_FZ, 5))
for th in xrange(n_th):
for sym in xrange(12):
error[th, sym, :, 0:3] = FZ_subs_sym[sym, ...]*inc
origFZ = db[th,
FZ_subs_sym[0, :, 0],
FZ_subs_sym[0, :, 1],
FZ_subs_sym[0, :, 2]]
symFZ = db[th,
FZ_subs_sym[sym, :, 0],
FZ_subs_sym[sym, :, 1],
FZ_subs_sym[sym, :, 2]]
if th == 0 and sym == 0:
print "origFZ shape: %s" % str(origFZ.shape)
print "symFZ shape: %s" % str(symFZ.shape)
if th == 0:
print "operator number: %s" % sym
idcheck = np.all(FZ_euler_sym[0, ...] == FZ_euler_sym[sym, ...])
print "are Euler angles in different FZs identical?: %s" % str(idcheck)
orig_0sum = np.sum(origFZ == 0.0)
sym_0sum = np.sum(symFZ == 0.0)
if orig_0sum != 0 or sym_0sum != 0:
print "number of zero values in origFZ: %s" % orig_0sum
print "number of zero values in symFZ: %s" % sym_0sum
error[th, sym, :, 3] = symFZ
error[th, sym, :, 4] = np.abs(origFZ-symFZ)
error_sec = error[...]
f.close()
# perform error analysis
# pick the worst-error location below (the commented-out lines select a
# random deformation mode and euler angle instead)
# th_rand = np.int64(np.round((n_th-1)*np.random.rand()))
# g_rand = np.int64(np.round((n_FZ-1)*np.random.rand()))
badloc = np.argmax(error_sec[..., 4])
badloc = np.unravel_index(badloc, error_sec[..., 3].shape)
th_rand = badloc[0]
g_rand = badloc[2]
print "\nexample comparison:"
print "deformation mode: %s degrees" % str(np.float(th_rand*inc))
for sym in xrange(12):
print "operator number: %s" % sym
eul_rand = error_sec[th_rand, sym, g_rand, 0:3]
print "euler angles: %s (degrees)" % str(eul_rand)
val_rand = error_sec[th_rand, sym, g_rand, 3]
print "value of interest: %s" % str(val_rand)
errvec = error_sec[..., 4].reshape(error_sec[..., 4].size)
print "\noverall error metrics:"
print "mean database value: %s" % np.mean(db)
print "mean error: %s" % np.mean(errvec)
print "maximum error: %s" % np.max(errvec)
print "standard deviation of error: %s" % np.std(errvec)
print "total number of locations checked: %s" % (errvec.size)
err_count = np.sum(errvec != 0.0)
# plot histograms of the euler angles at locations with nonzero error
error_indx = errvec != 0.0
print error_indx.shape
loc_hist = errvec[error_indx]
print loc_hist.shape
err_count = np.sum(loc_hist != 0.0)
print "number of locations with nonzero error: %s" % err_count
errvec_p1 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=4, figsize=[10, 6])
plt.hist(errvec_p1, 361)
errvec_P = error_sec[..., 1].reshape(error_sec[..., 1].size)[error_indx]
plt.figure(num=5, figsize=[10, 6])
plt.hist(errvec_P, 361)
errvec_p2 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=6, figsize=[10, 6])
plt.hist(errvec_p2, 361)
# plot the error histograms
plt.figure(num=1, figsize=[10, 6])
error_hist = error_sec[..., 4]
plt.hist(error_hist.reshape(error_hist.size), 100)
# plot the symmetric orientations in euler space
plt.figure(2)
plt.plot(np.array([0, 360, 360, 0, 0]), np.array([0, 0, 180, 180, 0]), 'k-')
plt.plot(np.array([0, 360]), np.array([90, 90]), 'k-')
plt.xlabel('$\phi_1$')
plt.ylabel('$\Phi$')
sc = 1.05
plt.axis([-(sc-1)*360, sc*360, -(sc-1)*180, sc*180])
plt.figure(3)
plt.plot(np.array([0, 180, 180, 0, 0]), np.array([0, 0, 360, 360, 0]), 'k-')
plt.plot(np.array([90, 90]), np.array([0, 360]), 'k-')
plt.plot(np.array([0, 180]), np.array([60, 60]), 'k-')
plt.plot(np.array([0, 180]), np.array([120, 120]), 'k-')
plt.plot(np.array([0, 180]), np.array([180, 180]), 'k-')
plt.plot(np.array([0, 180]), np.array([240, 240]), 'k-')
plt.plot(np.array([0, 180]), np.array([300, 300]), 'k-')
plt.xlabel('$\Phi$')
plt.ylabel('$\phi2$')
sc = 1.05
plt.axis([-(sc-1)*180, sc*180, -(sc-1)*360, sc*360])
eul_plt = error_sec[th_rand, :, g_rand, 0:3]
plt.figure(2)
plt.plot(eul_plt[:, 0], eul_plt[:, 1],
c='b', marker='o', linestyle='none')
plt.figure(3)
plt.plot(eul_plt[:, 1], eul_plt[:, 2],
c='b', marker='o', linestyle='none')
plt.show()
| [
"[email protected]"
] | |
61094d5d3babcb4ac784998ee52b573967471ac0 | 7fc22330d96b48a425894311441c4e83cb4d2447 | /code/snakeeyes/tests/__init__.py | e207e34b2b0db2f98b137a14327de8cf795330f9 | [] | no_license | tangentstorm/snakeeyes | 5c23791adfe4511a3a97a35d725d1b2769552000 | a036884e39fe7989e8101c7f96cae8d4f3c507ea | refs/heads/master | 2021-01-22T08:23:27.661057 | 2020-11-22T05:08:56 | 2020-11-22T05:08:56 | 10,516,815 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | """
Created on Aug 1, 2009
@author: michal
"""
import sys; sys.path.append("..") # for testloop.sh
import unittest
from snakeeyes.tests.img_test import *
from snakeeyes.tests.ocr_test import *
from snakeeyes.tests.scrape_test import *
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e551dbec93a15d5f8a6eb4b246b2a3a381e2691e | f62fd455e593a7ad203a5c268e23129473d968b6 | /tacker-0.7.0/tacker/db/vnfm/vnfm_db.py | e7ca3763bffe6461854f842cd873c008ae70d313 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 27,220 | py | # Copyright 2013, 2014 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from tacker.api.v1 import attributes
from tacker import context as t_context
from tacker.db.common_services import common_services_db
from tacker.db import db_base
from tacker.db import model_base
from tacker.db import models_v1
from tacker.db import types
from tacker.extensions import vnfm
from tacker import manager
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE_ERROR_DEAD = (
constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
constants.ERROR, constants.DEAD)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
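# Lifecycle note (inferred from the plugin helpers below, not from upstream
# documentation):
#   create: PENDING_CREATE -> ACTIVE (ERROR/DEAD on failure)
#   update: ACTIVE -> PENDING_UPDATE -> ACTIVE
#   delete: ACTIVE/ERROR/DEAD -> PENDING_DELETE -> soft-deleted or ERROR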
###########################################################################
# db tables
class VNFD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
models_v1.Audit):
"""Represents VNFD to create VNF."""
__tablename__ = 'vnfd'
# Descriptive name
name = sa.Column(sa.String(255), nullable=False)
description = sa.Column(sa.Text)
# service type that this service vm provides.
# At first phase, this includes only single service
# In future, single service VM may accomodate multiple services.
service_types = orm.relationship('ServiceType', backref='vnfd')
    # driver to communicate with service management
mgmt_driver = sa.Column(sa.String(255))
# (key, value) pair to spin up
attributes = orm.relationship('VNFDAttribute',
backref='vnfd')
# vnfd template source - inline or onboarded
template_source = sa.Column(sa.String(255), server_default='onboarded')
class ServiceType(model_base.BASE, models_v1.HasId, models_v1.HasTenant):
"""Represents service type which hosting vnf provides.
Since a vnf may provide many services, This is one-to-many
relationship.
"""
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'),
nullable=False)
service_type = sa.Column(sa.String(64), nullable=False)
class VNFDAttribute(model_base.BASE, models_v1.HasId):
"""Represents attributes necessary for spinning up VM in (key, value) pair
key value pair is adopted for being agnostic to actuall manager of VMs.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnfd_attribute'
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'),
nullable=False)
key = sa.Column(sa.String(255), nullable=False)
value = sa.Column(sa.TEXT(65535), nullable=True)
class VNF(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
models_v1.Audit):
"""Represents vnfs that hosts services.
Here the term, 'VM', is intentionally avoided because it can be
VM or other container.
"""
__tablename__ = 'vnf'
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'))
vnfd = orm.relationship('VNFD')
name = sa.Column(sa.String(255), nullable=False)
description = sa.Column(sa.Text, nullable=True)
# sufficient information to uniquely identify hosting vnf.
# In case of openstack manager, it's UUID of heat stack.
instance_id = sa.Column(sa.String(64), nullable=True)
    # For a management tool to talk to and manage this hosting vnf.
# opaque string.
# e.g. (driver, mgmt_url) = (ssh, ip address), ...
mgmt_url = sa.Column(sa.String(255), nullable=True)
attributes = orm.relationship("VNFAttribute", backref="vnf")
status = sa.Column(sa.String(64), nullable=False)
vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
placement_attr = sa.Column(types.Json, nullable=True)
vim = orm.relationship('Vim')
error_reason = sa.Column(sa.Text, nullable=True)
class VNFAttribute(model_base.BASE, models_v1.HasId):
"""Represents kwargs necessary for spinning up VM in (key, value) pair.
key value pair is adopted for being agnostic to actuall manager of VMs.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnf_attribute'
vnf_id = sa.Column(types.Uuid, sa.ForeignKey('vnf.id'),
nullable=False)
key = sa.Column(sa.String(255), nullable=False)
# json encoded value. example
# "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
value = sa.Column(sa.TEXT(65535), nullable=True)
class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
@property
def _core_plugin(self):
return manager.TackerManager.get_plugin()
def subnet_id_to_network_id(self, context, subnet_id):
subnet = self._core_plugin.get_subnet(context, subnet_id)
return subnet['network_id']
def __init__(self):
super(VNFMPluginDb, self).__init__()
self._cos_db_plg = common_services_db.CommonServicesPluginDb()
def _get_resource(self, context, model, id):
try:
if uuidutils.is_uuid_like(id):
return self._get_by_id(context, model, id)
return self._get_by_name(context, model, id)
except orm_exc.NoResultFound:
if issubclass(model, VNFD):
raise vnfm.VNFDNotFound(vnfd_id=id)
elif issubclass(model, ServiceType):
raise vnfm.ServiceTypeNotFound(service_type_id=id)
if issubclass(model, VNF):
raise vnfm.VNFNotFound(vnf_id=id)
else:
raise
def _make_attributes_dict(self, attributes_db):
return dict((attr.key, attr.value) for attr in attributes_db)
def _make_service_types_list(self, service_types):
return [service_type.service_type
for service_type in service_types]
def _make_vnfd_dict(self, vnfd, fields=None):
res = {
'attributes': self._make_attributes_dict(vnfd['attributes']),
'service_types': self._make_service_types_list(
vnfd.service_types)
}
key_list = ('id', 'tenant_id', 'name', 'description',
'mgmt_driver', 'created_at', 'updated_at',
'template_source')
res.update((key, vnfd[key]) for key in key_list)
return self._fields(res, fields)
def _make_dev_attrs_dict(self, dev_attrs_db):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_vnf_dict(self, vnf_db, fields=None):
LOG.debug(_('vnf_db %s'), vnf_db)
LOG.debug(_('vnf_db attributes %s'), vnf_db.attributes)
res = {
'vnfd':
self._make_vnfd_dict(vnf_db.vnfd),
'attributes': self._make_dev_attrs_dict(vnf_db.attributes),
}
key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
'vim_id', 'placement_attr', 'vnfd_id', 'status',
'mgmt_url', 'error_reason', 'created_at', 'updated_at')
res.update((key, vnf_db[key]) for key in key_list)
return self._fields(res, fields)
@staticmethod
def _mgmt_driver_name(vnf_dict):
return vnf_dict['vnfd']['mgmt_driver']
@staticmethod
def _instance_id(vnf_dict):
return vnf_dict['instance_id']
def create_vnfd(self, context, vnfd):
vnfd = vnfd['vnfd']
LOG.debug(_('vnfd %s'), vnfd)
tenant_id = self._get_tenant_id_for_create(context, vnfd)
service_types = vnfd.get('service_types')
mgmt_driver = vnfd.get('mgmt_driver')
template_source = vnfd.get("template_source")
if (not attributes.is_attr_set(service_types)):
LOG.debug(_('service types unspecified'))
raise vnfm.ServiceTypesNotSpecified()
with context.session.begin(subtransactions=True):
vnfd_id = str(uuid.uuid4())
vnfd_db = VNFD(
id=vnfd_id,
tenant_id=tenant_id,
name=vnfd.get('name'),
description=vnfd.get('description'),
mgmt_driver=mgmt_driver,
template_source=template_source)
context.session.add(vnfd_db)
for (key, value) in vnfd.get('attributes', {}).items():
attribute_db = VNFDAttribute(
id=str(uuid.uuid4()),
vnfd_id=vnfd_id,
key=key,
value=value)
context.session.add(attribute_db)
for service_type in (item['service_type']
for item in vnfd['service_types']):
service_type_db = ServiceType(
id=str(uuid.uuid4()),
tenant_id=tenant_id,
vnfd_id=vnfd_id,
service_type=service_type)
context.session.add(service_type_db)
LOG.debug(_('vnfd_db %(vnfd_db)s %(attributes)s '),
{'vnfd_db': vnfd_db,
'attributes': vnfd_db.attributes})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
LOG.debug(_('vnfd_dict %s'), vnfd_dict)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_ONBOARDED,
evt_type=constants.RES_EVT_CREATE,
tstamp=vnfd_dict[constants.RES_EVT_CREATED_FLD])
return vnfd_dict
def update_vnfd(self, context, vnfd_id,
vnfd):
with context.session.begin(subtransactions=True):
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnfd_db.update(vnfd['vnfd'])
vnfd_db.update({'updated_at': timeutils.utcnow()})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_NA_STATE,
evt_type=constants.RES_EVT_UPDATE,
tstamp=vnfd_dict[constants.RES_EVT_UPDATED_FLD])
return vnfd_dict
def delete_vnfd(self,
context,
vnfd_id,
soft_delete=True):
with context.session.begin(subtransactions=True):
# TODO(yamahata): race. prevent from newly inserting hosting vnf
# that refers to this vnfd
vnfs_db = context.session.query(VNF).filter_by(
vnfd_id=vnfd_id).first()
if vnfs_db is not None and vnfs_db.deleted_at is None:
raise vnfm.VNFDInUse(vnfd_id=vnfd_id)
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
if soft_delete:
vnfd_db.update({'deleted_at': timeutils.utcnow()})
self._cos_db_plg.create_event(
context, res_id=vnfd_db['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_NA_STATE,
evt_type=constants.RES_EVT_DELETE,
tstamp=vnfd_db[constants.RES_EVT_DELETED_FLD])
else:
context.session.query(ServiceType).filter_by(
vnfd_id=vnfd_id).delete()
context.session.query(VNFDAttribute).filter_by(
vnfd_id=vnfd_id).delete()
context.session.delete(vnfd_db)
def get_vnfd(self, context, vnfd_id, fields=None):
vnfd_db = self._get_resource(context, VNFD, vnfd_id)
return self._make_vnfd_dict(vnfd_db)
def get_vnfds(self, context, filters, fields=None):
if 'template_source' in filters and \
filters['template_source'][0] == 'all':
filters.pop('template_source')
return self._get_collection(context, VNFD,
self._make_vnfd_dict,
filters=filters, fields=fields)
def choose_vnfd(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
with context.session.begin(subtransactions=True):
query = (
context.session.query(VNFD).
filter(
sa.exists().
where(sa.and_(
VNFD.id == ServiceType.vnfd_id,
ServiceType.service_type == service_type))))
for key in required_attributes:
query = query.filter(
sa.exists().
where(sa.and_(
VNFD.id ==
VNFDAttribute.vnfd_id,
VNFDAttribute.key == key)))
LOG.debug(_('statements %s'), query)
vnfd_db = query.first()
if vnfd_db:
return self._make_vnfd_dict(vnfd_db)
def _vnf_attribute_update_or_create(
self, context, vnf_id, key, value):
arg = (self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).
filter(VNFAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
# called internally, not by REST API
def _create_vnf_pre(self, context, vnf):
LOG.debug(_('vnf %s'), vnf)
tenant_id = self._get_tenant_id_for_create(context, vnf)
vnfd_id = vnf['vnfd_id']
name = vnf.get('name')
vnf_id = str(uuid.uuid4())
attributes = vnf.get('attributes', {})
vim_id = vnf.get('vim_id')
placement_attr = vnf.get('placement_attr', {})
with context.session.begin(subtransactions=True):
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnf_db = VNF(id=vnf_id,
tenant_id=tenant_id,
name=name,
description=vnfd_db.description,
instance_id=None,
vnfd_id=vnfd_id,
vim_id=vim_id,
placement_attr=placement_attr,
status=constants.PENDING_CREATE,
error_reason=None)
context.session.add(vnf_db)
for key, value in attributes.items():
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
evt_details = "VNF UUID assigned."
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_CREATE,
evt_type=constants.RES_EVT_CREATE,
tstamp=vnf_db[constants.RES_EVT_CREATED_FLD],
details=evt_details)
return self._make_vnf_dict(vnf_db)
# called internally, not by REST API
    # instance_id = None means error on creation
def _create_vnf_post(self, context, vnf_id, instance_id,
mgmt_url, vnf_dict):
LOG.debug(_('vnf_dict %s'), vnf_dict)
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).
one())
query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
if instance_id is None or vnf_dict['status'] == constants.ERROR:
query.update({'status': constants.ERROR})
for (key, value) in vnf_dict['attributes'].items():
# do not store decrypted vim auth in vnf attr table
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
evt_details = ("Infra Instance ID created: %s and "
"Mgmt URL set: %s") % (instance_id, mgmt_url)
self._cos_db_plg.create_event(
context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details=evt_details)
def _create_vnf_status(self, context, vnf_id, new_status):
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).one())
query.update({'status': new_status})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details="VNF creation completed")
def _get_vnf_db(self, context, vnf_id, current_statuses, new_status):
try:
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(current_statuses)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
raise vnfm.VNFNotFound(vnf_id=vnf_id)
if vnf_db.status == constants.PENDING_UPDATE:
raise vnfm.VNFInUse(vnf_id=vnf_id)
vnf_db.update({'status': new_status})
return vnf_db
def _update_vnf_scaling_status(self,
context,
policy,
previous_statuses,
status,
mgmt_url=None):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, policy['vnf']['id'], previous_statuses, status)
if mgmt_url:
vnf_db.update({'mgmt_url': mgmt_url})
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=updated_vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_SCALE,
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_post(self, context, vnf_id, new_status,
new_vnf_dict=None):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_UPDATE).
update({'status': new_status,
'updated_at': timeutils.utcnow()}))
dev_attrs = new_vnf_dict.get('attributes', {})
(context.session.query(VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).
filter(~VNFAttribute.key.in_(dev_attrs.keys())).
delete(synchronize_session='fetch'))
for (key, value) in dev_attrs.items():
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=new_vnf_dict[constants.RES_EVT_UPDATED_FLD])
def _delete_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE_ERROR_DEAD,
constants.PENDING_DELETE)
deleted_vnf_db = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=deleted_vnf_db['status'],
evt_type=constants.RES_EVT_DELETE,
tstamp=timeutils.utcnow(), details="VNF delete initiated")
return deleted_vnf_db
def _delete_vnf_post(self, context, vnf_dict, error, soft_delete=True):
vnf_id = vnf_dict['id']
with context.session.begin(subtransactions=True):
query = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_DELETE))
if error:
query.update({'status': constants.ERROR})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.ERROR,
evt_type=constants.RES_EVT_DELETE,
tstamp=timeutils.utcnow(),
details="VNF Delete ERROR")
else:
if soft_delete:
deleted_time_stamp = timeutils.utcnow()
query.update({'deleted_at': deleted_time_stamp})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_DELETE,
evt_type=constants.RES_EVT_DELETE,
tstamp=deleted_time_stamp,
details="VNF Delete Complete")
else:
(self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).delete())
query.delete()
# Delete corresponding vnfd
if vnf_dict['vnfd']['template_source'] == "inline":
self.delete_vnfd(context, vnf_dict["vnfd_id"])
    # reference implementation. needs to be overridden by a subclass
def create_vnf(self, context, vnf):
vnf_dict = self._create_vnf_pre(context, vnf)
# start actual creation of hosting vnf.
        # Waiting for completion of creation should be done in the background
# by another thread if it takes a while.
instance_id = str(uuid.uuid4())
vnf_dict['instance_id'] = instance_id
self._create_vnf_post(context, vnf_dict['id'], instance_id, None,
vnf_dict)
self._create_vnf_status(context, vnf_dict['id'],
constants.ACTIVE)
return vnf_dict
    # reference implementation. needs to be overridden by a subclass
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
# start actual update of hosting vnf
        # waiting for completion of update should be done in the background
# by another thread if it takes a while
self._update_vnf_post(context, vnf_id, constants.ACTIVE)
return vnf_dict
    # reference implementation. needs to be overridden by a subclass
def delete_vnf(self, context, vnf_id, soft_delete=True):
vnf_dict = self._delete_vnf_pre(context, vnf_id)
# start actual deletion of hosting vnf.
        # Waiting for completion of deletion should be done in the background
# by another thread if it takes a while.
self._delete_vnf_post(context,
vnf_dict,
False,
soft_delete=soft_delete)
def get_vnf(self, context, vnf_id, fields=None):
vnf_db = self._get_resource(context, VNF, vnf_id)
return self._make_vnf_dict(vnf_db, fields)
def get_vnfs(self, context, filters=None, fields=None):
return self._get_collection(context, VNF, self._make_vnf_dict,
filters=filters, fields=fields)
def set_vnf_error_status_reason(self, context, vnf_id, new_reason):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
update({'error_reason': new_reason}))
def _mark_vnf_status(self, vnf_id, exclude_status, new_status):
context = t_context.get_admin_context()
with context.session.begin(subtransactions=True):
try:
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(~VNF.status.in_(exclude_status)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
LOG.warning(_('no vnf found %s'), vnf_id)
return False
vnf_db.update({'status': new_status})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow())
return True
def _mark_vnf_error(self, vnf_id):
return self._mark_vnf_status(
vnf_id, [constants.DEAD], constants.ERROR)
def _mark_vnf_dead(self, vnf_id):
exclude_status = [
constants.DOWN,
constants.PENDING_CREATE,
constants.PENDING_UPDATE,
constants.PENDING_DELETE,
constants.INACTIVE,
constants.ERROR]
return self._mark_vnf_status(
vnf_id, exclude_status, constants.DEAD)
| [
"[email protected]"
] | |
e33fe0145613768d16866c5fc41bc2560e783bf5 | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /test/test_istio_authorization_policy_source.py | d06474312ad2007728f5c1f1dbe3e96ba1395147 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.istio_authorization_policy_source import IstioAuthorizationPolicySource # noqa: E501
from openapi_client.rest import ApiException
class TestIstioAuthorizationPolicySource(unittest.TestCase):
"""IstioAuthorizationPolicySource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IstioAuthorizationPolicySource
            include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.istio_authorization_policy_source.IstioAuthorizationPolicySource() # noqa: E501
if include_optional :
return IstioAuthorizationPolicySource(
namespaces = [
''
],
principals = [
''
]
)
else :
return IstioAuthorizationPolicySource(
)
def testIstioAuthorizationPolicySource(self):
"""Test IstioAuthorizationPolicySource"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a9b556949473408521e5fae46b690dbc52cc4f55 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/pybites/bitesofpy-master/!201-300/204/test_pomodoro.py | ed5d098ac2af44caaaf4144782768d028d668cea | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,331 | py | from typing import Union
import pytest
from pomodoro import break_time, lunch_time, main, session, work_time
@pytest.mark.asyncio
async def test_break_time(capfd):
anno = break_time.__annotations__
assert anno["delay"] == Union[int, float]
assert anno["loop"] == int
assert anno["return"] is None
delay = 0.0001
await break_time(delay, 1)
output = capfd.readouterr()[0].strip()
assert "[1]" in output
assert f"Time for a {int(delay/60)} min break!" in output
@pytest.mark.asyncio
async def test_lunch_time(capfd):
anno = lunch_time.__annotations__
assert anno["delay"] == Union[int, float]
assert anno["return"] is None
delay = 0.06
await lunch_time(delay)
output = capfd.readouterr()[0].strip()
assert "Time for lunch!" in output
@pytest.mark.asyncio
async def test_work_time(capfd):
anno = work_time.__annotations__
assert anno["delay"] == Union[int, float]
assert anno["return"] is None
delay = 0.0025
await work_time(delay, 3)
output = capfd.readouterr()[0].strip()
assert "[3]" in output
assert "Time to work!" in output
@pytest.mark.asyncio
async def test_session(capfd):
anno = session.__annotations__
assert anno["work_length"] == Union[int, float]
assert anno["short_break_length"] == Union[int, float]
assert anno["long_break_length"] == Union[int, float]
assert anno["return"] is None
await session(0.0025, 0.0005, 0.003)
output = capfd.readouterr()[0].strip()
assert "Time to work!" in output
assert "min break!" in output
assert "Time for lunch!" not in output
assert len(output.splitlines()) == 8
@pytest.mark.asyncio
async def test_main(capfd):
anno = main.__annotations__
assert anno["work_length"] == Union[int, float]
assert anno["short_break_length"] == Union[int, float]
assert anno["long_break_length"] == Union[int, float]
assert anno["lunch_length"] == Union[int, float]
assert anno["return"] is None
await main(0.0025, 0.0005, 0.003, 0.01)
output = capfd.readouterr()[0].strip()
assert "Pomodor timer started at" in output
assert "Time to work!" in output
assert "min break!" in output
assert "Time for lunch!" in output
assert "Work day completed at" in output
    assert len(output.splitlines()) == 45
| [
"[email protected]"
] | |
98c7e860f3e9b72be38d65d6434b2f524d8aef87 | 28ec3ee4daab919ef005e5913498be3fb96b19a4 | /polyorg/tests.py | d4e1c78d9034a910e93000b9baa3e029a3b0b5b4 | [
"BSD-2-Clause"
] | permissive | danielbraun/open-shot | 2bd5b0af9c8b6c32bc0b244edfafa1a871e85972 | 5e7507b57912a047e460b32927412f43df154def | refs/heads/master | 2020-12-25T12:07:26.287583 | 2013-09-15T10:17:16 | 2013-09-15T10:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from models import Candidate, CandidateList
class CreationTest(TestCase):
def setUp(self):
self.linus = User.objects.create(username='Linus')
self.guido = User.objects.create(username='Guido')
self.jacob = User.objects.create(username='Jacob')
def test_candidatelist(self):
"""
        Tests the creation of CandidateList and its basic methods
"""
cl1 = CandidateList.objects.create(name="Imagine", ballot="I")
c = Candidate.objects.create(candidate_list=cl1, user=self.jacob, ordinal=1)
self.assertFalse(cl1.get_candidates())
c.status = 'V'
c.save()
self.assertEquals(cl1.get_candidates().count(), 1)
c.status = 'X'
c.save()
self.assertFalse(cl1.get_candidates())
cl1.delete()
    def tearDown(self):
        # unittest only calls tearDown (capital D); remove the users created in setUp
        for u in (self.linus, self.guido, self.jacob):
            u.delete()
| [
"[email protected]"
] | |
49256118e79555242d05bc0d7a022c34619aa4ae | c86cd75be4f5b4eef605fb0f40743406ae19685f | /core/ui_test.py | cd1ce62099cf077a55dbf0934f3f6763c20bac3b | [
"Apache-2.0"
] | permissive | jyn514/oil | 3de53092c81e7f9129c9d12d51a8dfdbcacd397b | 42adba6a1668ff30c6312a6ce3c3d1f1acd529ec | refs/heads/master | 2022-02-23T08:12:48.381272 | 2019-03-15T08:54:31 | 2019-03-15T08:54:31 | 176,316,917 | 0 | 0 | Apache-2.0 | 2019-03-18T15:36:14 | 2019-03-18T15:36:13 | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/python -S
from __future__ import print_function
"""
ui_test.py: Tests for ui.py
"""
import unittest
from core import ui # module under test
class UiTest(unittest.TestCase):
def testFoo(self):
ui.usage('oops')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c78584df8800ee65609ce9f1b1ef2ac873ef1205 | e75c4f0b2c69620e84d180d52ca94f9f03a29e1f | /account/account_move_line.py | 63ebf436ba160d9c0132e354d64ac617909f64d7 | [] | no_license | Tecvemar/openerp-addons-6.0 | 1af14214c557a9a1525812d837fe1528d0f6029a | 357956e64b4c5189b799065cbbd028d8a99e0c74 | refs/heads/master | 2021-01-21T05:32:46.649690 | 2017-08-31T11:57:48 | 2017-08-31T11:57:48 | 101,927,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,198 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from operator import itemgetter
import netsvc
from osv import fields, osv
from tools.translate import _
import decimal_precision as dp
import tools
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
if context is None:
context = {}
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
# Find the old periods where date start of those periods less then Start period
periods = fiscalperiod_obj.search(cr, uid, [('date_start', '<', first_period.date_start)])
periods = ','.join([str(x) for x in periods])
if periods:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, periods, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
return query
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
        By default, it returns an amount in the currency of this journal entry (which may be
        different from the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.type in ('payable', 'receivable'):
#this function does not suport to be used on move lines not related to payable or receivable accounts
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
amt = (obj_line.credit or 0.0) - (obj_line.debit or 0.0)
vals_lines = {
'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': amt,
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid
}
acc_ana_line_obj.create(cr, uid, vals_lines)
return True
def _default_get_move_form_hook(self, cursor, user, data):
        '''Called at the end of the default_get method for manual entry in the account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context.update({
'period_id': ids[0]
})
return context
def _default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
# Starts: Manual entry from account.move form
if context.get('lines',[]):
total_new = 0.00
for line_record in context['lines']:
if not isinstance(line_record, (tuple, list)):
line_record_detail = self.read(cr, uid, line_record, ['analytic_account_id','debit','credit','name','reconcile_id','tax_code_id','tax_amount','account_id','ref','currency_id','date_maturity','amount_currency','partner_id', 'reconcile_partial_id'])
else:
line_record_detail = line_record[2]
total_new += (line_record_detail['debit'] or 0.00)- (line_record_detail['credit'] or 0.00)
for item in line_record_detail.keys():
data[item] = line_record_detail[item]
if context['journal']:
journal_data = journal_obj.browse(cr, uid, context['journal'], context=context)
if journal_data.type == 'purchase':
if total_new > 0:
account = journal_data.default_credit_account_id
else:
account = journal_data.default_debit_account_id
else:
if total_new > 0:
account = journal_data.default_credit_account_id
else:
account = journal_data.default_debit_account_id
if account and ((not fields) or ('debit' in fields) or ('credit' in fields)) and 'partner_id' in data and (data['partner_id']):
part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account.id
s = -total_new
data['debit'] = s > 0 and s or 0.0
data['credit'] = s < 0 and -s or 0.0
data = self._default_get_move_form_hook(cr, uid, data)
return data
# Ends: Manual entry from account.move form
if not 'move_id' in fields: #we are not in manual entry
return data
# Compute the current move
move_id = False
partner_id = False
if context.get('journal_id', False) and context.get('period_id', False):
if 'move_id' in fields:
cr.execute('SELECT move_id \
FROM \
account_move_line \
WHERE \
journal_id = %s and period_id = %s AND create_uid = %s AND state = %s \
ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = (res and res[0]) or False
if not move_id:
return data
else:
data['move_id'] = move_id
if 'date' in fields:
cr.execute('SELECT date \
FROM \
account_move_line \
WHERE \
journal_id = %s AND period_id = %s AND create_uid = %s \
ORDER BY id DESC',
(context['journal_id'], context['period_id'], uid))
res = cr.fetchone()
if res:
data['date'] = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'],
context=context)
data['date'] = period.date_start
if not move_id:
return data
total = 0
ref_id = False
move = move_obj.browse(cr, uid, move_id, context=context)
if 'name' in fields:
data.setdefault('name', move.line_id[-1].name)
acc1 = False
for l in move.line_id:
acc1 = l.account_id
partner_id = partner_id or l.partner_id.id
ref_id = ref_id or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
if 'ref' in fields:
data['ref'] = ref_id
if 'partner_id' in fields:
data['partner_id'] = partner_id
if move.journal_id.type == 'purchase':
if total > 0:
account = move.journal_id.default_credit_account_id
else:
account = move.journal_id.default_debit_account_id
else:
if total > 0:
account = move.journal_id.default_credit_account_id
else:
account = move.journal_id.default_debit_account_id
part = partner_id and partner_obj.browse(cr, uid, partner_id) or False
# part = False is acceptable for fiscal position.
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
if account:
account = account_obj.browse(cr, uid, account, context=context)
if account and ((not fields) or ('debit' in fields) or ('credit' in fields)):
data['account_id'] = account.id
# Propose the price VAT excluded, the VAT will be added when confirming line
if account.tax_ids:
taxes = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, account.tax_ids)
tax = tax_obj.browse(cr, uid, taxes)
for t in tax_obj.compute_inv(cr, uid, tax, total, 1):
total -= t['amount']
s = -total
data['debit'] = s > 0 and s or 0.0
data['credit'] = s < 0 and -s or 0.0
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
acc = account
if s>0:
acc = acc1
compute_ctx = context.copy()
compute_ctx.update({
'res.currency.compute.account': acc,
'res.currency.compute.account_invert': True,
})
v = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], s, context=compute_ctx)
data['amount_currency'] = v
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
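    # Running balance: for each requested line, sum debit - credit over all
    # lines of the same account with an id lower than or equal to it (filtered
    # by _query_get), i.e. a cumulative balance in creation order.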
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
c['initital_bal'] = True
sql = """SELECT l2.id, SUM(l1.debit-l1.credit)
FROM account_move_line l1, account_move_line l2
WHERE l2.account_id = l1.account_id
AND l1.id <= l2.id
AND l2.id IN %s AND """ + \
self._query_get(cr, uid, obj='l1', context=c) + \
" GROUP BY l2.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
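    # Functional getter mapping each journal item to the invoice that shares
    # its account move, as (id, name) pairs for the 'invoice' function field.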
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {False: ''}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = (invoice_id, invoice_names[invoice_id])
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
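    # Translate a search on the related invoice into a domain on journal item
    # ids by joining account_move_line with account_invoice through move_id.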
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
_columns = {
'name': fields.char('Name', size=64, required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'UoM'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Move', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Narration'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', size=64, store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, method=True, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, method=True, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'period_id': fields.many2one('account.period', 'Period', required=True, select=2),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, select=1),
'blocked': fields.boolean('Litigation', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, method=True, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Valid')], 'State', readonly=True,
help='When new move line is created the state will be \'Draft\'.\n* When all the payments are done it will be in \'Valid\' state.'),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, method=True, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
#TODO: remove this
#'amount_taxed':fields.float("Taxed Amount", digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if ('journal_id' in context) and ('period_id' in context):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': lambda *a: time.strftime('%Y-%m-%d'),
'state': 'draft',
'currency_id': _get_currency,
'journal_id': lambda self, cr, uid, c: c.get('journal_id', c.get('journal',False)),
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': lambda self, cr, uid, c: c.get('period_id', False),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'view':
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
return False
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.journal_id.allow_date:
if not time.strptime(line.date[:10],'%Y-%m-%d') >= time.strptime(line.period_id.date_start, '%Y-%m-%d') or not time.strptime(line.date[:10], '%Y-%m-%d') <= time.strptime(line.period_id.date_stop, '%Y-%m-%d'):
return False
return True
_constraints = [
(_check_no_view, 'You can not create move line on view account.', ['account_id']),
(_check_no_closed, 'You can not create move line on closed account.', ['account_id']),
(_check_company_id, 'Company must be same for its related account and period.',['company_id'] ),
(_check_date, 'The date of your Journal Entry is not in the defined period!', ['date']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
part = partner_obj.browse(cr, uid, partner_id)
if part.property_payment_term:
res = payment_term_obj.compute(cr, uid, part.property_payment_term.id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if journal:
jt = journal_obj.browse(cr, uid, journal).type
                #FIXME: for bank and cash journals we cannot assume the partner account from the journal type alone.
                # They can carry either a payment or a receipt transaction, and the partner account differs in each
                # case: payable for a payment, receivable for a receipt.
#if jt in ('sale', 'purchase_refund', 'bank', 'cash'):
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund', 'expense', 'bank', 'cash'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, val['account_id'])
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
#
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
#
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context and context.get('fiscalyear', False):
periods = self.pool.get('account.fiscalyear').browse(cr, uid, context.get('fiscalyear'), context=context).period_ids
period_ids = [period.id for period in periods]
args.append(('period_id', 'in', period_ids))
if context and context.get('periods', False):
args.append(('period_id', 'in', context.get('periods')))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.get_next_partner_only(cr, uid, offset, context)
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
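    # Find the next partner with unreconciled items on reconcilable accounts,
    # having both debit and credit amounts, oldest last_reconciliation_date
    # first.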
def get_next_partner_only(self, cr, uid, offset=0, context=None):
cr.execute(
"""
SELECT p.id
FROM res_partner p
RIGHT JOIN (
SELECT l.partner_id AS partner_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
FROM account_move_line l
LEFT JOIN account_account a ON (a.id = l.account_id)
LEFT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND (p.last_reconciliation_date IS NULL OR l.date > p.last_reconciliation_date)
AND l.state <> 'draft'
GROUP BY l.partner_id
) AS s ON (p.id = s.partner_id)
WHERE debit > 0 AND credit > 0
ORDER BY p.last_reconciliation_date LIMIT 1 OFFSET %s""", (offset, )
)
return cr.fetchone()
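    # Partial reconciliation: merge the given lines with any partial
    # reconciliation they already belong to; if the combined total is zero,
    # turn the whole set into a full reconciliation instead.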
def reconcile_partial(self, cr, uid, ids, type='auto', context=None):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
raise osv.except_osv(_('Warning !'), _('To reconcile the entries company should be the same for all entries'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
company_currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, company_currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return True
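    # Full reconciliation. If the selected lines do not balance, a write-off
    # move is generated on writeoff_acc_id (required in that case) for the
    # difference, both in company currency and, when needed, in the foreign
    # currency of the account.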
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
raise osv.except_osv(_('Warning !'), _('To reconcile the entries company should be the same for all entries'))
company_list.append(line.company_id.id)
for line in unrec_lines:
if line.state <> 'valid':
raise osv.except_osv(_('Error'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is in the context => take this date
if context.has_key('date_p') and context['date_p']:
date=context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if (len(r) != 1) and not context.get('fy_closing', False):
raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
if not unrec_lines:
raise osv.except_osv(_('Error'), _('Entry is already reconciled'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not context.get('fy_closing', False) and not account.reconcile:
raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
if r[0][1] != None:
raise osv.except_osv(_('Error'), _('Some entries are already reconciled !'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(not context.get('fy_closing', False) and account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning'), _('You have to provide an account for the write off entry !'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
            # If a comment exists in the context, take it
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
wf_service = netsvc.LocalService("workflow")
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
wf_service.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and context and context.get('stop_reconcile', False):
partner_obj.write(cr, uid, [partner_id], {'last_reconciliation_date': time.strftime('%Y-%m-%d %H:%M:%S')})
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
        @param ids: list of ids of the records being edited
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.search(cr, user, [('date_start','<=',date), ('date_stop','>=',date)])
if pids:
res.update({
'period_id':pids[0]
})
context.update({
'period_id':pids[0]
})
return {
'value':res,
'context':context,
}
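    # For tree views, the view architecture is generated dynamically from the
    # columns configured on each journal view, so only the fields relevant to
    # the journal selected in the context are shown.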
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
journal_pool = self.pool.get('account.journal')
if context is None:
context = {}
result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type != 'tree':
#Remove the toolbar from the form view
if view_type == 'form':
if result.get('toolbar', False):
result['toolbar']['action'] = []
#Restrict the list of journal view in search view
if view_type == 'search' and result['fields'].get('journal_id', False):
result['fields']['journal_id']['selection'] = journal_pool.name_search(cr, uid, '', [], context=context)
ctx = context.copy()
#we add the refunds journal in the selection field of journal
if context.get('journal_type', False) == 'sale':
ctx.update({'journal_type': 'sale_refund'})
result['fields']['journal_id']['selection'] += journal_pool.name_search(cr, uid, '', [], context=ctx)
elif context.get('journal_type', False) == 'purchase':
ctx.update({'journal_type': 'purchase_refund'})
result['fields']['journal_id']['selection'] += journal_pool.name_search(cr, uid, '', [], context=ctx)
return result
if context.get('view_mode', False):
return result
fld = []
fields = {}
flds = []
title = _("Accounting Entries") #self.view_header_get(cr, uid, view_id, view_type, context)
xml = '''<?xml version="1.0"?>\n<tree string="%s" editable="top" refresh="5" on_write="on_create_write" colors="red:state==\'draft\';black:state==\'valid\'">\n\t''' % (title)
ids = journal_pool.search(cr, uid, [])
journals = journal_pool.browse(cr, uid, ids, context=context)
all_journal = [None]
common_fields = {}
total = len(journals)
for journal in journals:
all_journal.append(journal.id)
for field in journal.view_id.columns_id:
if not field.field in fields:
fields[field.field] = [journal.id]
fld.append((field.field, field.sequence, field.name))
flds.append(field.field)
common_fields[field.field] = 1
else:
fields.get(field.field).append(journal.id)
common_fields[field.field] = common_fields[field.field] + 1
fld.append(('period_id', 3, _('Period')))
fld.append(('journal_id', 10, _('Journal')))
flds.append('period_id')
flds.append('journal_id')
fields['period_id'] = all_journal
fields['journal_id'] = all_journal
fld = sorted(fld, key=itemgetter(1))
widths = {
'statement_id': 50,
'state': 60,
'tax_code_id': 50,
'move_id': 40,
}
for field_it in fld:
field = field_it[0]
if common_fields.get(field) == total:
fields.get(field).append(None)
# if field=='state':
# state = 'colors="red:state==\'draft\'"'
attrs = []
if field == 'debit':
attrs.append('sum = "%s"' % _("Total debit"))
elif field == 'credit':
attrs.append('sum = "%s"' % _("Total credit"))
elif field == 'move_id':
attrs.append('required = "False"')
elif field == 'account_tax_id':
attrs.append('domain="[(\'parent_id\', \'=\' ,False)]"')
attrs.append("context=\"{'journal_id': journal_id}\"")
elif field == 'account_id' and journal.id:
attrs.append('domain="[(\'journal_id\', \'=\', journal_id),(\'type\',\'<>\',\'view\'), (\'type\',\'<>\',\'closed\')]" on_change="onchange_account_id(account_id, partner_id)"')
elif field == 'partner_id':
attrs.append('on_change="onchange_partner_id(move_id, partner_id, account_id, debit, credit, date, journal_id)"')
elif field == 'journal_id':
attrs.append("context=\"{'journal_id': journal_id}\"")
elif field == 'statement_id':
attrs.append("domain=\"[('state', '!=', 'confirm'),('journal_id.type', '=', 'bank')]\"")
elif field == 'date':
attrs.append('on_change="onchange_date(date)"')
elif field == 'analytic_account_id':
                attrs.append('''groups="analytic.group_analytic_accounting"''')  # Currently not working, possibly due to a framework limitation.
if field in ('amount_currency', 'currency_id'):
attrs.append('on_change="onchange_currency(account_id, amount_currency, currency_id, date, journal_id)"')
attrs.append('''attrs="{'readonly': [('state', '=', 'valid')]}"''')
if field in widths:
attrs.append('width="'+str(widths[field])+'"')
if field in ('journal_id',):
attrs.append("invisible=\"context.get('journal_id', False)\"")
elif field in ('period_id',):
attrs.append("invisible=\"context.get('period_id', False)\"")
else:
attrs.append("invisible=\"context.get('visible_id') not in %s\"" % (fields.get(field)))
xml += '''<field name="%s" %s/>\n''' % (field,' '.join(attrs))
xml += '''</tree>'''
result['arch'] = xml
result['fields'] = self.fields_get(cr, uid, flds, context)
return result
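    # Centralisation helper: reuse the first (oldest) move of the journal and
    # period as the centralised move; it must still be in draft state.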
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('UserError'),
_('The account move (%s) for centralisation ' \
'has been confirmed!') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=[], context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
if unlink_ids:
obj_move_rec.unlink(cr, uid, unlink_ids)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
context['journal_id'] = line.journal_id.id
context['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax !'), _('You can not change the tax, you should remove and recreate lines !'))
if ('account_id' in vals):
#~ if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
account_tmp = account_obj.browse(cr, uid, vals['account_id'], context=context)
if account_tmp and not account_tmp.active:
raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!'))
if update_check:
#~ if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
if list(set(vals.keys()) & set([
'period_id', 'date', 'account_id', 'move_id', 'tax_code_id',
'debit', 'credit', 'currency_id', 'amount_currency',
'analytic_account_id', 'partner_id', 'journal_id',
'company_id', 'tax_code_id'])):
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if ('journal_id' not in ctx):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if ('period_id' not in ctx):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if check:
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
for (state,) in result:
if state == 'done':
raise osv.except_osv(_('Error !'), _('You can not add/modify entries in a closed journal.'))
if not result:
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error !'), _('You can not do this modification on a confirmed entry ! Please note that you can just change some non important fields !'))
if line.reconcile_id:
raise osv.except_osv(_('Error !'), _('You can not do this modification on a reconciled entry ! Please note that you can just change some non important fields !'))
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
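    # Create a journal item: reuse or create the account move, enforce the
    # journal's account/type controls, convert to the account's secondary
    # currency when needed, and generate the related tax and analytic lines.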
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if vals.get('move_id', False):
company_id = self.pool.get('account.move').read(cr, uid, vals['move_id'], ['company_id']).get('company_id', False)
if company_id:
vals['company_id'] = company_id[0]
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!'))
if 'journal_id' in vals:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').get_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No piece number !'), _('Can not create an automatic sequence for this piece !\n\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad account !'), _('You can not use this general account in this journal !'))
if vals.get('analytic_account_id',False):
if journal.analytic_journal_id:
vals['analytic_lines'] = [(0,0, {
'name': vals['name'],
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'account_id': vals.get('analytic_account_id', False),
'unit_amount': vals.get('quantity', 1.0),
'amount': vals.get('debit', 0.0) or vals.get('credit', 0.0),
'general_account_id': vals.get('account_id', False),
'journal_id': journal.analytic_journal_id.id,
'ref': vals.get('ref', False),
'user_id': uid
})]
result = super(osv.osv, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
if journal.refund_journal:
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
else:
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
tmp_cnt = 0
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00).get('taxes'):
#create the base movement
if tmp_cnt == 0:
if tax[base_code]:
tmp_cnt += 1
self.write(cr, uid,[result], {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total)
})
else:
data = {
'move_id': vals['move_id'],
'journal_id': vals['journal_id'],
'period_id': vals['period_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
#create the VAT movement
data = {
'move_id': vals['move_id'],
'journal_id': vals['journal_id'],
'period_id': vals['period_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
del vals['account_tax_id']
if check and ((not context.get('no_store_function')) or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr, uid, [vals['move_id']], context)
return result
account_move_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
f4430e22cc2f6c99418d9e381141e4def5bbadbe | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/loadGroupName.py | 91317396187f0aba508194373e1dc407e7c35dc1 | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py | """loadGroupName userdefined property type, originally defined in resource
file set iot 90:00:00:05:00:00:00:00-1."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:15.
import pylon.resources.base
from pylon.resources.userdefined import userdefined
import pylon.resources.enumerations.char_encoding_t
class loadGroupName(pylon.resources.base.Structure):
"""loadGroupName userdefined property type. Text load group name. Name
for a load group to be used by optional user interface applications;
used to create an array of load group names."""
def __init__(self):
super().__init__(
key=10,
scope=1
)
self.__encoding = pylon.resources.enumerations.char_encoding_t.char_encoding_t(
)
self._register(('encoding', self.__encoding))
self.__name = pylon.resources.base.Array(
[
pylon.resources.base.Scaled(
size=1,
signed=False,
minimum=0,
maximum=255
) for i in range(120)
]
)
self._register(('name', self.__name))
self._default_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
self._original_name = 'UCPTloadGroupName'
self._property_scope, self._property_key = 1, 10
self._definition = userdefined.add(self)
def __set_encoding(self, v):
self.__encoding._value = v
encoding = property(
lambda self: self.__encoding._value,
__set_encoding,
None,
"""."""
)
def __set_name(self, v):
self.__name._value = v
name = property(
lambda self: self.__name._value,
__set_name,
None,
"""."""
)
def __set(self, v):
if not isinstance(v, type(self)):
raise TypeError(
'Expected instance of {0}, got {1}'.format(
type(self),
type(v)
)
)
self.__set_encoding(v.__encoding)
self.__set_name(v.__name)
_value = property(lambda self: self, __set)
def __len__(self):
"""Return the length of the type, in bytes."""
return 121
if __name__ == '__main__':
# unit test code.
item = loadGroupName()
pass
| [
"[email protected]"
] | |
595a9e74a588b9a31577ba1c84a3e2bd2e99a3bc | e4c798246339e765f04424d727106e80e810f47c | /Medium/iNo008.py | 7c70fbb6da2f54341b2bef3bbcc9b1e6fae85c2f | [] | no_license | kikihiter/LeetCode | 3a61dc4ee3223d634632e30b97c30a73e5bbe253 | 62b5ae50e3b42ae7a5a002efa98af5ed0740a37f | refs/heads/master | 2021-05-26T08:05:00.126775 | 2019-05-21T09:18:37 | 2019-05-21T09:18:37 | 127,999,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.lstrip()
posNum = True
if str == "":
return 0
if str[0]=='-':
posNum = False
str = str[1:]
elif str[0]=='+':
str = str[1:]
try:
int(str[0])
except:
return 0
rStr = ""
for i in str:
try:
int(i)
except:
break
rStr = rStr + i
rStr = rStr.lstrip('0')
if rStr == "":
return 0
if posNum == False:
return max(-int(rStr),-2147483648)
print rStr
return min(int(rStr),2147483647)
| [
"[email protected]"
] | |
3bf09fa4f79c4ab4f60f4fdf8d3c23e04214b598 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /F5ycABGyZtghMpYjr_16.py | ce3767d6e7a4448df5ed169a0465448836b9b5c5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py |
def max_num(n1, n2):
if n1 > n2:
return n1
else:
return n2
| [
"[email protected]"
] | |
50157256f9b323f313890c0165fa4fe159337357 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/the/test.py | 8b5458899ae7d4a6053ff37dca0868ce16e83cdb | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | import the
| [
"[email protected]"
] | |
be4bc8669b12545f0c578c87d72131ebfc8489d0 | 947273c16f8984a20cd002b99b52facd6e63e43b | /server/authentication/urls.py | dacfd5c43349691a7bc454b922558db58c2608aa | [] | no_license | ecuaappgye/App | 8e3b50b4f7a8b9c50876d24343781e8f53a51bbc | 2df7be6fd206d012f6a83acd0aa0cb75cf6d5937 | refs/heads/master | 2023-07-05T00:48:24.341021 | 2021-07-31T17:02:12 | 2021-07-31T17:02:12 | 385,267,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | from django.urls import include, path
from .apis import (UserEmailChange, UserGetApi, UserLoginApi, UserLogoutApi,
UserPasswordChange, UserPasswordReset,
UserPasswordResetCheck, UserRegisterApi,
UserRegisterVerifyApi, UserRegisterVerifyCheckApi,
UserUpdateApi)
authentication_urls = [
path('register/', UserRegisterApi.as_view(), name='register'),
path('register/verify/<int:user_id>/', UserRegisterVerifyApi.as_view(), name='register_verify'),
path('register/verify_check/<int:user_id>/', UserRegisterVerifyCheckApi.as_view(), name='register_verify_check'),
path('login/', UserLoginApi.as_view(), name='login'),
path('logout/', UserLogoutApi.as_view(), name='logout'),
path('password_reset/', UserPasswordReset.as_view()),
path('password_reset_check/', UserPasswordResetCheck.as_view()),
path('password_change/<int:user_id>/', UserPasswordChange.as_view(), name='password_change'),
path('email_change/<int:user_id>/', UserEmailChange.as_view(), name='email_change'),
path('get/<int:user_id>/', UserGetApi.as_view(), name='get'),
]
drivers_urls =[
path('update/<int:user_id>/', UserUpdateApi.as_view(), name='update')
]
urlpatterns =[
path('auth/', include((authentication_urls, 'auth'))),
path('driver/', include((drivers_urls, 'driver')))
]
| [
"[email protected]"
] | |
ab6f49788e9c9b703b8119182f349d2b181ec92c | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /kaki.py | 2c2c28e9db71f7ea3b53c39e8cf861cadb925d35 | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | S = [input() for _ in range(12)]
cnt = 0
for s in S:
set_ = set(s)
if 'r' in set_:
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
7329c993e5cfe2cf131a107a9c946a0937892cb4 | 098ac9ecdaa67b717182c2aeca2a9d60833e88e7 | /opentcweb/settings/prod.py | fd788fb20c554254729032aeabf64156243e772a | [
"MIT"
] | permissive | cahya-wirawan/opentc-web | c8e758835d129cf7edb6f9dbf640632c2aa9ff2f | fa74c49f3f2b1a74624deca912f7da87afdc7e1b | refs/heads/master | 2021-01-19T19:13:44.629858 | 2018-01-21T13:21:32 | 2018-01-21T13:21:32 | 88,406,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from __future__ import absolute_import
from .base import *
# Production overrides
DEBUG = False
#...
| [
"[email protected]"
] | |
46b52fe8e5c60205d2161d38dc9193d19d105f9e | cba90cdd06eced813be6ad80e6295587223c4600 | /betfairlightweight/endpoints/navigation.py | 8795b7d2b4a2e08e79350a3a78ae3dd5e1c20f13 | [
"MIT"
] | permissive | mberk/betfair | 1a22528b881e02567626dbe7e8c4f0197809c38e | 6b064a68c8d2afceda81b70d74b6a0ee9601f228 | refs/heads/master | 2023-03-07T02:33:06.443407 | 2022-08-16T08:06:10 | 2022-08-16T08:06:10 | 192,976,576 | 0 | 1 | MIT | 2023-03-01T12:03:37 | 2019-06-20T19:28:23 | Python | UTF-8 | Python | false | false | 1,510 | py | import requests
from ..exceptions import APIError, InvalidResponse
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
from ..compat import json
class Navigation(BaseEndpoint):
"""
Navigation operations.
"""
def list_navigation(self, session: requests.Session = None) -> dict:
"""
This Navigation Data for Applications service allows the retrieval of the
full Betfair market navigation menu from a compressed file.
:param requests.session session: Requests session object
:rtype: json
"""
return self.request(session=session)
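    # Typical usage, assuming a logged-in APIClient instance named `trading`:
    #     menu = trading.navigation.list_navigation()
    # which returns the full market navigation menu as a nested dict.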
def request(
self, method: str = None, params: dict = None, session: requests.Session = None
    ) -> dict:
session = session or self.client.session
try:
response = session.get(
self.url,
headers=self.client.request_headers,
timeout=(self.connect_timeout, self.read_timeout),
)
except requests.ConnectionError as e:
raise APIError(None, method, params, e)
except Exception as e:
raise APIError(None, method, params, e)
check_status_code(response)
try:
response_json = json.loads(response.content.decode("utf-8"))
except ValueError:
raise InvalidResponse(response.text)
return response_json
@property
def url(self) -> str:
return self.client.navigation_uri
| [
"[email protected]"
] | |
42cd98f60f8637e2f8b57280dee6eeb14f3eac98 | bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5 | /src/modules/customised/payroll/hra/__init__.py | 708a454f4468ac2e8c826538ed0f9f59fab6f7cf | [] | no_license | kakamble-aiims/work | ba6cbaf4c525ff7bc28d0a407f16c829d0c35983 | cd392bf0e80d71c4742568e9c1dd5e5211da56a9 | refs/heads/master | 2022-04-02T14:45:58.515014 | 2019-12-31T14:00:51 | 2019-12-31T14:00:51 | 199,015,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from trytond.pool import Pool
from .hra import *
def register():
Pool.register(
HRA_Allowance,
module='hra', type_='model') | [
"[email protected]"
] | |
dd8ff876cdff51683095b93c5c1e9985b5a29584 | 9732da539d940904cf09b4164a307cb1a58fbb35 | /superhero/ability_and_armor.py | bb0e7c0ea30847095581385d460942d5d2e5ad75 | [] | no_license | makhmudislamov/fun_python_exercises | f3c7557fa6ed400ee196252a84ad7b6b23b913f1 | 21ab89540fb5f4f04dbdb80f361bf4febd694c11 | refs/heads/master | 2020-05-26T05:42:20.115833 | 2019-10-17T03:28:57 | 2019-10-17T03:28:57 | 188,125,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | from random import randint
class Ability:
def __init__(self, name, max_damage):
'''
Initialize the values passed into this
method as instance variables.
'''
self.name = name
self.max_damage = max_damage
def __str__(self):
return f'This ability is {self.name}'
def ability_attack(self):
'''
Use randint(a, b) to select a random attack value.
Return an attack value between 0 and the full attack.
'''
attack_value = randint(0, self.max_damage)
# print(f"attack value in ability: {attack_value}")
self.max_damage -= attack_value
return attack_value
class Weapon(Ability):
def ability_attack(self):
""" This method returns a random value
between one half to the full attack power of the weapon.
"""
return randint(self.max_damage // 2, self.max_damage)
class Armor():
def __init__(self, name, max_block):
'''
Initialize the values passed into this
method as instance variables.
'''
self.name = name
self.max_block = max_block
def block(self):
'''
Return a random value between
0 and the initialized max_block strength.
'''
block_value = randint(0, self.max_block)
return block_value
# if __name__ == "__main__":
# pass
| [
"[email protected]"
] | |
edb363be7d18412f48d26946d0a265a266919f9e | 9d43b8a3b53001f25a347fd96e5c49538b0c509a | /mxshop/apps/trade/views.py | 30e854b8ad252b98ccda10e6bfe8ca3d67cb173a | [] | no_license | w8833531/mxfresh | b81b7e4223536c6bedb049009386015935d33987 | 46b83fafdae8450491344c531de81a45ab5d8aae | refs/heads/master | 2021-04-09T15:53:50.829921 | 2018-08-08T01:41:14 | 2018-08-08T01:41:14 | 125,793,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,746 | py | import random, time
from datetime import datetime
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import status
from rest_framework import permissions
from rest_framework import authentication
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from utils.alipay import AliPay
from .serializers import ShopCartSerializer,ShopCartDetailSerializer, OrderSerializer, OrderDetailSerializer
from .models import ShoppingCart, OrderInfo, OrderGoods
from mxshop.settings import appid, private_key_path, alipay_pub_key_path, alipay_notify_url, alipay_return_url
# Create your views here.
class ShoppingCartViewset(viewsets.ModelViewSet):
"""
    Shopping cart functionality.
    list:
        List the items in the shopping cart
    create:
        Add an item to the shopping cart
    delete:
        Remove an item from the shopping cart
    update:
        Update an item in the shopping cart
"""
authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
lookup_field = "goods_id"
# override get_serializer_class method, if list return DetailSerializer
def get_serializer_class(self, *args, **kwargs):
if self.action == 'list':
return ShopCartDetailSerializer
else:
return ShopCartSerializer
def get_queryset(self):
return ShoppingCart.objects.filter(user=self.request.user)
class OrderViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
"""
    Order management.
    List:
        List orders
    Delete:
        Delete an order
    Create:
        Create a new order
    Retrieve:
        Get order details
"""
authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
serializer_class = OrderSerializer
def get_queryset(self):
return OrderInfo.objects.filter(user=self.request.user)
def get_serializer_class(self):
if self.action == 'retrieve':
return OrderDetailSerializer
else:
return OrderSerializer
    # Generate an order number: current timestamp + user id + two random digits
def generate_order_sn(self):
random_int = random.Random()
order_sn = "{time_str}{userid}{random_str}".format(time_str=time.strftime('%Y%m%d%H%M%S'),
userid=self.request.user.id, random_str=random_int.randint(10, 99))
return order_sn
    # When creating an order, override perform_create to set order_sn on the saved order
def perform_create(self, serializer):
"""
        When creating an order: attach the cart goods to it, decrement the goods stock, and empty the shopping cart.
"""
        # Save the current user's order
order = serializer.save(order_sn=self.generate_order_sn())
        # Fetch every item in the current user's shopping cart
shop_carts = ShoppingCart.objects.filter(user=self.request.user)
        # Move each item and its quantity into the order, decrement stock accordingly, and empty the cart
for shop_cart in shop_carts:
            # Create an order-goods record
order_goods = OrderGoods()
            # Copy the goods and quantity onto the order-goods record
order_goods.goods = shop_cart.goods
order_goods.goods_num = shop_cart.nums
            # Decrement the goods stock by the ordered quantity
order_goods.goods.goods_num -= order_goods.goods_num
order_goods.goods.save()
            # Attach the record to the order and save it
order_goods.order = order
order_goods.save()
            # Remove the item from the shopping cart
shop_cart.delete()
return order
    # When deleting an order, override perform_destroy to restore stock for the order's goods
def perform_destroy(self, instance):
if instance.pay_status != "TRADE_SUCCESS":
            # Before deleting: if the order was never successfully paid, add the ordered quantities back to stock
order_goods = OrderGoods.objects.filter(order=instance.id)
for order_good in order_goods:
order_good.goods.goods_num += order_good.goods_num
order_good.goods.save()
instance.delete()
class AliPayViewset(APIView):
def get(self, request):
"""
        Handle the Alipay return_url callback
:param request:
:return:
"""
processed_dict = {}
for key, value in request.GET.items():
processed_dict[key] = value
sign = processed_dict.pop("sign", None)
alipay = AliPay(
appid=appid,
app_notify_url=alipay_notify_url,
app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages returned by Alipay (not your own public key)
            debug=True,  # defaults to False
return_url=alipay_return_url,
)
verify_re = alipay.verify(processed_dict, sign)
if verify_re is True:
# order_sn = processed_dict.get('out_trade_no', None)
# trade_no = processed_dict.get('trade_no', None)
# trade_status = processed_dict.get('trade_status', None)
# existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
# for existed_order in existed_orders:
# existed_order.pay_status = trade_status
# existed_order.trade_no = trade_no
# existed_order.pay_time = datetime.now()
# existed_order.save()
return Response("success")
def post(self, request):
"""
        Handle the Alipay notify_url callback
:param request:
:return:
"""
processed_dict = {}
for key, value in request.POST.items():
processed_dict[key] = value
print(key, value)
sign = processed_dict.pop("sign", None)
alipay = AliPay(
appid=appid,
app_notify_url=alipay_notify_url,
app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages returned by Alipay (not your own public key)
            debug=True,  # defaults to False
return_url=alipay_return_url,
)
verify_re = alipay.verify(processed_dict, sign)
if verify_re is True:
order_sn = processed_dict.get('out_trade_no', None)
trade_no = processed_dict.get('trade_no', None)
trade_status = processed_dict.get('trade_status', None)
existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
for existed_order in existed_orders:
existed_order.pay_status = trade_status
                # If payment succeeded, increase the sold count of each goods item in the order (note: this need not happen in real time; a background job would be a better fit)
if existed_order.pay_status == "TRADE_SUCCESS":
order_goods = existed_order.goods.all()
for order_good in order_goods:
order_good.goods.sold_num += order_good.goods_num
order_good.goods.save()
existed_order.trade_no = trade_no
existed_order.pay_time = datetime.now()
existed_order.save()
return Response("success") | [
"[email protected]"
] | |
7d6c817fe544b5cc80a68b8c685ce92faf0c9ef5 | a9d6a3b0fe418e4e5cc131ebc05f9b56c0e4543e | /chapter11-django/site02/site02/settings.py | 1ba07484b03cf34c8252583125bc6c301d4cb224 | [] | no_license | Kianqunki/Python_CorePythonApplicationsProgramming | 34a36ba64bdc303814de507c4fcfc3c81ff88b5f | 77263c1fde0d02aade180f7e73d2cdee1d170d58 | refs/heads/master | 2021-05-07T02:41:44.567088 | 2014-10-27T17:43:51 | 2014-10-27T17:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | """
Django settings for site02 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y97upk5xk__c@j95sw4v-pf&#i45ir$cm6-ya)byzikor7+2sv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'approver',
'poster'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'site02.urls'
WSGI_APPLICATION = 'site02.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'TweetApprover.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# from this point on MY consts
TWEET_APPROVER_EMAIL = '[email protected]'
EMAIL_HOST = 'smtp.mydomain.com'
EMAIL_HOST_USER = 'username'
EMAIL_HOST_PASSWORD = 'password'
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TWITTER_CONSUMER_KEY = 'DeH9TfrfeV7UeRgK3OSGA'
TWITTER_CONSUMER_SECRET = 'sZGBB28VZcrRfcZvexYydj2Pc2uWW307kP8l7T7yiQo'
TWITTER_OAUTH_TOKEN = '2334856880-zYwvSu8kS7cGfH67lQ64vulTUbY7zxhc39bpnlG'
TWITTER_OAUTH_TOKEN_SECRET = 'RTQ7pzSytCIPsASCkA0Z5rubpHSWbvjvYR3c3hb9QhC3M'
| [
"[email protected]"
] | |
b642ce9125bc51b5a9f9d0ae69199d2d0bd1bf63 | 2e8ff2eb86f34ce2fc330766906b48ffc8df0dab | /tensorflow_probability/python/experimental/inference_gym/targets/__init__.py | a5ba67a6a9b68bf31372bf5990405fe49fbdf663 | [
"Apache-2.0"
] | permissive | wataruhashimoto52/probability | 9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae | 12e3f256544eadea6e863868da825614f4423eb0 | refs/heads/master | 2021-07-16T18:44:25.970036 | 2020-06-14T02:48:29 | 2020-06-14T02:51:59 | 146,873,495 | 0 | 0 | Apache-2.0 | 2018-08-31T09:51:20 | 2018-08-31T09:51:20 | null | UTF-8 | Python | false | false | 2,223 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Targets package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.inference_gym.targets.banana import Banana
from tensorflow_probability.python.experimental.inference_gym.targets.bayesian_model import BayesianModel
from tensorflow_probability.python.experimental.inference_gym.targets.ill_conditioned_gaussian import IllConditionedGaussian
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import ItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import SyntheticItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import GermanCreditNumericLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import LogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.model import Model
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import GermanCreditNumericSparseLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import SparseLogisticRegression
__all__ = [
'Banana',
'BayesianModel',
'GermanCreditNumericLogisticRegression',
'GermanCreditNumericSparseLogisticRegression',
'IllConditionedGaussian',
'ItemResponseTheory',
'LogisticRegression',
'Model',
'SparseLogisticRegression',
'SyntheticItemResponseTheory',
]
| [
"[email protected]"
] | |
748a3810da0b0659890ef170abef1ea0d6d32b5f | 5961726d2e0d84c4ced32e5cd072c3c0c07153cb | /smart_schedule/line/handlers/__init__.py | 48b1b8553fed5e192692650955bf0185450019e4 | [] | no_license | macinjoke/smart_schedule | 46bc68d712646ffb45dcf1e8bd9d140d7a9fb84f | 605c39f2d465cb8e56bedc941109f3b716608efa | refs/heads/master | 2021-03-19T15:53:35.886128 | 2018-01-13T08:22:50 | 2018-01-13T08:22:50 | 76,947,986 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from datetime import datetime
import flask
import urllib
import hashlib
import re
from linebot.models import TextSendMessage
from linebot import LineBotApi
from smart_schedule.settings import (
line_env, web_env, hash_env
)
line_bot_api = LineBotApi(line_env['channel_access_token'])
# TODO: decide where the functions below should live and whether there is room to refactor them
def reply_google_auth_message(event):
auth_url = flask.url_for('oauth2')
if event.source.type == 'user':
talk_id = event.source.user_id
elif event.source.type == 'group':
talk_id = event.source.group_id
elif event.source.type == 'room':
talk_id = event.source.room_id
else:
raise Exception('invalid `event.source`')
m = hashlib.md5()
m.update(talk_id.encode('utf-8'))
m.update(hash_env['seed'].encode('utf-8'))
params = urllib.parse.urlencode({'talk_id': talk_id, 'hash': m.hexdigest()})
url = '{}{}?{}'.format(web_env['host'], auth_url, params)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='このリンクから認証を行ってください\n{}'.format(url))
)
def reply_refresh_error_message(event):
reply_text = '''認証情報の更新エラーが発生しました。同じGoogleアカウントで複数の\
認証を行っている場合にこの不具合が発生します。このトークでSmart Scheduleを使用したい場合\
は以下のいずれかを行った後で認証しなおしてください。
1. 同じアカウントで認証しているトークでlogoutコマンドを行う(オススメ)
2. 下記URLから手動でSmart Scheduleの認証を解除する\
https://myaccount.google.com/u/1/permissions'''
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=reply_text)
)
def reply_invalid_credential_error_message(event):
reply_text = '''無効な認証情報です。同じGoogleアカウントで複数の認証を行っている\
場合にこの不具合が発生します。認証をやりなおしてください。'''
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=reply_text)
)
def generate_message_from_events(events, reply_text):
day_of_week_strs = ["月", "火", "水", "木", "金", "土", "日"]
for e in events:
summary = e['summary']
start = e['start'].get('dateTime', e['start'].get('date'))
if re.match('\d+[-]\d+[-]\d+[T]\d+[:]\d+[:]\d+[+]\d+[:]\d+', start):
start_datetime = datetime.strptime(start, '%Y-%m-%dT%H:%M:%S+09:00')
day_of_week = day_of_week_strs[start_datetime.weekday()]
start = start_datetime.strftime(
                '%Y年%m月%d日({}) %H時%M分'.format(day_of_week)
)
end = e['end'].get('dateTime', e['end'].get('date'))
end_datetime = datetime.strptime(end, '%Y-%m-%dT%H:%M:%S+09:00')
day_of_week = day_of_week_strs[end_datetime.weekday()]
end = end_datetime.strftime(
                '%Y年%m月%d日({}) %H時%M分'.format(day_of_week)
)
reply_text += '\n\n{}\n{}\n |\n{}\n\n---------------------------'.format(summary,
start,
end)
else:
start_datetime = datetime.strptime(start, '%Y-%m-%d')
start = start_datetime.strftime('%Y年%m月%d日')
end = '終日'
reply_text += '\n\n{}\n{} {}\n\n---------------------------'.format(summary,
start,
end)
return reply_text
from .join_event_handler import JoinEventHandler
from .leave_event_handler import LeaveEventHandler
from .message_event_handler import MessageEventHandler
from .postback_event_handler import PostBackEventHandler
from .unfollow_event_handler import UnfollowEventHandler
| [
"[email protected]"
] | |
a4354d06907b766c2c8e2f23546b79efe0959e4f | 06322e962c80f4c25838318e7d805ae88f0299e5 | /lengths.py | f6546177e6a717d960717d0a920b2e6122347ee7 | [
"BSD-2-Clause"
] | permissive | unixpickle/uno-ai | 6d4ec187e0c158c15cd4240ccf7e894cb599e071 | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | refs/heads/master | 2020-04-21T10:20:07.310885 | 2019-08-06T15:27:45 | 2019-08-06T15:27:45 | 169,482,953 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | """
Measure the lengths of random games.
"""
import random
from uno_ai.game import Game
def main():
while True:
g = Game(4)
num_moves = 0
while g.winner() is None:
action = random.choice(g.options())
g.act(action)
num_moves += 1
print(num_moves)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9bd919b284a2108b62fb412c5d961bcb422c8d89 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_iapp_template.py | 4437352d228d92f1318fbf343532623181c1e425 | [
"GPL-3.0-only",
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 15,691 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_iapp_template
short_description: Manages TCL iApp templates on a BIG-IP
description:
- Manages TCL iApp templates on a BIG-IP. This module will allow you to
deploy iApp templates to the BIG-IP and manage their lifecycle. The
conventional way to use this module is to import new iApps as needed
or by extracting the contents of the iApp archive that is provided at
downloads.f5.com and then importing all the iApps with this module.
This module can also update existing iApps provided that the source
of the iApp changed while the name stayed the same. Note however that
this module will not reconfigure any services that may have been
created using the C(bigip_iapp_service) module. iApps are normally
not updated in production. Instead, new versions are deployed and then
existing services are changed to consume that new template. As such,
the ability to update templates in-place requires the C(force) option
to be used.
version_added: 2.4
options:
force:
description:
- Specifies whether or not to force the uploading of an iApp. When
C(yes), will force update the iApp even if there are iApp services
using it. This will not update the running service though. Use
C(bigip_iapp_service) to do that. When C(no), will update the iApp
only if there are no iApp services using the template.
type: bool
name:
description:
- The name of the iApp template that you want to delete. This option
is only available when specifying a C(state) of C(absent) and is
provided as a way to delete templates that you may no longer have
the source of.
content:
description:
- Sets the contents of an iApp template directly to the specified
value. This is for simple values, but can be used with lookup
plugins for anything complex or with formatting. C(content) must
be provided when creating new templates.
state:
description:
- Whether the iApp template should exist or not.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add the iApp contained in template iapp.tmpl
bigip_iapp_template:
content: "{{ lookup('template', 'iapp.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place that has existing services created from it.
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
force: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import uuid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
api_attributes = []
returnables = []
@property
def name(self):
if self._values['name']:
return self._values['name']
if self._values['content']:
try:
name = self._get_template_name()
return name
except NonextantTemplateNameException:
raise F5ModuleError(
"No template name was found in the template"
)
return None
@property
def content(self):
if self._values['content'] is None:
return None
result = self._squash_template_name_prefix()
result = self._replace_template_name(result)
return result
@property
def checksum(self):
return self._values['tmplChecksum']
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
def _squash_template_name_prefix(self):
"""Removes the template name prefix
The IappParser in the SDK treats the partition prefix as part of
the iApp's name. This method removes that partition from the name
in the iApp so that comparisons can be done properly and entries
can be created properly when using REST.
:return string
"""
pattern = r'sys\s+application\s+template\s+/Common/'
replace = 'sys application template '
return re.sub(pattern, replace, self._values['content'])
def _replace_template_name(self, template):
"""Replaces template name at runtime
To allow us to do the switch-a-roo with temporary templates and
checksum comparisons, we need to take the template provided to us
and change its name to a temporary value so that BIG-IP will create
a clone for us.
:return string
"""
pattern = r'sys\s+application\s+template\s+[^ ]+'
if self._values['name']:
name = self._values['name']
else:
name = self._get_template_name()
replace = 'sys application template {0}'.format(fq_name(self.partition, name))
return re.sub(pattern, replace, template)
def _get_template_name(self):
# There is a bug in the iApp parser in the F5 SDK that prevents us from
# using it in all cases to get the name of an iApp. So we'll use this
# pattern for now and file a bug with the F5 SDK
pattern = r'sys\s+application\s+template\s+(?P<path>\/[^\{}"\'*?|#]+\/)?(?P<name>[^\{}"\'*?|#]+)'
matches = re.search(pattern, self._values['content'])
try:
result = matches.group('name').strip()
except IndexError:
result = None
if result:
return result
raise NonextantTemplateNameException
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
changed = False
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.templates_differ():
return False
if not self.want.force and self.template_in_use():
return False
if self.module.check_mode:
return True
self._remove_iapp_checksum()
# The same process used for creating (load) can be used for updating
self.create_on_device()
self._generate_template_checksum_on_device()
return True
def template_in_use(self):
collection = self.client.api.tm.sys.application.services.get_collection()
fullname = '/{0}/{1}'.format(self.want.partition, self.want.name)
for resource in collection:
if resource.template == fullname:
return True
return False
def read_current_from_device(self):
self._generate_template_checksum_on_device()
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def exists(self):
result = self.client.api.tm.sys.application.templates.template.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def _remove_iapp_checksum(self):
"""Removes the iApp tmplChecksum
This is required for updating in place or else the load command will
fail with a "AppTemplate ... content does not match the checksum"
error.
:return:
"""
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(tmplChecksum=None)
def templates_differ(self):
# BIG-IP can generate checksums of iApps, but the iApp needs to be
# on the box to do this. Additionally, the checksum is MD5, but it
# is not an MD5 of the entire content of the template. Instead, it
# is a hash of some portion of the template that is unknown to me.
#
# The code below is responsible for uploading the provided template
# under a unique name and creating a checksum for it so that that
# checksum can be compared to the one of the existing template.
#
# Using this method we can compare the checksums of the existing
# iApp and the iApp that the user is providing to the module.
backup = self.want.name
# Override whatever name may have been provided so that we can
# temporarily create a new template to test checksums with
self.want.update({
'name': 'ansible-{0}'.format(str(uuid.uuid4()))
})
# Create and remove temporary template
temp = self._get_temporary_template()
# Set the template name back to what it was originally so that
# any future operations only happen on the real template.
self.want.update({
'name': backup
})
if temp.checksum != self.have.checksum:
return True
return False
def _get_temporary_template(self):
self.create_on_device()
temp = self.read_current_from_device()
self.remove_from_device()
return temp
def _generate_template_checksum_on_device(self):
generate = 'tmsh generate sys application template {0} checksum'.format(
self.want.name
)
self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(generate)
)
def create(self):
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the iApp template")
def create_on_device(self):
remote_path = "/var/config/rest/downloads/{0}".format(self.want.name)
load_command = 'tmsh load sys application template {0}'.format(remote_path)
template = StringIO(self.want.content)
upload = self.client.api.shared.file_transfer.uploads
upload.upload_stringio(template, self.want.name)
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(load_command)
)
if hasattr(output, 'commandResult'):
result = output.commandResult
if 'Syntax Error' in result:
raise F5ModuleError(output.commandResult)
if 'ERROR' in result:
raise F5ModuleError(output.commandResult)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp template")
return True
def remove_from_device(self):
resource = self.client.api.tm.sys.application.templates.template.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
force=dict(
type='bool'
),
content=dict(),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as e:
cleanup_tokens(client)
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
571b5e21a17bb0386eb30bd81b021035a58c3802 | 5b56d0ec345d19c3e9c17764cdfa4ef8180f25e0 | /2020-01-python/api.py | fd5f9add8cd66d0c4436d45b28fc09d9b3c73da0 | [] | no_license | suzuki-hoge/warikan | 6e6d5f814fe4a9130b61a416f495326c316e2a8c | d47c32338421d4c6c88022a7d64a478e79708835 | refs/heads/master | 2020-12-04T08:54:07.960635 | 2020-02-07T03:29:52 | 2020-02-07T10:09:56 | 231,702,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | from bottle import route, get, post, put, request, response, hook, run
import json
import db, party
def handle(f):
def wrapper(*args, **kwargs):
try:
result = f(*args, **kwargs)
return {'status': 'ok', 'result': result} if result is not None else {'status': 'ok'}
except BaseException as e:
return {'status': 'ng', 'error': e.message}
return wrapper
@hook('after_request')
def allow_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
@route('<any:path>', method = 'OPTIONS')
def options(**kwargs):
return {}
@get('/party/<partyName>')
@handle
def find(partyName):
def party_dict(p):
return {'partyName': p.partyName, 'partyHoldAt': p.partyHoldAt, 'participants': map(participant_dict, p.participants), 'billingAmount': p.billingAmount, 'adjustingUnitAmount': p.adjustingUnitAmount}
def participant_dict(p):
return {'participantName': p.participantName, 'participantType': p.participantType, 'paymentSection': p.paymentSection}
return party_dict(db.read(partyName))
@post('/party/plan')
@handle
def plan():
p = request.json
new = party.Party.plan(p.get('partyName'), p.get('partyHoldAt'), p.get('secretaryName'), p.get('paymentSection'), p.get('billingAmount'), p.get('adjustingUnitAmount'))
db.write(new)
@put('/party/<partyName>/add')
@handle
def add(partyName):
p = request.json
found = db.read(partyName)
updated = found.add(party.Participant(p.get('participantName'), 'NotSec', p.get('paymentSection')))
db.write(updated)
@put('/party/<partyName>/remove')
@handle
def remove(partyName):
p = request.params
found = db.read(partyName)
updated = found.remove(p.participantName)
db.write(updated)
@put('/party/<partyName>/change')
@handle
def change(partyName):
p = request.json
found = db.read(partyName)
updated = found.change(p.get('adjustingUnitAmount'))
db.write(updated)
@get('/party/<partyName>/demand')
@handle
def demand(partyName):
found = db.read(partyName)
return map(lambda (participantName, paymentAmount): {'participantName': participantName, 'paymentAmount': str(paymentAmount)}, found.demand())
run(host = 'localhost', port = 9000)
| [
"[email protected]"
] | |
01df404873ee9e3bba62ab69c2e05d7863ae98c4 | 2ce0c770b6ebf1122cfe2cc02b943101172920f4 | /wwt_data_formats/tests/test_wtml_tree.py | 56668db83d32b8c0c1913e626cf661c4e392067c | [
"MIT"
] | permissive | WorldWideTelescope/wwt_data_formats | 48269945ab835706f75fbf56801c5f19c38c1930 | 8f3a977b87d36c5a903e3bf63ff2ea89547447bb | refs/heads/master | 2022-10-31T02:02:51.003406 | 2022-10-25T19:49:38 | 2022-10-25T19:49:38 | 225,955,212 | 2 | 4 | MIT | 2023-08-18T00:18:54 | 2019-12-04T20:54:27 | Python | UTF-8 | Python | false | false | 833 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import os.path
from .. import cli
from . import tempdir
def test_cli(tempdir):
"Simple smoke test to see if it runs at all."
prev_dir = os.getcwd()
try:
os.chdir(tempdir)
cli.entrypoint(
[
"tree",
"fetch",
"https://web.wwtassets.org/engine/assets/builtin-image-sets.wtml",
]
)
cli.entrypoint(["tree", "summarize"])
cli.entrypoint(["tree", "print-image-urls"])
cli.entrypoint(["tree", "print-dem-urls"])
finally:
# Windows can't remove the temp tree unless we chdir out of it.
os.chdir(prev_dir)
| [
"[email protected]"
] | |
505e01d16c4946a2cc61a71edd7d0ee2504ca6d6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/network/v20171001/get_virtual_network_gateway_bgp_peer_status.py | ce971110c0cb3c1a127751e2520bf66c4337635f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponseResult']]:
"""
List of BGP peers
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayBgpPeerStatusResult(
value=self.value)
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call
:param str peer: The IP address of the peer to retrieve the status of.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20171001:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value
return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
value=__ret__.value)
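

# Minimal usage sketch (editor addition; the resource names below are hypothetical
# placeholders, and a running Pulumi program with Azure credentials is assumed):
#
#   status = get_virtual_network_gateway_bgp_peer_status(
#       peer="10.0.0.4",
#       resource_group_name="example-rg",
#       virtual_network_gateway_name="example-vnet-gateway",
#   )
#   peers = status.value or []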
| [
"[email protected]"
] | |
ae137b1b1b702ea94707b85faf4024ec372f1832 | 83d36e8795b19d537fab32c4ced52359561a6b3b | /ingredients/apps.py | b0fea569eed9b6ec9258bfa94c37a231c4b4fcd0 | [] | no_license | vubon/django-graphql | b1325ebc31136d19b5ca5b5fd85c6fea98972e6c | 9586b5b5098dfeb25aa26521b24bc6c3beb333bc | refs/heads/master | 2020-04-14T08:02:04.957523 | 2019-12-05T05:02:46 | 2019-12-05T05:02:46 | 163,727,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class IngrdientsConfig(AppConfig):
name = 'ingredients'
| [
"[email protected]"
] | |
c7a3468c7cae4eb4836690dd475d98f13f9a6ac2 | f854ef28002a3931a8d8b8d0b9cc691b8a449db3 | /home-assistant/custom_components/hacs/helpers/classes/manifest.py | c0e43b9ba3f570e1740dbe3c9e52024391ae5891 | [
"MIT"
] | permissive | Burningstone91/smart-home-setup | 030cdaa13d05fb19a82b28ea455614d3276522ab | c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca | refs/heads/master | 2023-02-23T06:25:04.476657 | 2022-02-26T16:05:02 | 2022-02-26T16:05:02 | 239,319,680 | 421 | 36 | MIT | 2023-02-08T01:16:54 | 2020-02-09T14:39:06 | JavaScript | UTF-8 | Python | false | false | 1,156 | py | """
Manifest handling of a repository.
https://hacs.xyz/docs/publish/start#hacsjson
"""
from typing import List
import attr
from custom_components.hacs.exceptions import HacsException
@attr.s(auto_attribs=True)
class HacsManifest:
"""HacsManifest class."""
name: str = None
content_in_root: bool = False
zip_release: bool = False
filename: str = None
manifest: dict = {}
hacs: str = None
hide_default_branch: bool = False
domains: List[str] = []
country: List[str] = []
homeassistant: str = None
persistent_directory: str = None
iot_class: str = None
render_readme: bool = False
@staticmethod
def from_dict(manifest: dict):
"""Set attributes from dicts."""
if manifest is None:
raise HacsException("Missing manifest data")
manifest_data = HacsManifest()
manifest_data.manifest = manifest
if country := manifest.get("country"):
if isinstance(country, str):
manifest["country"] = [country]
for key in manifest:
setattr(manifest_data, key, manifest[key])
return manifest_data
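

# Minimal usage sketch (editor addition; the dictionary below is an invented
# hacs.json payload, not data from a real repository):
#
#   manifest = HacsManifest.from_dict(
#       {"name": "Example Integration", "country": "NO", "render_readme": True}
#   )
#   assert manifest.name == "Example Integration"
#   assert manifest.country == ["NO"]  # a single country string is normalized to a list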
| [
"[email protected]"
] | |
7343fb8defbea9a314d6f3be0e874c35f13e8940 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/dlmmin002/question3.py | 7a33ac4f48f3eddf6202f2094e5bd3b2da9e4fde | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | #personal spam message
#nolwazi dlamini
#3 march 2014
name =input("Enter first name: \n")
surname=input("Enter last name: \n")
money=eval(input("Enter sum of money in USD: \n"))
country=input("Enter country name: \n")
print("\nDearest" ,name)
print("It is with a heavy heart that I inform you of the death of my father,")
print("General Fayk ",surname,", your long lost relative from Mapsfostol.",sep="")
print("My father left the sum of ", money,"USD for us, your distant cousins. ",sep="")
print("Unfortunately, we cannot access the money as it is in a bank in ",country,".",sep="")
print("I desperately need your assistance to access this money.")
print("I will even pay you generously, 30% of the amount - ",(money*0.3),"USD,",sep="")
print("for your help. Please get in touch with me at this email address asap.")
print("Yours sincerely")
print("Frank" ,surname) | [
"[email protected]"
] | |
53d2e5d291801ab5cf03ead215d5c4ba7b43273e | 947fa6a4a6155ffce0038b11f4d743603418ad68 | /.c9/metadata/environment/fb_post_learning/fb_post_learning/settings/base_aws_s3.py | 50a8801a8acf4d0f51a64b61ae58285d2bc56de6 | [] | no_license | bharathi151/bharathi_diyyala | bd75e10639d7d22b332d5ce677e7799402dc4984 | 99f8657d010c790a0e4e4c9d6b57f81814784eb0 | refs/heads/master | 2022-11-21T12:43:48.401239 | 2020-07-23T09:05:52 | 2020-07-23T09:05:52 | 281,903,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | {"filter":false,"title":"base_aws_s3.py","tooltip":"/fb_post_learning/fb_post_learning/settings/base_aws_s3.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1589610124498,"hash":"94324fee64bfb279ca1b0e507e1414c07b06fab6"} | [
"[email protected]"
] | |
6f3c7087617984089152d4cc6b9c5fafc46b3f17 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano200.py | 73b13cf0c88c6b8338ab73ca1e913d4a70757784 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/A2A03ED2-C2C7-D446-B850-478F84233086.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest200.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
fafedd086eb52ca3a26667cd17b01a87c8ca5b04 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/791.py | f4c857964fa46a84265cc71f3b483d20abda438d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | __author__ = 'rrampage'
t = int(input())
def input_format():
s = input().split()[1]
return [int(i) for i in s]
def ovation(aud):
extras = 0
tot_standing = 0
for i, a in enumerate(aud):
if a == 0:
continue
if tot_standing >= i:
tot_standing += a
else:
extras += (i - tot_standing)
tot_standing += (i - tot_standing)
tot_standing += a
return extras
for x in range(t):
print("Case #%d: %d" % (x+1, ovation(input_format()))) | [
"[email protected]"
] | |
1f0050636b553377350ef958e53062abe0a0aec4 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/String/266. 回文排列.py | 2dba117a4cfd0caece5666e521229f85abe7fe4f | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | '''
奇数个的char最多只能有一个
'''
from collections import defaultdict
class Solution:
def canPermutePalindrome(self, s: str) -> bool:
if s == '':
return True
info = defaultdict(int)
for i in s:
info[i] += 1
count = 0
for v in info.values():
if v % 2 == 1:
count += 1
if count >= 2:
return False
return True | [
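

# Quick sanity check (editor addition, illustrative only):
#
#   Solution().canPermutePalindrome("carerac")  # True  -- can be arranged as "racecar"
#   Solution().canPermutePalindrome("code")     # False -- four characters with odd counts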
"[email protected]"
] | |
c439e8bc4823a5c6fc7da35db3637314de577c9c | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /say_big_person_of_fact/hand_and_case/try_able_company_up_week.py | dfd64790159e034f5c52cd28b6e4a81e19f11920 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py |
#! /usr/bin/env python
def different_place(str_arg):
way(str_arg)
print('thing')
def way(str_arg):
print(str_arg)
if __name__ == '__main__':
different_place('know_right_world_over_year')
| [
"[email protected]"
] | |
9f8aaad6b22ea7ecc6945c8288570a353c7d7b8f | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2/async_samples/sample_classify_document_from_url_async.py | 9e4775d42c58ae924f0d55dc072fb01011589d59 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 5,413 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_classify_document_from_url_async.py
DESCRIPTION:
This sample demonstrates how to classify a document from a URL using a trained document classifier.
To learn how to build your custom classifier, see sample_build_classifier.py.
More details on building a classifier and labeling your data can be found here:
https://aka.ms/azsdk/formrecognizer/buildclassifiermodel
USAGE:
python sample_classify_document_from_url_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CLASSIFIER_ID - the ID of your trained document classifier
-OR-
CLASSIFIER_CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container with your training files.
A document classifier will be built and used to run the sample.
"""
import os
import asyncio
async def classify_document_from_url_async(classifier_id):
# [START classify_document_from_url_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
classifier_id = os.getenv("CLASSIFIER_ID", classifier_id)
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_analysis_client:
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/IRS-1040.pdf"
poller = await document_analysis_client.begin_classify_document_from_url(
classifier_id, document_url=url
)
result = await poller.result()
print("----Classified documents----")
for doc in result.documents:
print(
f"Found document of type '{doc.doc_type or 'N/A'}' with a confidence of {doc.confidence} contained on "
f"the following pages: {[region.page_number for region in doc.bounding_regions]}"
)
# [END classify_document_from_url_async]
async def main():
classifier_id = None
if os.getenv("CLASSIFIER_CONTAINER_SAS_URL") and not os.getenv("CLASSIFIER_ID"):
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
from azure.ai.formrecognizer import (
ClassifierDocumentTypeDetails,
AzureBlobContentSource,
)
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
blob_container_sas_url = os.environ["CLASSIFIER_CONTAINER_SAS_URL"]
document_model_admin_client = DocumentModelAdministrationClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_model_admin_client:
poller = await document_model_admin_client.begin_build_document_classifier(
doc_types={
"IRS-1040-A": ClassifierDocumentTypeDetails(
azure_blob_source=AzureBlobContentSource(
container_url=blob_container_sas_url,
prefix="IRS-1040-A/train",
)
),
"IRS-1040-D": ClassifierDocumentTypeDetails(
azure_blob_source=AzureBlobContentSource(
container_url=blob_container_sas_url,
prefix="IRS-1040-D/train",
)
),
},
)
classifier = await poller.result()
classifier_id = classifier.classifier_id
await classify_document_from_url_async(classifier_id)
if __name__ == "__main__":
from azure.core.exceptions import HttpResponseError
try:
asyncio.run(main())
except HttpResponseError as error:
print(
"For more information about troubleshooting errors, see the following guide: "
"https://aka.ms/azsdk/python/formrecognizer/troubleshooting"
)
# Examples of how to check an HttpResponseError
# Check by error code:
if error.error is not None:
if error.error.code == "InvalidImage":
print(f"Received an invalid image error: {error.error}")
if error.error.code == "InvalidRequest":
print(f"Received an invalid request error: {error.error}")
# Raise the error again after printing it
raise
# If the inner error is None and then it is possible to check the message to get more information:
if "Invalid request".casefold() in error.message.casefold():
print(f"Uh-oh! Seems there was an invalid request: {error}")
# Raise the error again
raise
| [
"[email protected]"
] | |
b51914fd7b3e6ca960cf28e6f04ff6f317fe58a5 | 66865b7ed119f42c8490bf3f8821602e1201eb0b | /tests/performance/time_mean.py | f6149a4c0aef131f24928bd33fcd8962974edd8b | [
"MIT"
] | permissive | chanedwin/pandas-profiling | 1a8a35f6d985a93f02a25af6e1c650b24e11218a | d9ee4a8a589e075cfced9fc71ca500a20e2a3e73 | refs/heads/develop_spark_profiling | 2023-08-01T19:53:31.340751 | 2021-01-07T15:59:22 | 2021-01-07T15:59:22 | 288,504,610 | 1 | 3 | MIT | 2021-04-26T14:09:43 | 2020-08-18T16:14:57 | Jupyter Notebook | UTF-8 | Python | false | false | 726 | py | import timeit
testcode = """
import numpy as np
import pandas as pd
np.random.seed(12)
vals = np.random.random(1000)
series = pd.Series(vals)
series[series < 0.2] = pd.NA
def f1(series):
arr = series.values
arr_without_nan = arr[~np.isnan(arr)]
return np.mean(arr_without_nan)
def f2(series):
arr = series.values
return np.nanmean(arr)
def f3(series):
return series.mean()
def f4(series):
return series[series.notna()].mean()
"""
print(timeit.timeit("f1(series)", number=10, setup=testcode))
print(timeit.timeit("f2(series)", number=10, setup=testcode))
print(timeit.timeit("f3(series)", number=10, setup=testcode))
print(timeit.timeit("f4(series)", number=10, setup=testcode))
| [
"[email protected]"
] | |
69bed29a7ff68e4bc1e38f20eff1032b0709cdc7 | ce9593eb4ec109b86f3f75ac161a372e6d99f067 | /Problems/Beautify both output and code/main.py | cd06ffb22a2bed0cad2a1cdff2cf0c609f3bb1b4 | [] | no_license | wangpengda1210/Rock-Paper-Scissors | 0b2e5ef9b946dd209a85fa7440a7e40acfd83923 | 05c558ddfdf69eb4170185a158ded8a3a063359c | refs/heads/main | 2023-02-20T08:35:09.379752 | 2021-01-23T06:31:48 | 2021-01-23T06:31:48 | 332,143,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | print("http://example.com/{}/desirable/{}/profile".format(input(), input()))
| [
"[email protected]"
] | |
d8327625f3951b94827154fcd1efc3bb31fd7e6a | a4e59c4f47873daf440374367a4fb0383194d2ce | /Python/987.py | 071ba61e1dee050a891b2d02116afb3a3671fc25 | [] | no_license | maxjing/LeetCode | e37cbe3d276e15775ae028f99cf246150cb5d898 | 48cb625f5e68307390d0ec17b1054b10cc87d498 | refs/heads/master | 2021-05-23T17:50:18.613438 | 2021-04-02T17:14:55 | 2021-04-02T17:14:55 | 253,406,966 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
q = deque([(root, 0)])
res = defaultdict(list)
while q:
level = defaultdict(list)
for _ in range(len(q)):
node, col = q.popleft()
level[col].append(node.val)
if node.left:
q.append((node.left, col - 1))
if node.right:
q.append((node.right, col + 1))
for col in level:
res[col].extend(sorted(level[col]))
return [res[i] for i in sorted(res)] | [
"[email protected]"
] | |
d4c8bd1b9a8bce6b448f64fc215674c63f47f37e | ca77e9e45d666771c7b0897e7e3093b3d3c12f65 | /graphs/graphs.py | 918a04980fe52256e43ef0951a6fea0dfcaf64e8 | [] | no_license | 2gDigitalPost/custom | 46175d3a3fc4c3be21dc20203ff0a48fb93b5639 | 6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d | refs/heads/master | 2020-04-04T07:40:17.962611 | 2016-12-28T18:35:28 | 2016-12-28T18:35:28 | 39,648,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,650 | py | __all__ = ["GraphHoursWdg"]
import tacticenv, os
from datetime import date, timedelta as td
from pyasm.biz import *
from pyasm.web import Table, DivWdg, HtmlElement
from pyasm.common import jsonloads, jsondumps, Environment
from tactic.ui.common import BaseTableElementWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import CalendarInputWdg
class GraphHoursWdg(BaseTableElementWdg):
def init(my):
from tactic_client_lib import TacticServerStub
my.server = TacticServerStub.get()
def get_title(my):
div = DivWdg()
div.add_behavior(my.get_load_behavior())
return div
def kill_mul_spaces(my, origstrg):
newstrg = ''
for word in origstrg.split():
newstrg=newstrg+' '+word
return newstrg
def get_dates(my):
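        # Builds a default window covering the last 31 days, but the hardcoded
        # April 2013 range below overrides it before returning.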
import datetime
rightnow = datetime.datetime.now()
rnmo = str(rightnow.month)
if len(rnmo) == 1:
rnmo = '0%s' % rnmo
rnday = str(rightnow.day)
if len(rnday) == 1:
rnday = '0%s' % rnday
date2 = '%s-%s-%s' % (rightnow.year, rnmo, rnday)
date1 = date.today()-td(days=31)
#print "D1 = %s, D2 = %s" % (date1, date2)
date1 = '2013-04-01'
date2 = '2013-04-30'
return [str(date1), str(date2)]
def make_TV_data_dict(my, file_path):
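        # Parses a TimeVantage CSV export: the first six preamble lines are
        # skipped, the next quoted line supplies the field names, and each
        # following row's 'Total Work Hours' is summed per person per day,
        # keyed by the lowercased '<first> <last>' name with dates normalized
        # to YYYY-MM-DD.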
the_file = open(file_path, 'r')
fields = []
data_dict = {}
count = 0
boolio = True
line_count = 0
flen = 0
for line in the_file:
first_name = ''
last_name = ''
name = ''
fixed_date = ''
if line_count > 5:
line = line.rstrip('\r\n')
if line in [None,'',' ']:
boolio = False
if boolio:
data = line.split('","')
if line_count == 6:
dc = 0
for field in data:
if dc == 0:
field = field[1:]
field = my.kill_mul_spaces(field)
field = field.strip(' ')
fields.append(field)
dc = dc + 1
flen = len(fields)
fields[flen - 1] = fields[flen - 1][:-1]
elif line_count > 6:
data_count = 0
this_code = ''
this_data = {}
this_time = 0
for val in data:
field = fields[data_count]
if data_count == 0:
val = val[1:]
val = my.kill_mul_spaces(val)
val = val.strip(' ')
if data_count == flen - 1:
val = val[:-1]
if field in ['First Name', 'Last Name', 'Date', 'Total Work Hours']:
if field == 'Total Work Hours':
if val in ['-','',' ',None]:
val = 0
this_data[field] = val
if field == 'First Name':
first_name = val
elif field == 'Last Name':
last_name = val
elif field == 'Date':
date_s = val.split('/')
fixed_date = '%s-%s-%s' % (date_s[2], date_s[0], date_s[1])
data_count = data_count + 1
this_data['fixed_date'] = fixed_date
name = '%s %s' % (first_name.lower(), last_name.lower())
if name not in data_dict.keys():
data_dict[name] = {'first_name': first_name, 'last_name': last_name, 'name': name, 'days': {}}
if fixed_date not in data_dict[name]['days'].keys():
data_dict[name]['days'][fixed_date] = float(this_data['Total Work Hours'])
else:
data_dict[name]['days'][fixed_date] = float(data_dict[name]['days'][fixed_date]) + float(this_data['Total Work Hours'])
count = count + 1
line_count = line_count + 1
the_file.close()
return data_dict
def make_data_dict(my, file_name, mode):
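        # Parses '|'-delimited psql output. The header row supplies the column
        # names; 'mode' selects the key column ('id' for group mode, 'login'
        # for hours mode, otherwise 'code'). In hours mode a per-login list of
        # {'day', 'straight_time'} entries is also collected. Returns
        # [data_dict, hours].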
the_file = open(file_name, 'r')
fields = []
data_dict = {}
count = 0
boolio = True
code_index = 0
hours = {}
for line in the_file:
line = line.rstrip('\r\n')
#data = line.split('\t')
data = line.split('|')
if boolio:
if count == 0:
field_counter = 0
for field in data:
field = my.kill_mul_spaces(field)
field = field.strip(' ')
fields.append(field)
if mode == 'group':
if field == 'id':
code_index = field_counter
elif mode == 'hours':
if field == 'login':
code_index = field_counter
else:
if field == 'code':
code_index = field_counter
field_counter = field_counter + 1
elif count == 1:
nothing = True
elif data[0][0] == '(':
boolio = False
else:
data_count = 0
this_code = ''
this_data = {}
hour_data = {}
for val in data:
field = fields[data_count]
val = my.kill_mul_spaces(val)
val = val.strip(' ')
if data_count == code_index:
this_code = val
if mode == 'hours':
if this_code not in hours.keys():
hours[this_code] = []
elif mode == 'hours':
if field == 'straight_time':
if val in [None,'']:
val = 0
hour_data['straight_time'] = float(val)
elif field == 'day':
hour_data['day'] = val.split(' ')[0]
this_data[field] = val
data_count = data_count + 1
if mode == 'hours':
hours[this_code].append(hour_data)
data_dict[this_code] = this_data
count = count + 1
the_file.close()
return [data_dict, hours]
def make_string_dict(my, data_arr):
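        # Serializes a list of dicts into the delimited string decoded by
        # decode_string_dict() inside draw_chart3(): records are bracketed by
        # '|||', each key/value pair is wrapped in 'WvW' and tagged with
        # 'XsKEYsX:' / 'XsVALsX:' markers, e.g.
        # [{'a': 1}] -> '|||WvWXsKEYsX:aXsVALsX:1WvW|||'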
out_str = ''
for data in data_arr:
if out_str == '':
out_str = '|||'
else:
out_str = '%s|||' % out_str
for key, val in data.iteritems():
out_str = '%sWvWXsKEYsX:%sXsVALsX:%sWvW' % (out_str, key, val)
out_str = '%s|||' % out_str
return out_str
def get_toggle_row_behavior(my, group):
behavior = {'css_class': 'clickme', 'type': 'click_up', 'cbjs_action': '''
try{
var group = '%s';
var top_el = spt.api.get_parent(bvr.src_el, '.graph_top');
row = top_el.getElementById('graphs_' + group + '_row');
if(row.style.display == 'none'){
row.style.display = 'table-row';
bvr.src_el.innerHTML = '<b><u>Hide Users</u></b>';
}else{
row.style.display = 'none';
bvr.src_el.innerHTML = '<b><u>Show Users</u></b>';
}
}
catch(err){
spt.app_busy.hide();
spt.alert(spt.exception.handler(err));
//alert(err);
}
''' % (group)}
return behavior
def get_load_again(my):
behavior = {'css_class': 'clickme', 'type': 'click_up', 'cbjs_action': '''
try{
var top_el = spt.api.get_parent(bvr.src_el, '.graph_surrounder');
var inputs = top_el.getElementsByTagName('input');
date1 = '';
date2 = '';
for(var r = 0; r < inputs.length; r++){
if(inputs[r].getAttribute('name') == 'wh_graph_date1'){
date1 = inputs[r].value;
}else if(inputs[r].getAttribute('name') == 'wh_graph_date2'){
date2 = inputs[r].value;
}
}
alert(date1 + ' ||||| ' + date2);
spt.api.load_panel(top_el, 'graphs.GraphHoursWdg', {'date1': date1.split(' ')[0], 'date2': date2.split(' ')[0]});
}
catch(err){
spt.app_busy.hide();
spt.alert(spt.exception.handler(err));
//alert(err);
}
'''}
return behavior
def draw_chart3(my, div, idx, title):
behavior = {'type': 'load', 'cbjs_action':
'''
function decode_string_dict(data)
{
ret_arr = [];
pts = data.split('|||');
for(var r = 0; r < pts.length; r++){
chunk = pts[r];
if(chunk != '' && chunk != null){
dict = {};
corrs = chunk.split('WvW');
for(var t = 0; t < corrs.length; t++){
corr = corrs[t];
if(corr != '' && corr != null){
rightmost = corr.split('XsKEYsX:')[1];
segged = rightmost.split('XsVALsX:');
key = segged[0];
val = segged[1];
dict[key] = val;
}
}
ret_arr.push(dict);
}
}
return ret_arr;
}
var clicking = function(idx) {
title = '%s';
idx_data_el = document.getElementById('chartdiv_' + idx);
idx_data = idx_data_el.getAttribute('datastr');
var chartData = decode_string_dict(idx_data);
var chart;
chart = new AmCharts.AmSerialChart();
chart.dataProvider = chartData;
chart.categoryField = "cat";
chart.marginTop = 5;
chart.plotAreaFillAlphas = 0.2;
//chart.rotate = true;
// the following two lines makes chart 3D
chart.depth3D = 30;
chart.angle = 20;
// AXES
// category axis
var dateAxis = chart.categoryAxis;
dateAxis.parseDates = false; // as our data is date-based, we set parseDates to true
dateAxis.minPeriod = "DD"; // our data is daily, so we set minPeriod to DD
dateAxis.autoGridCount = false;
dateAxis.gridCount = 50;
dateAxis.gridAlpha = 0.2;
dateAxis.gridColor = "#000000";
dateAxis.axisColor = "#555555";
dateAxis.labelRotation = 30;
// we want custom date formatting, so we change it in next line
var hoursAxis = new AmCharts.ValueAxis();
hoursAxis.title = title;
hoursAxis.gridAlpha = 0.2;
hoursAxis.dashLength = 5;
hoursAxis.axisAlpha = 0.5;
hoursAxis.inside = false;
hoursAxis.position = "left";
chart.addValueAxis(hoursAxis);
var pctAxis = new AmCharts.ValueAxis();
pctAxis.title = 'Efficiency %%';
//pctAxis.stackType = "100%%";
pctAxis.gridAlpha = 0.2;
pctAxis.axisAlpha = 0.5;
//pctAxis.labelsEnabled = false;
pctAxis.position = "right";
pctAxis.min = 0;
pctAxis.max = 100;
chart.addValueAxis(pctAxis);
// GRAPHS
// duration graph
var timevantageGraph = new AmCharts.AmGraph();
timevantageGraph.title = "TimeVantage:";
timevantageGraph.valueField = "tv";
timevantageGraph.type = "column";
timevantageGraph.valueAxis = hoursAxis; // indicate which axis should be used
timevantageGraph.lineColor = "#CC0000";
timevantageGraph.balloonText = "TimeVantage: [[value]] hrs";
timevantageGraph.fillAlphas = 1;
timevantageGraph.lineThickness = 1;
timevantageGraph.legendValueText = " [[value]] Hrs";
//timevantageGraph.bullet = "square";
chart.addGraph(timevantageGraph);
// distance graph
var tacticGraph = new AmCharts.AmGraph();
tacticGraph.valueField = "tactic";
tacticGraph.title = "Tactic:";
tacticGraph.type = "column";
tacticGraph.fillAlphas = 1;
//tacticGraph.valueAxis = distanceAxis; // indicate which axis should be used
tacticGraph.valueAxis = hoursAxis; // indicate which axis should be used
tacticGraph.balloonText = "Tactic: [[value]] hrs";
tacticGraph.legendValueText = "[[value]] Hrs";
//tacticGraph.lineColor = "#ffe0e0";
tacticGraph.lineColor = "#2e0854";
tacticGraph.lineThickness = 1;
tacticGraph.lineAlpha = 0;
chart.addGraph(tacticGraph);
var pctGraph = new AmCharts.AmGraph();
pctGraph.title = "Efficiency:";
pctGraph.valueField = "percentage";
pctGraph.type = "line";
pctGraph.valueAxis = pctAxis; // indicate which axis should be used
//pctGraph.valueAxis = hoursAxis; // indicate which axis should be used
pctGraph.lineColor = "#00b200";
pctGraph.balloonText = "Efficiency: [[value]]%%";
pctGraph.fillAlphas = 0;
pctGraph.lineThickness = .5;
pctGraph.legendValueText = " Efficiency [[value]]%%";
pctGraph.bullet = "square";
chart.addGraph(pctGraph);
// CURSOR
var chartCursor = new AmCharts.ChartCursor();
chartCursor.zoomable = false;
chartCursor.categoryBalloonDateFormat = "DD";
chartCursor.cursorAlpha = 0;
chart.addChartCursor(chartCursor);
// LEGEND
var legend = new AmCharts.AmLegend();
legend.bulletType = "round";
legend.equalWidths = false;
legend.valueWidth = 40;
legend.color = "#000000";
chart.addLegend(legend);
// WRITE
chart.write("chartdiv_" + idx);
}
var js_files = ["amcharts/amcharts/amcharts.js"];
spt.dom.load_js(js_files, clicking);
clicking(%s);
'''% (title, idx)
}
div.add_behavior(behavior)
def get_load_behavior(my):
idx = my.get_current_index()
behavior = {'type': 'load', 'cbjs_action': '''
//spt.graph = {};
clicking = function(idx) {
var chartData = [{ country: 'USA29', visits: 4252 },
{ country: 'China', visits: 1882 },
{ country: 'Japan', visits: 1809 },
{ country: 'Poland', visits: 328}];
var chart = new AmCharts.AmSerialChart();
console.log(chart);
chart.dataProvider = chartData;
chart.categoryField = 'country';
chart.marginTop = 15;
chart.marginLeft = 55;
chart.marginRight = 15;
chart.marginBottom = 80;
chart.angle = 30;
chart.depth3D = 15;
var catAxis = chart.categoryAxis;
catAxis.gridCount = chartData.length;
catAxis.labelRotation = 90;
var graph = new AmCharts.AmGraph();
graph.balloonText = '[[category]]: [[value]]';
graph.valueField = 'visits'
graph.type = 'column';
graph.lineAlpha = 0;
graph.fillAlphas = 0.8;
chart.addGraph(graph);
chart.invalidateSize()
chart.write('chartdiv_' + idx);
chart.validateData();
chart.animateAgain();
console.log("finished")
var js_files = ["amcharts/amcharts/amcharts.js"];
spt.dom.load_js(js_files, clicking);
}
console.log("done onload");
'''
}
return behavior
def get_snapshot_file_path(my,snapshot_code):
what_to_ret = ''
rel_paths = my.server.get_all_paths_from_snapshot(snapshot_code, mode='local_repo')
if len(rel_paths) > 0:
rel_path = rel_paths[0]
splits = rel_path.split('/')
if len(splits) < 2:
splits = rel_path.split('\\')
file_only = splits[len(splits) - 1]
what_to_ret = rel_path
return what_to_ret
def get_display(my):
logine = Environment.get_login()
user_name = logine.get_login()
all_days = {}
group_days = {}
user_days = {}
tv_all_days = {}
tv_group_days = {}
tv_user_days = {}
tv_obj = my.server.eval("@SOBJECT(twog/global_resource['name','TimeVantage'])")[0]
snaps = my.server.eval("@SOBJECT(sthpw/snapshot['search_type','twog/global_resource?project=twog']['search_id','%s']['is_latest','true'])" % tv_obj.get('id'))
#print "SNAPS = %s" % snaps
file_path = my.get_snapshot_file_path(snaps[0].get('code'))
date1, date2 = my.get_dates()
if 'date1' in my.kwargs.keys():
date1 = my.kwargs.get('date1')
if 'date2' in my.kwargs.keys():
date2 = my.kwargs.get('date2')
#print "DATE1 = %s, DATE2 = %s" % (date1, date2)
#file_path = '/opt/spt/custom/graphs/tv.csv'
tv_data = my.make_TV_data_dict(file_path)
login_file = '/opt/spt/custom/graphs/login_file'
work_hour_file = '/opt/spt/custom/graphs/work_hour_file'
login_in_group_file = '/opt/spt/custom/graphs/login_in_group_file'
login_query = '/opt/spt/custom/graphs/login_query'
login_in_group_query = '/opt/spt/custom/graphs/login_in_group_query'
work_hour_query = '/opt/spt/custom/graphs/work_hour_query'
os.system('''psql -U postgres sthpw < %s > %s''' % (login_query, login_file))
os.system('''psql -U postgres sthpw < %s > %s''' % (work_hour_query, work_hour_file))
os.system('''psql -U postgres sthpw < %s > %s''' % (login_in_group_query, login_in_group_file))
login_data = my.make_data_dict(login_file, '')[0]
work_hour_data = my.make_data_dict(work_hour_file, 'hours')[1]
lig_data = my.make_data_dict(login_in_group_file, 'group')[0]
login_groups = {}
# Create login_group lookup by login name
for key, val in login_data.iteritems():
if key not in login_groups.keys():
login_groups[key] = []
for id, data in lig_data.iteritems():
if data.get('login') == key:
login_groups[key].append(data.get('login_group'))
# Match up TimeVantage names with tactic logins
# Fill user_dates dict with all matched logins
user_dates = {}
name_to_login = {}
for name, data in tv_data.iteritems():
for ld, ldata in login_data.iteritems():
lname = '%s %s' % (ldata.get('first_name').lower(), ldata.get('last_name').lower())
if name == lname:
if name not in user_dates.keys():
user_dates[name] = {'login': ldata.get('login'), 'dates': {}}
if name not in name_to_login.keys():
name_to_login[name] = ldata.get('login')
#print "TV-DATA = %s" % tv_data
group_dates = {}
all_dates = {}
for name, data in user_dates.iteritems():
tdata = tv_data[name]
tlogin = data.get('login')
ugroups = []
if tlogin in login_groups.keys():
ugroups = login_groups[tlogin]
print "TLOGIN = %s, UGROUPS = %s" % (tlogin, ugroups)
for tdate, ttime in tdata['days'].iteritems():
if tdate < date2 and tdate > date1:
if tdate not in user_dates[name]['dates'].keys():
user_dates[name]['dates'][tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
else:
user_dates[name]['dates'][tdate]['tv'] = user_dates[name]['dates'][tdate]['tv'] + ttime
for g in ugroups:
if g not in group_dates.keys():
group_dates[g] = {}
if tdate not in group_dates[g].keys():
group_dates[g][tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
else:
group_dates[g][tdate]['tv'] = group_dates[g][tdate]['tv'] + ttime
if tdate not in all_dates.keys():
all_dates[tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
else:
all_dates[tdate]['tv'] = all_dates[tdate]['tv'] + ttime
if tlogin in work_hour_data.keys():
for tdict in work_hour_data[tlogin]:
day = tdict.get('day')
amt = tdict.get('straight_time')
if day < date2 and day > date1:
if day not in user_dates[name]['dates'].keys():
user_dates[name]['dates'][day] = {'cat': day, 'tv': 0, 'tactic': amt}
else:
user_dates[name]['dates'][day]['tactic'] = user_dates[name]['dates'][day]['tactic'] + amt
for g in ugroups:
if g not in group_dates.keys():
group_dates[g] = {}
if day not in group_dates[g].keys():
print "DAY = %s, Group Dates Keys = %s" % (day, group_dates[g].keys())
group_dates[g][day] = {'cat': day, 'tv': 0, 'tactic': amt}
print "GROUP DATES KEYS = %s" % group_dates[g].keys()
else:
print "GROUP_DATES[%s][%s]['tactic'] = %s, amt = %s" % (g, day, group_dates[g][day]['tactic'], amt)
group_dates[g][day]['tactic'] = group_dates[g][day]['tactic'] + amt
print "GROUP_DATES[%s][%s]['tactic'] = %s" % (g, day, group_dates[g][day]['tactic'])
if day not in all_dates.keys():
all_dates[day] = {'cat': day, 'tv': 0, 'tactic': amt}
else:
all_dates[day]['tactic'] = all_dates[day]['tactic'] + amt
print "GROUP DATES = %s" % group_dates
d1s = date1.split('-')
d2s = date2.split('-')
d1 = date(int(d1s[0]),int(d1s[1]),int(d1s[2]))
d2 = date(int(d2s[0]),int(d2s[1]),int(d2s[2]))
delta = d2 - d1
dates_to_fill = []
for i in range(delta.days + 1):
dates_to_fill.append('%s' % (d1 + td(days=i)))
users = user_dates.keys()
idx = 0
for user in users:
udkeys = user_dates[user]['dates'].keys()
if len(udkeys) > 0:
for dtf in dates_to_fill:
found = False
for d, l in user_dates[user]['dates'].iteritems():
if d == dtf:
found = True
if not found:
user_dates[user]['dates'][dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
for grp, gdata in group_dates.iteritems():
for dtf in dates_to_fill:
found = False
for d, l in group_dates[grp].iteritems():
if d == dtf:
found = True
if not found:
group_dates[grp][dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
for dtf in dates_to_fill:
found = False
for d, l in all_dates.iteritems():
if d == dtf:
found = True
if not found:
all_dates[dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
#print "LOGIN GROUPS = %s" % login_groups
filtbl = Table()
filtbl.add_row()
date1_el = CalendarInputWdg("wh_graph_date1")
date1_el.set_option('show_activator',True)
date1_el.set_option('show_confirm', False)
date1_el.set_option('show_text', True)
date1_el.set_option('show_today', False)
date1_el.set_option('show_value', True)
date1_el.set_option('read_only', False)
if date1 not in [None,'']:
date1_el.set_option('default', date1)
date1_el.get_top().add_style('width: 150px')
date1_el.set_persist_on_submit()
date2_el = CalendarInputWdg("wh_graph_date2")
date2_el.set_option('show_activator',True)
date2_el.set_option('show_confirm', False)
date2_el.set_option('show_text', True)
date2_el.set_option('show_today', False)
date2_el.set_option('show_value', True)
date2_el.set_option('read_only', False)
if date2 not in [None,'']:
date2_el.set_option('default', date2)
date2_el.get_top().add_style('width: 150px')
date2_el.set_persist_on_submit()
f1 = filtbl.add_cell(' ')
f11 = filtbl.add_cell(' Date 1: ')
f2 = filtbl.add_cell(date1_el)
f21 = filtbl.add_cell(' Date 2: ')
f3 = filtbl.add_cell(date2_el)
f4 = filtbl.add_cell('<input type="button" value="Load Graph" name="not_yo_date"/>')
f4.add_style('cursor: pointer;')
f4.add_behavior(my.get_load_again())
f1.add_attr('width','40%%')
f4.add_attr('width','40%%')
surrounder = Table()
surrounder.add_attr('width','100%%')
surrounder.add_attr('class','graph_surrounder')
surrounder.add_row()
surrounder.add_cell(filtbl)
table = Table()
table.add_attr('width','100%%')
table.add_attr('class','graph_top')
table.add_style('background-color: #60ca9d;')
lgroupkeys = login_groups.keys()
arr = []
# Need to show this one for elites only
# Show supervisors their department's
# Show individuals their own
# Try to implement drop-downs
for d, l in all_dates.iteritems():
arr.append(l)
if len(arr) > 0:
arr2 = sorted(arr, key=lambda k: k['cat'])
acount = 0
for a1 in arr2:
percentage = 0
tv = float(a1.get('tv'))
tc = float(a1.get('tactic'))
if tv != 0:
percentage = tc/tv * 100
pps = '%.2f' % percentage
percentage = float(pps)
if percentage > 100:
percentage = 100
a1['percentage'] = percentage
arr2[acount] = a1
acount = acount + 1
widget = DivWdg("Chart area 2")
widget.add_attr('id','chartdiv_%s'%idx)
str_data = my.make_string_dict(arr2)
widget.add_attr('datastr', str_data)
widget.add_styles('width: 100%%;height: 200px;')
my.draw_chart3(widget, idx, 'All')
table.add_row()
tc = table.add_cell(widget)
tc.add_attr('width','100%%')
tc.add_attr('title','ALL')
idx = idx + 1
groups = group_dates.keys()
for group in groups:
grptbl = Table()
grptbl.add_attr('width','100%%')
grptbl.add_style('background-color: #a1b3e6;')
#print "GROUP = %s" % group
arr = []
for d, l in group_dates[group].iteritems():
arr.append(l)
if len(arr) > 0:
arr2 = sorted(arr, key=lambda k: k['cat'])
acount = 0
for a1 in arr2:
percentage = 0
tv = float(a1.get('tv'))
tc = float(a1.get('tactic'))
if tv != 0:
percentage = tc/tv * 100
pps = '%.2f' % percentage
percentage = float(pps)
if percentage > 100:
percentage = 100
a1['percentage'] = percentage
arr2[acount] = a1
acount = acount + 1
widget = DivWdg("Chart area 2")
widget.add_attr('id','chartdiv_%s'%idx)
str_data = my.make_string_dict(arr2)
widget.add_attr('datastr', str_data)
widget.add_styles('width: 100%%;height: 200px;')
my.draw_chart3(widget, idx, group)
grptbl.add_row()
tc = grptbl.add_cell(widget)
tc.add_attr('width','100%%')
tc.add_attr('title',group)
grptbl.add_row()
opener = grptbl.add_cell('<b><u>Show Users</u></b>')
opener.add_style('cursor: pointer;')
toggle_row_behavior = my.get_toggle_row_behavior(group)
opener.add_behavior(toggle_row_behavior)
idx = idx + 1
grpusers = 0
usertbl = Table()
usertbl.add_attr('width','100%%')
usertbl.add_style('background-color: #c8d0e7;')
for user in users:
if user in name_to_login.keys():
login_name = name_to_login[user]
#print "USER = %s, LOGIN NAME = %s" % (user, login_name)
if login_name in lgroupkeys:
lgroups = []
lgroups = login_groups[login_name]
#print "GROUP = %s, USER = %s, LGROUPS = %s" % (group, user, lgroups)
#print "LOGIN GROUPS = %s" % login_groups
if group in lgroups:
arr3 = []
for d, l in user_dates[user]['dates'].iteritems():
arr3.append(l)
if len(arr) > 0:
arr4 = sorted(arr3, key=lambda k: k['cat'])
acount = 0
for a1 in arr4:
percentage = 0
tv = float(a1.get('tv'))
tc = float(a1.get('tactic'))
if tv != 0:
percentage = tc/tv * 100
pps = '%.2f' % percentage
percentage = float(pps)
if percentage > 100:
percentage = 100
a1['percentage'] = percentage
arr4[acount] = a1
acount = acount + 1
widget = DivWdg("Chart area 2")
widget.add_attr('id','chartdiv_%s'%idx)
str_data = my.make_string_dict(arr4)
widget.add_attr('datastr', str_data)
widget.add_styles('width: 100%%;height: 200px;')
my.draw_chart3(widget, idx, user)
if grpusers % 2 == 0:
usertbl.add_row()
tc = usertbl.add_cell(widget)
tc.add_attr('width','50%%')
tc.add_attr('title',user)
idx = idx + 1
grpusers = grpusers + 1
if grpusers % 2 == 1:
te = usertbl.add_cell(' ')
te.add_attr('width','50%%')
grprow = grptbl.add_row()
grprow.add_attr('id','graphs_%s_row' % group)
grprow.add_style('display: table-row;')
grptbl.add_cell(usertbl)
table.add_row()
table.add_cell(grptbl)
surrounder.add_row()
surrounder.add_cell(table)
return surrounder
| [
"[email protected]"
] | |
02f6df5ae4820400c31f0a44ab0af1722aff4957 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/datahub/databus/shippers/mysql/shipper.py | 47bb186ba428a43fa955ca786b37cc8b70ff1a25 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 3,040 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datahub.databus.settings import MODULE_SHIPPER
from datahub.databus.shippers.base_shipper import BaseShipper
class MysqlShipper(BaseShipper):
storage_type = "mysql"
module = MODULE_SHIPPER
def _get_shipper_task_conf(self, cluster_name):
# physical_table_name 格式 "dbname_123.table_name"
arr = self.physical_table_name.split(".")
if len(arr) == 1:
db_name = "mapleleaf_%s" % (self.rt_info["bk_biz_id"])
table_name = self.physical_table_name
else:
db_name = arr[0]
table_name = arr[1]
conn_url = (
"jdbc:mysql://{}:{}/{}?autoReconnect=true&useServerPrepStmts=false&rewriteBatchedStatements=true".format(
self.sink_storage_conn["host"],
self.sink_storage_conn["port"],
db_name,
)
)
return self.config_generator.build_tspider_config_param(
cluster_name,
self.connector_name,
self.rt_id,
self.source_channel_topic,
self.task_nums,
conn_url,
self.sink_storage_conn["user"],
self.sink_storage_conn["password"],
table_name,
)
@classmethod
def _field_handler(cls, field, storage_params):
if field.get("is_index"):
storage_params.indexed_fields.append(field["physical_field"])
@classmethod
def _get_storage_config(cls, params, storage_params):
return json.dumps(
{
"indexed_fields": storage_params.indexed_fields,
}
)
| [
"[email protected]"
] | |
10a476e13c38323dbe8b0c4072c8570fa256f26c | 40fc1d38f2d4b643bc99df347c4ff3a763ba65e3 | /examples/menus/basic1/data/states/menu2.py | 6adb8054f7339f609ba0c3ea440473cc73fedab8 | [] | no_license | alecordev/pygaming | 0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c | 35e479b703acf038f47c2151b3759ad852781e4c | refs/heads/master | 2023-05-14T05:03:28.484678 | 2021-06-03T10:11:08 | 2021-06-03T10:11:08 | 372,768,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import pygame as pg
from .. import tools
import random
class Menu(tools.States):
def __init__(self, screen_rect):
tools.States.__init__(self)
self.screen_rect = screen_rect
self.title, self.title_rect = self.make_text(
"Menu2 State", (75, 75, 75), (self.screen_rect.centerx, 75), 50
)
self.pre_render_options()
self.from_bottom = 200
self.spacer = 75
def update(self, now, keys):
self.change_selected_option()
def const_event(self, keys):
pass
def cleanup(self):
pass
def entry(self):
pass
| [
"[email protected]"
] | |
380a73b7ffd584930d557f8d65de872cfdc9fbc7 | a0cc2d898690a33db84124aae128a014f05c1748 | /PE5/PE5_1.py | 78699fa1d857a31568c764111d00a9e4d689e91e | [] | no_license | Hilgon2/prog | 2bf9adb6315de3e6c95bb4cc944ec1e2842ae64f | b594dd523e2efa51250d3be43cf74cf2ca6229e9 | refs/heads/master | 2020-03-29T09:40:33.924573 | 2019-01-29T13:39:48 | 2019-01-29T13:39:48 | 149,769,217 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | def som(getal1, getal2, getal3):
return getal1 + getal2 + getal3
print(som(1, 5, 8)) | [
"[email protected]"
] | |
eababec9f6471e53a80fca79134347940be8d290 | fe91ffa11707887e4cdddde8f386a8c8e724aa58 | /components/schema_org/generate_schema_org_code_unittest.py | efe4f2b9872edd705ddf08553a7364cb1d9eefc1 | [
"BSD-3-Clause"
] | permissive | akshaymarch7/chromium | 78baac2b45526031846ccbaeca96c639d1d60ace | d273c844a313b1e527dec0d59ce70c95fd2bd458 | refs/heads/master | 2023-02-26T23:48:03.686055 | 2020-04-15T01:20:07 | 2020-04-15T01:20:07 | 255,778,651 | 2 | 1 | BSD-3-Clause | 2020-04-15T02:04:56 | 2020-04-15T02:04:55 | null | UTF-8 | Python | false | false | 5,946 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for generate_schema_org_code."""
import sys
import unittest
import generate_schema_org_code
from generate_schema_org_code import schema_org_id
import os
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
_current_dir = os.path.dirname(os.path.realpath(__file__))
# jinja2 is in chromium's third_party directory
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(
1, os.path.join(_current_dir, *([os.pardir] * 2 + ['third_party'])))
import jinja2
class GenerateSchemaOrgCodeTest(unittest.TestCase):
def test_get_template_vars(self):
schema = {
"@graph": [{
"@id": "http://schema.org/MediaObject",
"@type": "rdfs:Class"
},
{
"@id": "http://schema.org/propertyName",
"@type": "rdf:Property"
}]
}
names = {
"http://schema.org/MediaObject": 1234,
"MediaObject": 1235,
"http://schema.org/propertyName": 2345,
"propertyName": 2346
}
self.assertEqual(
generate_schema_org_code.get_template_vars(schema, names), {
'entities': [{
'name': 'MediaObject',
'name_hash': 1235
}],
'properties': [{
'name': 'propertyName',
'name_hash': 2346,
'thing_types': [],
'enum_types': []
}],
'enums': [],
'entity_parent_lookup':
[{
'name': 'MediaObject',
'name_hash': 1235,
'parents': [{
'name': 'MediaObject',
'name_hash': 1235
}]
}]
})
def test_lookup_parents(self):
thing = {'@id': schema_org_id('Thing')}
intangible = {
'@id': schema_org_id('Intangible'),
'rdfs:subClassOf': thing
}
structured_value = {
'@id': schema_org_id('StructuredValue'),
'rdfs:subClassOf': intangible
}
brand = {'@id': schema_org_id('Brand'), 'rdfs:subClassOf': intangible}
schema = {'@graph': [thing, intangible, structured_value, brand]}
self.assertSetEqual(
generate_schema_org_code.lookup_parents(brand, schema, {}),
set(['Thing', 'Intangible', 'Brand']))
def test_get_root_type_thing(self):
thing = {'@id': schema_org_id('Thing')}
intangible = {
'@id': schema_org_id('Intangible'),
'rdfs:subClassOf': thing
}
structured_value = {
'@id': schema_org_id('StructuredValue'),
'rdfs:subClassOf': intangible
}
schema = {'@graph': [thing, intangible, structured_value]}
self.assertEqual(
generate_schema_org_code.get_root_type(structured_value, schema),
thing)
def test_get_root_type_datatype(self):
number = {
'@id': schema_org_id('Number'),
'@type': [schema_org_id('DataType'), 'rdfs:Class']
}
integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
schema = {'@graph': [integer, number]}
self.assertEqual(
generate_schema_org_code.get_root_type(integer, schema), number)
def test_get_root_type_enum(self):
thing = {'@id': schema_org_id('Thing')}
intangible = {
'@id': schema_org_id('Intangible'),
'rdfs:subClassOf': thing
}
enumeration = {
'@id': schema_org_id('Enumeration'),
'rdfs:subClassOf': intangible
}
actionStatusType = {
'@id': schema_org_id('ActionStatusType'),
'rdfs:subClassOf': enumeration
}
schema = {'@graph': [thing, intangible, enumeration, actionStatusType]}
self.assertEqual(
generate_schema_org_code.get_root_type(actionStatusType, schema),
actionStatusType)
def test_parse_property_identifier(self):
thing = {'@id': schema_org_id('Thing')}
intangible = {
'@id': schema_org_id('Intangible'),
'rdfs:subClassOf': thing
}
structured_value = {
'@id': schema_org_id('StructuredValue'),
'rdfs:subClassOf': intangible
}
property_value = {
'@id': schema_org_id('PropertyValue'),
'rdfs:subClassOf': structured_value
}
number = {
'@id': schema_org_id('Number'),
'@type': [schema_org_id('DataType'), 'rdfs:Class']
}
integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
identifier = {
'@id': schema_org_id('Identifier'),
schema_org_id('rangeIncludes'): [property_value, integer, number]
}
schema = {
'@graph': [
thing, intangible, structured_value, property_value, number,
integer, identifier
]
}
names = {"http://schema.org/Identifier": 1234, "Identifier": 1235}
self.assertEqual(
generate_schema_org_code.parse_property(identifier, schema, names),
{
'name': 'Identifier',
'name_hash': 1235,
'has_number': True,
'thing_types': [property_value['@id']],
'enum_types': []
})
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
36a7393b21a2682ca5683e48c739bc8a39c968ea | c6ed09339ff21fa70f154f34328e869f0dd8e394 | /python/mysql-replication/binlog_rows_dump.py | 255f603361ff6f90e670bf2990edb1f0b99845fd | [] | no_license | fits/try_samples | f9b15b309a67f7274b505669db4486b17bd1678b | 0986e22d78f35d57fe1dd94673b68a4723cb3177 | refs/heads/master | 2023-08-22T14:35:40.838419 | 2023-08-07T12:25:07 | 2023-08-07T12:25:07 | 642,078 | 30 | 19 | null | 2022-12-28T06:31:24 | 2010-05-02T02:23:55 | Java | UTF-8 | Python | false | false | 3,022 | py |
import configparser
from datetime import date, datetime
import json
import os
import sys
import signal
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent)
class BinlogConfig:
def __init__(self, conf_file):
self.config = configparser.ConfigParser()
self.conf_file = conf_file
def load(self):
self.config.read(self.conf_file)
if 'binlog' in self.config:
return (
self.config['binlog']['log_file'],
int(self.config['binlog']['log_pos'])
)
return (None, None)
def save(self, log_file, log_pos):
self.config['binlog'] = {
'log_file': log_file,
'log_pos': log_pos
}
with open(self.conf_file, 'w') as f:
self.config.write(f)
def to_bool(s):
return s.lower() in ['true', 't', 'ok', 'yes', 'y', 'on', '1']
def split_env(name):
v = os.getenv(name)
if v is None:
return None
return v.split(',')
ini_file = os.getenv('INI_FILE', 'binlog.ini')
bconf = BinlogConfig(ini_file)
(log_file, log_pos) = bconf.load()
blocking = to_bool(os.getenv('BLOCKING', 'off'))
host = os.getenv('MYSQL_HOST', 'localhost')
port = int(os.getenv('MYSQL_PORT', '3306'))
user = os.getenv('MYSQL_USER')
password = os.getenv('MYSQL_PASSWORD')
schemas = split_env('SCHEMAS')
tables = split_env('TABLES')
cfg = {'host': host, 'port': port, 'user': user, 'password': password}
def to_json(obj):
if isinstance(obj, (datetime, date)):
return obj.isoformat()
return str(obj)
def handle_signal(sig, frame):
sys.exit(1)
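# Resume from the binlog position saved in the ini file (if any) and stream
# only row-level insert/update/delete events for the configured schemas/tables.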
stream = BinLogStreamReader(
connection_settings = cfg,
server_id = 1,
only_events = [WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent],
only_schemas = schemas,
only_tables = tables,
resume_stream = True,
log_file = log_file,
log_pos = log_pos,
blocking = blocking
)
try:
signal.signal(signal.SIGTERM, handle_signal)
for ev in stream:
for r in ev.rows:
data = {'table': '', 'schema': '', 'event_type': ''}
if 'values' in r:
data.update(r['values'])
if 'after_values' in r:
data.update(r['after_values'])
data['table'] = ev.table
data['schema'] = ev.schema
if isinstance(ev, WriteRowsEvent):
data['event_type'] = 'insert'
elif isinstance(ev, UpdateRowsEvent):
data['event_type'] = 'update'
elif isinstance(ev, DeleteRowsEvent):
data['event_type'] = 'delete'
print( json.dumps(data, default=to_json) )
finally:
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
stream.close()
bconf.save(stream.log_file, stream.log_pos)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
| [
"[email protected]"
] | |
cd966a58e69dc06f2d0a257a5dfbfcd40725bc3e | dec9ede4b28b8a5ac79ab5c89754f6ff5d65d8e1 | /source/main/settings.py | a97452f77ebe21ed189fdfb51743c0d75bacf140 | [] | no_license | Beknasar/python_group_6_homework_57_Ulanbek_uulu_Beknasar | 036f1eb2f84626344581bb7d864e63e40c3d2e4f | 3bf5e4eaa7133955b1bbb0131ebf9f4732965b1f | refs/heads/master | 2022-12-09T02:10:11.232216 | 2020-09-02T15:44:49 | 2020-09-02T15:44:49 | 292,327,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '83le51*6hai4mci%b-xtei(cms3smwhl9k4wy2m+l$8(^s=0qf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'accounts',
'webapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
LOGIN_URL = 'login'
| [
"[email protected]"
] | |
99dd2ad93382d05efd81223bfd055ed492ed1616 | 103e45cb0d6b25d0c90e533439cd7e525f25b1a8 | /tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py | f73ac2d1a91f5a501e93c6d1a2da04f2a930a4fe | [
"Apache-2.0"
] | permissive | rushabh-v/estimator | ae9ff7c485e4b5beafe2ba6be452eeb099cd63d9 | 6915557cef8bfc86f29f87e4467d601e4553b957 | refs/heads/master | 2021-01-06T18:14:19.948301 | 2020-02-13T02:28:23 | 2020-02-13T02:28:59 | 241,434,870 | 2 | 0 | Apache-2.0 | 2020-02-18T18:20:37 | 2020-02-18T18:20:36 | null | UTF-8 | Python | false | false | 91,108 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import run_config
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
# This is so that we can easily switch between feature_column and
# feature_column_v2 for testing.
feature_column.numeric_column = feature_column._numeric_column
feature_column.categorical_column_with_hash_bucket = feature_column._categorical_column_with_hash_bucket # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_list = feature_column._categorical_column_with_vocabulary_list # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_file = feature_column._categorical_column_with_vocabulary_file # pylint: disable=line-too-long
feature_column.embedding_column = feature_column._embedding_column
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return tf.compat.v1.debugging.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [tf.compat.v1.initializers.global_variables()]
with tf.compat.v1.Session() as sess:
sess.run(init_all_op)
tf.compat.v1.train.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
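  # Pushes the parsed feature tensors through a FIFOQueue (serviced by a
  # QueueRunner) and returns the dequeued tensors keyed by feature name.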
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with tf.compat.v1.variable_scope(
tf.compat.v1.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = tf.compat.v1.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables_lib.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
tf.sparse.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
# This results in weights larger than the default partition size of 64M,
# so partitioned weights are created (each weight uses 4 bytes).
x_dim = 32 << 20
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with tf.compat.v1.test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
tf.sparse.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,),)
}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,), (1,))
}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with tf.Graph().as_default():
tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
tf.Variable([7.0, 8.0], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with tf.Graph().as_default():
tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
tf.Variable([5.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
self._fc_lib.numeric_column('age'),
self._fc_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([20, 40]),
'height': np.array([4, 8])
},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns_mix(self):
with tf.Graph().as_default():
tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
tf.Variable([5.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column.numeric_column('age'),
tf.feature_column.numeric_column('height')
]
def _input_fn():
features_ds = tf.compat.v1.data.Dataset.from_tensor_slices({
'age': np.array([20, 40]),
'height': np.array([4, 8])
})
labels_ds = tf.compat.v1.data.Dataset.from_tensor_slices(
np.array([[213.], [421.]]))
return (tf.compat.v1.data.Dataset.zip(
(features_ds, labels_ds)).batch(batch_size).repeat(None))
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=_input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with tf.Graph().as_default():
tf.Variable([[10.]], name='linear/linear_model/x/weights')
tf.Variable([.2], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (self._fc_lib.numeric_column('x', shape=(x_dim,)),)
with tf.Graph().as_default():
tf.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
tf.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with tf.Graph().as_default():
tf.Variable([[10.]], name='linear/linear_model/x0/weights')
tf.Variable([[20.]], name='linear/linear_model/x1/weights')
tf.Variable([.2], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('x0'),
self._fc_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={
'x0': np.array([[2.]]),
'x1': np.array([[3.]])
},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
def testTwoFeatureColumnsMix(self):
"""Tests predict with two feature columns."""
with tf.Graph().as_default():
tf.Variable([[10.]], name='linear/linear_model/x0/weights')
tf.Variable([[20.]], name='linear/linear_model/x1/weights')
tf.Variable([.2], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column.numeric_column('x0'),
tf.feature_column.numeric_column('x1')),
model_dir=self._model_dir)
def _predict_input_fn():
return tf.compat.v1.data.Dataset.from_tensor_slices({
'x0': np.array([[2.]]),
'x1': np.array([[3.]])
}).batch(1)
predictions = linear_regressor.predict(input_fn=_predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
def testSparseCombiner(self):
w_a = 2.0
w_b = 3.0
w_c = 5.0
bias = 5.0
with tf.Graph().as_default():
tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
return tf.compat.v1.data.Dataset.from_tensors({
'language':
tf.sparse.SparseTensor(
values=['a', 'c', 'b', 'c'],
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
dense_shape=[2, 2]),
})
feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
'language', vocabulary_list=['a', 'b', 'c']),)
# Check prediction for each sparse_combiner.
# With sparse_combiner = 'sum', we have
# logits_1 = w_a + w_c + bias
# = 2.0 + 5.0 + 5.0 = 12.0
# logits_2 = w_b + w_c + bias
# = 3.0 + 5.0 + 5.0 = 13.0
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[12.0], [13.0]], predicted_scores)
# With sparse_combiner = 'mean', we have
# logits_1 = 1/2 * (w_a + w_c) + bias
# = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
# logits_2 = 1/2 * (w_b + w_c) + bias
# = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='mean')
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[8.5], [9.0]], predicted_scores)
# With sparse_combiner = 'sqrtn', we have
# logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
# = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
# logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
# = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
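    # Note: with sparse_combiner='sqrtn' and unit-weight sparse values, each
    # row's sum is divided by sqrt(#non-zero entries) = sqrt(2), which is
    # where the sqrt(2)/2 ~= 0.70711 factor above comes from.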
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='sqrtn')
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
self._fc_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return tf.compat.v1.assign_add(global_step, 1).op
return tf.no_op()
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
if global_step is not None:
return tf.compat.v1.assign_add(global_step, 1).op
return tf.no_op()
mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
spec=tf.compat.v1.train.Optimizer,
wraps=tf.compat.v1.train.Optimizer(
use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in tf.train.list_variables(self._model_dir)
}
self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
tf.train.load_variable(self._model_dir,
tf.compat.v1.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
tf.train.load_variable(self._model_dir, BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_1
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({
'age': ((17,),)
}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
# loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({
'age': ((17,), (15,))
}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn, fc_lib=feature_column):
self._linear_classifier_fn = linear_classifier_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return tf.compat.v1.assign_add(global_step, 1).op
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
return tf.compat.v1.assign_add(global_step, 1).op
mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
spec=tf.compat.v1.train.Optimizer,
wraps=tf.compat.v1.train.Optimizer(
use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
n_classes,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape
for (name, shape) in tf.train.list_variables(self._model_dir)
}
self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
tf.train.load_variable(self._model_dir,
tf.compat.v1.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(
expected_age_weight,
tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
tf.train.load_variable(self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_2
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_1
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as the logits are all the same, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
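    # Concretely, this is -log(1/2) ~= 0.69315 for n_classes=2 and
    # -log(1/4) ~= 1.38629 for n_classes=4.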
mock_optimizer = self._mock_optimizer(
expected_loss=(-1 * math.log(1.0 / n_classes)))
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
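    # Numerically, sigmoid(-1) ~= 0.26894, so the binary loss above is
    # -log(0.26894) ~= 1.3133.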
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
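    # Numerically: -log(sigmoid(-1)) ~= 1.3133 and -log(sigmoid(+1)) ~= 0.3133,
    # so loss ~= 0.8 * 1.3133 + 0.2 * 0.3133 ~= 1.1133.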
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17.0, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
    # loss[1] = (1 - 0) * -log ( 1 - sigmoid(2) ) = 2.1269
# expected_loss = loss[0] + loss[1]
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
# expected_loss = loss[0] + loss[1]
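    # Numerically, sigmoid(2) ~= 0.8808, so the binary loss[1] above is
    # -log(1 - 0.8808) ~= 2.1269.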
if n_classes == 2:
expected_loss = 1.3133 + 2.1269
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn, fc_lib=feature_column):
self._linear_classifier_fn = linear_classifier_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=1)
if n_classes == 2:
      # Binary classes: logits = 1. * -11. - 30. = -41, so the sigmoid
      # cross-entropy loss is -log(sigmoid(-41)) ~= 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(
sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics),
rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({
'age': (age)
}, (label)), steps=1)
if n_classes == 2:
      # Logits are (-1., 1.) and labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(
sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics),
rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({
'age': (age),
'w': (weights)
}, (label)), steps=1)
if n_classes == 2:
      # Logits are (-1., 1.) and labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE:
(max(label_mean, 1 - label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(
sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics),
rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn, fc_lib=feature_column):
self._linear_classifier_fn = linear_classifier_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
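      # Note: softmax([0, z]) = [1 - sigmoid(z), sigmoid(z)], so these
      # probabilities match the 'logistic' value used below.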
expected_predictions = {
'class_ids': [0],
'all_class_ids': [0, 1],
'classes': [label_output_fn(0)],
'all_classes': [label_output_fn(0),
label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
all_class_ids = list(range(len(onedim_logits)))
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'all_class_ids': all_class_ids,
'classes': [label_output_fn(class_ids)],
'all_classes': [label_output_fn(i) for i in all_class_ids],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllEqual(expected_predictions['all_classes'],
predictions[0]['all_classes'])
expected_predictions.pop('all_classes')
predictions[0].pop('all_classes')
self.assertAllClose(
sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testSparseCombiner(self):
w_a = 2.0
w_b = 3.0
w_c = 5.0
bias = 5.0
with tf.Graph().as_default():
tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
tf.Variable([bias], name=BIAS_NAME)
tf.Variable(
1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
return tf.compat.v1.data.Dataset.from_tensors({
'language':
tf.sparse.SparseTensor(
values=['a', 'c', 'b', 'c'],
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
dense_shape=[2, 2]),
})
feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
'language', vocabulary_list=['a', 'b', 'c']),)
# Check prediction for each sparse_combiner.
# With sparse_combiner = 'sum', we have
# logits_1 = w_a + w_c + bias
# = 2.0 + 5.0 + 5.0 = 12.0
# logits_2 = w_b + w_c + bias
# = 3.0 + 5.0 + 5.0 = 13.0
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[12.0], [13.0]], predicted_scores)
# With sparse_combiner = 'mean', we have
# logits_1 = 1/2 * (w_a + w_c) + bias
# = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
# logits_2 = 1/2 * (w_b + w_c) + bias
# = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='mean')
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[8.5], [9.0]], predicted_scores)
# With sparse_combiner = 'sqrtn', we have
# logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
# = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
# logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
# = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
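    # Note: as in the regressor test above, the sqrt(2)/2 ~= 0.70711 factor is
    # 1/sqrt(2), i.e. l2-normalization over the 2 non-zero entries per row.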
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='sqrtn')
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn, fc_lib=feature_column):
self._linear_classifier_fn = linear_classifier_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
self._fc_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=x)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
def __init__(self, fc_lib=feature_column):
self._fc_lib = fc_lib
def test_basic_logit_correctness(self):
"""linear_logit_fn simply wraps feature_column_lib.linear_model."""
age = self._fc_lib.numeric_column('age')
with tf.Graph().as_default():
logit_fn = linear.linear_logit_fn_builder(units=2, feature_columns=[age])
logits = logit_fn(features={'age': [[23.], [31.]]})
bias_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
'linear_model/bias_weights')[0]
age_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
sess.run(age_var.assign([[2.0, 3.0]]))
# [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
# [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
def test_compute_fraction_of_zero(self):
"""Tests the calculation of sparsity."""
if self._fc_lib != feature_column:
return
age = tf.feature_column.numeric_column('age')
occupation = feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=5)
with tf.Graph().as_default():
cols_to_vars = {}
tf.compat.v1.feature_column.linear_model(
features={
'age': [[23.], [31.]],
'occupation': [['doctor'], ['engineer']]
},
feature_columns=[age, occupation],
units=3,
cols_to_vars=cols_to_vars)
cols_to_vars.pop('bias')
fraction_zero = linear._compute_fraction_of_zero(
list(cols_to_vars.values()))
age_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
# Upon initialization, all variables will be zero.
self.assertAllClose(1, fraction_zero.eval())
sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights is zero, and all of the 15 occupation
        # weights (5 hash buckets x 3-dim output) are zero, i.e. 16 of 18.
self.assertAllClose(16. / 18., fraction_zero.eval())
def test_compute_fraction_of_zero_v2(self):
"""Tests the calculation of sparsity."""
if self._fc_lib != feature_column_v2:
return
age = tf.feature_column.numeric_column('age')
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=5)
with tf.Graph().as_default():
model = feature_column_v2.LinearModel(
feature_columns=[age, occupation], units=3, name='linear_model')
features = {
'age': [[23.], [31.]],
'occupation': [['doctor'], ['engineer']]
}
model(features)
variables = model.variables
variables.remove(model.bias)
fraction_zero = linear._compute_fraction_of_zero(variables)
age_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
# Upon initialization, all variables will be zero.
self.assertAllClose(1, fraction_zero.eval())
sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights is zero, and all of the 15 occupation
        # weights (5 hash buckets x 3-dim output) are zero, i.e. 16 of 18.
self.assertAllClose(16. / 18., fraction_zero.eval())
class BaseLinearWarmStartingTest(object):
def __init__(self,
_linear_classifier_fn,
_linear_regressor_fn,
fc_lib=feature_column):
self._linear_classifier_fn = _linear_classifier_fn
self._linear_regressor_fn = _linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
self._ckpt_and_vocab_dir = tempfile.mkdtemp()
# Make a dummy input_fn.
def _input_fn():
features = {
'age': [[23.], [31.]],
'age_in_years': [[23.], [31.]],
'occupation': [['doctor'], ['consultant']]
}
return features, [0, 1]
self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
def test_classifier_basic_warm_starting(self):
"""Tests correctness of LinearClassifier default warm-start."""
age = self._fc_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=linear_classifier.model_dir)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_classifier.get_variable_names():
self.assertAllClose(
linear_classifier.get_variable_value(variable_name),
warm_started_linear_classifier.get_variable_value(variable_name))
def test_regressor_basic_warm_starting(self):
"""Tests correctness of LinearRegressor default warm-start."""
age = self._fc_lib.numeric_column('age')
# Create a LinearRegressor and train to save a checkpoint.
linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
optimizer='SGD')
linear_regressor.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearRegressor, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
optimizer=tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=linear_regressor.model_dir)
warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_regressor.get_variable_names():
self.assertAllClose(
linear_regressor.get_variable_value(variable_name),
warm_started_linear_regressor.get_variable_value(variable_name))
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
age = self._fc_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0),
# The provided regular expression will only warm-start the age variable
# and not the bias.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
vars_to_warm_start='.*(age).*'))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# Bias should still be zero from initialization.
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_vocab_remapping_and_partitioning(self):
"""Tests warm-starting with vocab remapping and partitioning."""
vocab_list = ['doctor', 'lawyer', 'consultant']
vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_list))
occupation = self._fc_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=vocab_file,
vocabulary_size=len(vocab_list))
# Create a LinearClassifier and train to save a checkpoint.
partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2)
linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD',
partitioner=partitioner)
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change). Use a new FeatureColumn with a
# different vocabulary for occupation.
new_vocab_list = ['doctor', 'consultant', 'engineer']
new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
'new_occupation_vocab')
with open(new_vocab_file, 'w') as f:
f.write('\n'.join(new_vocab_list))
new_occupation = self._fc_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=new_vocab_file,
vocabulary_size=len(new_vocab_list))
# We can create our VocabInfo object from the new and old occupation
    # FeatureColumns.
occupation_vocab_info = estimator.VocabInfo(
new_vocab=new_occupation.vocabulary_file,
new_vocab_size=new_occupation.vocabulary_size,
num_oov_buckets=new_occupation.num_oov_buckets,
old_vocab=occupation.vocabulary_file,
old_vocab_size=occupation.vocabulary_size,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=tf.compat.v1.initializers.random_uniform(
minval=0.39, maxval=0.39))
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
n_classes=4,
optimizer=tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_vocab_info={
OCCUPATION_WEIGHT_NAME: occupation_vocab_info
},
# Explicitly providing None here will only warm-start variables
# referenced in var_name_to_vocab_info (the bias will not be
# warm-started).
vars_to_warm_start=None),
partitioner=partitioner)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# 'doctor' was ID-0 and still ID-0.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[0, :])
# 'consultant' was ID-2 and now ID-1.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[1, :])
# 'engineer' is a new entry and should be initialized with the
# backup_initializer in VocabInfo.
self.assertAllClose([0.39] * 4,
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[2, :])
# Bias should still be zero (from initialization logic).
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_naming_change(self):
"""Tests warm-starting with a Tensor name remapping."""
age_in_years = self._fc_lib.numeric_column('age_in_years')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age_in_years],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[self._fc_lib.numeric_column('age')],
n_classes=4,
optimizer=tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0),
        # The 'age' variable corresponds to the 'age_in_years' variable in the
# previous model.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_prev_var_name={
AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
}))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(
AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# The bias is also warm-started (with no name remapping).
self.assertAllClose(
linear_classifier.get_variable_value(BIAS_NAME),
warm_started_linear_classifier.get_variable_value(BIAS_NAME))
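# --- Hedged sketch (assumptions: plain tf.estimator API, illustrative feature column and
# checkpoint path) of the warm-start pattern the tests above exercise, outside the test harness:
#   classifier = tf.estimator.LinearClassifier(
#       feature_columns=[tf.feature_column.numeric_column('age')],
#       model_dir='/tmp/linear_ckpt')
#   classifier.train(input_fn=train_input_fn, max_steps=100)
#   warm_started = tf.estimator.LinearClassifier(
#       feature_columns=[tf.feature_column.numeric_column('age')],
#       warm_start_from='/tmp/linear_ckpt')
#   warm_started.train(input_fn=train_input_fn, max_steps=100)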
| [
"[email protected]"
] | |
c2786c1ec09a518cd998b9512ec4a0142ff1e4ce | f735a0265dbad9eaf3c5ce791273c567ad2907a2 | /example/ui/dw_widgets_pyside_ui.py | cc46c19375c7ad94236f94848a9e737cbc94e205 | [
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-3.0"
] | permissive | RicBent/QDarkStyleSheet | cd10d57bf7bbbc22bf3f1dde5f736df26993143a | a085ecbc79d4502afc0c68ffb3bfc702dcf4e65b | refs/heads/master | 2020-04-04T16:19:17.442084 | 2018-11-01T19:34:29 | 2018-11-01T19:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,616 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dw_widgets.ui'
#
# Created: Thu Nov 1 16:06:05 2018
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_DockWidget(object):
def setupUi(self, DockWidget):
DockWidget.setObjectName("DockWidget")
DockWidget.resize(269, 306)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout = QtGui.QGridLayout(self.dockWidgetContents)
self.gridLayout.setObjectName("gridLayout")
self.label_81 = QtGui.QLabel(self.dockWidgetContents)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_81.setFont(font)
self.label_81.setObjectName("label_81")
self.gridLayout.addWidget(self.label_81, 0, 1, 1, 1)
self.label_82 = QtGui.QLabel(self.dockWidgetContents)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_82.setFont(font)
self.label_82.setObjectName("label_82")
self.gridLayout.addWidget(self.label_82, 0, 2, 1, 1)
self.label_56 = QtGui.QLabel(self.dockWidgetContents)
self.label_56.setMinimumSize(QtCore.QSize(0, 0))
self.label_56.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_56.setFont(font)
self.label_56.setObjectName("label_56")
self.gridLayout.addWidget(self.label_56, 1, 0, 1, 1)
self.listWidget = QtGui.QListWidget(self.dockWidgetContents)
self.listWidget.setMinimumSize(QtCore.QSize(0, 0))
self.listWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.listWidget.setObjectName("listWidget")
QtGui.QListWidgetItem(self.listWidget)
QtGui.QListWidgetItem(self.listWidget)
QtGui.QListWidgetItem(self.listWidget)
QtGui.QListWidgetItem(self.listWidget)
self.gridLayout.addWidget(self.listWidget, 1, 1, 1, 1)
self.listWidgetDis = QtGui.QListWidget(self.dockWidgetContents)
self.listWidgetDis.setEnabled(False)
self.listWidgetDis.setObjectName("listWidgetDis")
QtGui.QListWidgetItem(self.listWidgetDis)
QtGui.QListWidgetItem(self.listWidgetDis)
QtGui.QListWidgetItem(self.listWidgetDis)
QtGui.QListWidgetItem(self.listWidgetDis)
self.gridLayout.addWidget(self.listWidgetDis, 1, 2, 1, 1)
self.label_57 = QtGui.QLabel(self.dockWidgetContents)
self.label_57.setMinimumSize(QtCore.QSize(0, 0))
self.label_57.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_57.setFont(font)
self.label_57.setObjectName("label_57")
self.gridLayout.addWidget(self.label_57, 2, 0, 1, 1)
self.treeWidget = QtGui.QTreeWidget(self.dockWidgetContents)
self.treeWidget.setMinimumSize(QtCore.QSize(0, 0))
self.treeWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.treeWidget.setObjectName("treeWidget")
item_0 = QtGui.QTreeWidgetItem(self.treeWidget)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_2 = QtGui.QTreeWidgetItem(item_1)
item_0 = QtGui.QTreeWidgetItem(self.treeWidget)
item_1 = QtGui.QTreeWidgetItem(item_0)
self.gridLayout.addWidget(self.treeWidget, 2, 1, 1, 1)
self.treeWidgetDis = QtGui.QTreeWidget(self.dockWidgetContents)
self.treeWidgetDis.setEnabled(False)
self.treeWidgetDis.setObjectName("treeWidgetDis")
item_0 = QtGui.QTreeWidgetItem(self.treeWidgetDis)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_2 = QtGui.QTreeWidgetItem(item_1)
item_0 = QtGui.QTreeWidgetItem(self.treeWidgetDis)
item_1 = QtGui.QTreeWidgetItem(item_0)
self.gridLayout.addWidget(self.treeWidgetDis, 2, 2, 1, 1)
self.label_58 = QtGui.QLabel(self.dockWidgetContents)
self.label_58.setMinimumSize(QtCore.QSize(0, 0))
self.label_58.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_58.setFont(font)
self.label_58.setObjectName("label_58")
self.gridLayout.addWidget(self.label_58, 3, 0, 1, 1)
self.tableWidget = QtGui.QTableWidget(self.dockWidgetContents)
self.tableWidget.setMinimumSize(QtCore.QSize(0, 0))
self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(2)
self.tableWidget.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(2, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setItem(2, 1, item)
self.gridLayout.addWidget(self.tableWidget, 3, 1, 1, 1)
self.tableWidgetDis = QtGui.QTableWidget(self.dockWidgetContents)
self.tableWidgetDis.setEnabled(False)
self.tableWidgetDis.setObjectName("tableWidgetDis")
self.tableWidgetDis.setColumnCount(2)
self.tableWidgetDis.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(2, 0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDis.setItem(2, 1, item)
self.gridLayout.addWidget(self.tableWidgetDis, 3, 2, 1, 1)
DockWidget.setWidget(self.dockWidgetContents)
self.retranslateUi(DockWidget)
QtCore.QMetaObject.connectSlotsByName(DockWidget)
def retranslateUi(self, DockWidget):
DockWidget.setWindowTitle(QtGui.QApplication.translate("DockWidget", "Widgets", None, QtGui.QApplication.UnicodeUTF8))
self.label_81.setText(QtGui.QApplication.translate("DockWidget", "Enabled", None, QtGui.QApplication.UnicodeUTF8))
self.label_82.setText(QtGui.QApplication.translate("DockWidget", "Disabled", None, QtGui.QApplication.UnicodeUTF8))
self.label_56.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_56.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_56.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_56.setText(QtGui.QApplication.translate("DockWidget", "ListWidget", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
__sortingEnabled = self.listWidget.isSortingEnabled()
self.listWidget.setSortingEnabled(False)
self.listWidget.item(0).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.item(1).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.item(2).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.item(3).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setSortingEnabled(__sortingEnabled)
__sortingEnabled = self.listWidgetDis.isSortingEnabled()
self.listWidgetDis.setSortingEnabled(False)
self.listWidgetDis.item(0).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidgetDis.item(1).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidgetDis.item(2).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidgetDis.item(3).setText(QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.listWidgetDis.setSortingEnabled(__sortingEnabled)
self.label_57.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_57.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_57.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_57.setText(QtGui.QApplication.translate("DockWidget", "TreeWidget", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.setSortingEnabled(True)
self.treeWidget.headerItem().setText(0, QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.headerItem().setText(1, QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
__sortingEnabled = self.treeWidget.isSortingEnabled()
self.treeWidget.setSortingEnabled(False)
self.treeWidget.topLevelItem(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.topLevelItem(0).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.topLevelItem(0).child(0).setText(1, QtGui.QApplication.translate("DockWidget", "Test", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.topLevelItem(0).child(0).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.topLevelItem(1).setText(0, QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.topLevelItem(1).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.setSortingEnabled(__sortingEnabled)
self.treeWidgetDis.setSortingEnabled(True)
self.treeWidgetDis.headerItem().setText(0, QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.headerItem().setText(1, QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
__sortingEnabled = self.treeWidgetDis.isSortingEnabled()
self.treeWidgetDis.setSortingEnabled(False)
self.treeWidgetDis.topLevelItem(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.topLevelItem(0).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.topLevelItem(0).child(0).setText(1, QtGui.QApplication.translate("DockWidget", "Test", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.topLevelItem(0).child(0).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.topLevelItem(1).setText(0, QtGui.QApplication.translate("DockWidget", "New Item", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.topLevelItem(1).child(0).setText(0, QtGui.QApplication.translate("DockWidget", "New Subitem", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidgetDis.setSortingEnabled(__sortingEnabled)
self.label_58.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_58.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_58.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_58.setText(QtGui.QApplication.translate("DockWidget", "TableWidget", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.verticalHeaderItem(0).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.verticalHeaderItem(1).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.verticalHeaderItem(2).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
__sortingEnabled = self.tableWidget.isSortingEnabled()
self.tableWidget.setSortingEnabled(False)
self.tableWidget.item(0, 0).setText(QtGui.QApplication.translate("DockWidget", "1.23", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.item(0, 1).setText(QtGui.QApplication.translate("DockWidget", "Hello", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.item(1, 0).setText(QtGui.QApplication.translate("DockWidget", "1,45", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.item(1, 1).setText(QtGui.QApplication.translate("DockWidget", "Olá", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.item(2, 0).setText(QtGui.QApplication.translate("DockWidget", "12/12/2012", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.item(2, 1).setText(QtGui.QApplication.translate("DockWidget", "Oui", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setSortingEnabled(__sortingEnabled)
self.tableWidgetDis.verticalHeaderItem(0).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.verticalHeaderItem(1).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.verticalHeaderItem(2).setText(QtGui.QApplication.translate("DockWidget", "New Row", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("DockWidget", "New Column", None, QtGui.QApplication.UnicodeUTF8))
__sortingEnabled = self.tableWidgetDis.isSortingEnabled()
self.tableWidgetDis.setSortingEnabled(False)
self.tableWidgetDis.item(0, 0).setText(QtGui.QApplication.translate("DockWidget", "1.23", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.item(0, 1).setText(QtGui.QApplication.translate("DockWidget", "Hello", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.item(1, 0).setText(QtGui.QApplication.translate("DockWidget", "1,45", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.item(1, 1).setText(QtGui.QApplication.translate("DockWidget", "Olá", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.item(2, 0).setText(QtGui.QApplication.translate("DockWidget", "12/12/2012", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.item(2, 1).setText(QtGui.QApplication.translate("DockWidget", "Oui", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidgetDis.setSortingEnabled(__sortingEnabled)
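# --- Hedged usage sketch (not produced by pyside-uic; added for illustration only).
# It shows the usual way to attach the generated Ui_DockWidget class to a live
# QDockWidget; the standalone-app wrapper below is an assumption, not part of the .ui file.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    dock = QtGui.QDockWidget()
    ui = Ui_DockWidget()
    ui.setupUi(dock)  # builds the child widgets onto the dock widget
    dock.show()
    sys.exit(app.exec_())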
| [
"[email protected]"
] | |
a4e2cd025347721566a4b4b6d33b1669cba139cf | 93a720d9242c73c919ec30f6018d126a391f473f | /ShowUserNonOwnerDriveACLs.py | 4611a76f24cca4f71c23283af816a2f0ad50292c | [] | no_license | scottreleehw/GAM-Scripts3 | c8fa4abddb64e47d8a3d30dd7e19e29634c9e965 | 7eab4f86214bfeb00ee4dd6131828a55f1f42c56 | refs/heads/master | 2023-01-09T06:08:08.093789 | 2020-11-05T19:36:14 | 2020-11-05T19:36:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | #!/usr/bin/env python3
"""
# Purpose: For a Google Drive User, get all drive file ACLs for files except those indicating the user as owner
# Note: This script can use Basic or Advanced GAM:
# https://github.com/jay0lee/GAM
# https://github.com/taers232c/GAMADV-XTD3
# 1: Use print filelist to get selected ACLs
# Basic: gam user [email protected] print filelist id title permissions owners > filelistperms.csv
# Advanced: gam user [email protected] print filelist fields id,title,permissions,owners.emailaddress > filelistperms.csv
# 2: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,emailAddress"
# that lists the driveFileIds/Titles for all ACLs except those indicating the user as owner
# $ python3 ShowUserNonOwnerDriveACLs.py filelistperms.csv localperms.csv
"""
import csv
import re
import sys
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'emailAddress'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
inputFile = sys.stdin
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
for k, v in iter(row.items()):
mg = PERMISSIONS_N_TYPE.match(k)
if mg and v:
permissions_N = mg.group(1)
emailAddress = row.get(f'permissions.{permissions_N}.emailAddress', '')
if v != 'user' or row[f'permissions.{permissions_N}.role'] != 'owner' or emailAddress != row['owners.0.emailAddress']:
outputCSV.writerow({'Owner': row['owners.0.emailAddress'],
'driveFileId': row['id'],
'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
'emailAddress': emailAddress})
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
| [
"[email protected]"
] | |
becd6b60081e776ae5a505d8fda91b85fce26a25 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/servicefabric/v20190301preview/service.py | a1dd32a3ef5290bb5498425ed70ed2ee1b75dad7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 29,220 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ServiceArgs', 'Service']
@pulumi.input_type
class ServiceArgs:
def __init__(__self__, *,
application_name: pulumi.Input[str],
cluster_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_kind: pulumi.Input[Union[str, 'ServiceKind']],
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Service resource.
:param pulumi.Input[str] application_name: The name of the application resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ServiceKind']] service_kind: The kind of service (Stateless or Stateful).
:param pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]] correlation_scheme: A list that describes the correlation of the service with other services.
:param pulumi.Input[Union[str, 'MoveCost']] default_move_cost: Specifies the move cost for the service.
:param pulumi.Input[str] location: Azure resource location.
:param pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']] partition_description: Describes how the service is partitioned.
        :param pulumi.Input[str] placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "(NodeType == blue)".
:param pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]] service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects.
:param pulumi.Input[str] service_name: The name of the service resource in the format of {applicationName}~{serviceName}.
:param pulumi.Input[Union[str, 'ArmServicePackageActivationMode']] service_package_activation_mode: The activation Mode of the service package
        :param pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]] service_placement_policies: A list that describes the placement policies of the service.
:param pulumi.Input[str] service_type_name: The name of the service type
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
"""
pulumi.set(__self__, "application_name", application_name)
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_kind", service_kind)
if correlation_scheme is not None:
pulumi.set(__self__, "correlation_scheme", correlation_scheme)
if default_move_cost is not None:
pulumi.set(__self__, "default_move_cost", default_move_cost)
if location is not None:
pulumi.set(__self__, "location", location)
if partition_description is not None:
pulumi.set(__self__, "partition_description", partition_description)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if service_load_metrics is not None:
pulumi.set(__self__, "service_load_metrics", service_load_metrics)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if service_package_activation_mode is not None:
pulumi.set(__self__, "service_package_activation_mode", service_package_activation_mode)
if service_placement_policies is not None:
pulumi.set(__self__, "service_placement_policies", service_placement_policies)
if service_type_name is not None:
pulumi.set(__self__, "service_type_name", service_type_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="applicationName")
def application_name(self) -> pulumi.Input[str]:
"""
The name of the application resource.
"""
return pulumi.get(self, "application_name")
@application_name.setter
def application_name(self, value: pulumi.Input[str]):
pulumi.set(self, "application_name", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the cluster resource.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceKind")
def service_kind(self) -> pulumi.Input[Union[str, 'ServiceKind']]:
"""
The kind of service (Stateless or Stateful).
"""
return pulumi.get(self, "service_kind")
@service_kind.setter
def service_kind(self, value: pulumi.Input[Union[str, 'ServiceKind']]):
pulumi.set(self, "service_kind", value)
@property
@pulumi.getter(name="correlationScheme")
def correlation_scheme(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "correlation_scheme")
@correlation_scheme.setter
def correlation_scheme(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]]):
pulumi.set(self, "correlation_scheme", value)
@property
@pulumi.getter(name="defaultMoveCost")
def default_move_cost(self) -> Optional[pulumi.Input[Union[str, 'MoveCost']]]:
"""
Specifies the move cost for the service.
"""
return pulumi.get(self, "default_move_cost")
@default_move_cost.setter
def default_move_cost(self, value: Optional[pulumi.Input[Union[str, 'MoveCost']]]):
pulumi.set(self, "default_move_cost", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Azure resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="partitionDescription")
def partition_description(self) -> Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]]:
"""
Describes how the service is partitioned.
"""
return pulumi.get(self, "partition_description")
@partition_description.setter
def partition_description(self, value: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]]):
pulumi.set(self, "partition_description", value)
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[pulumi.Input[str]]:
"""
        The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "(NodeType == blue)".
"""
return pulumi.get(self, "placement_constraints")
@placement_constraints.setter
def placement_constraints(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "placement_constraints", value)
@property
@pulumi.getter(name="serviceLoadMetrics")
def service_load_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]]:
"""
The service load metrics is given as an array of ServiceLoadMetricDescription objects.
"""
return pulumi.get(self, "service_load_metrics")
@service_load_metrics.setter
def service_load_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]]):
pulumi.set(self, "service_load_metrics", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service resource in the format of {applicationName}~{serviceName}.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="servicePackageActivationMode")
def service_package_activation_mode(self) -> Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]]:
"""
The activation Mode of the service package
"""
return pulumi.get(self, "service_package_activation_mode")
@service_package_activation_mode.setter
def service_package_activation_mode(self, value: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]]):
pulumi.set(self, "service_package_activation_mode", value)
@property
@pulumi.getter(name="servicePlacementPolicies")
def service_placement_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]]:
"""
        A list that describes the placement policies of the service.
"""
return pulumi.get(self, "service_placement_policies")
@service_placement_policies.setter
def service_placement_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]]):
pulumi.set(self, "service_placement_policies", value)
@property
@pulumi.getter(name="serviceTypeName")
def service_type_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service type
"""
return pulumi.get(self, "service_type_name")
@service_type_name.setter
def service_type_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_type_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Service(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_kind: Optional[pulumi.Input[Union[str, 'ServiceKind']]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
The service resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_name: The name of the application resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]] correlation_scheme: A list that describes the correlation of the service with other services.
:param pulumi.Input[Union[str, 'MoveCost']] default_move_cost: Specifies the move cost for the service.
:param pulumi.Input[str] location: Azure resource location.
:param pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]] partition_description: Describes how the service is partitioned.
        :param pulumi.Input[str] placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "(NodeType == blue)".
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ServiceKind']] service_kind: The kind of service (Stateless or Stateful).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]] service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects.
:param pulumi.Input[str] service_name: The name of the service resource in the format of {applicationName}~{serviceName}.
:param pulumi.Input[Union[str, 'ArmServicePackageActivationMode']] service_package_activation_mode: The activation Mode of the service package
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]] service_placement_policies: A list that describes the placement policies of the service.
:param pulumi.Input[str] service_type_name: The name of the service type
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The service resource.
:param str resource_name: The name of the resource.
:param ServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_kind: Optional[pulumi.Input[Union[str, 'ServiceKind']]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceArgs.__new__(ServiceArgs)
if application_name is None and not opts.urn:
raise TypeError("Missing required property 'application_name'")
__props__.__dict__["application_name"] = application_name
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["correlation_scheme"] = correlation_scheme
__props__.__dict__["default_move_cost"] = default_move_cost
__props__.__dict__["location"] = location
__props__.__dict__["partition_description"] = partition_description
__props__.__dict__["placement_constraints"] = placement_constraints
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_kind is None and not opts.urn:
raise TypeError("Missing required property 'service_kind'")
__props__.__dict__["service_kind"] = service_kind
__props__.__dict__["service_load_metrics"] = service_load_metrics
__props__.__dict__["service_name"] = service_name
__props__.__dict__["service_package_activation_mode"] = service_package_activation_mode
__props__.__dict__["service_placement_policies"] = service_placement_policies
__props__.__dict__["service_type_name"] = service_type_name
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301preview:Service"), pulumi.Alias(type_="azure-native:servicefabric:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20170701preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20170701preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20190301:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20190601preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190601preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20191101preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20191101preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20200301:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20200301:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20201201preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20201201preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20210601:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20210601:Service")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Service, __self__).__init__(
'azure-native:servicefabric/v20190301preview:Service',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceArgs.__new__(ServiceArgs)
__props__.__dict__["correlation_scheme"] = None
__props__.__dict__["default_move_cost"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partition_description"] = None
__props__.__dict__["placement_constraints"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["service_kind"] = None
__props__.__dict__["service_load_metrics"] = None
__props__.__dict__["service_package_activation_mode"] = None
__props__.__dict__["service_placement_policies"] = None
__props__.__dict__["service_type_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="correlationScheme")
def correlation_scheme(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceCorrelationDescriptionResponse']]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "correlation_scheme")
@property
@pulumi.getter(name="defaultMoveCost")
def default_move_cost(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the move cost for the service.
"""
return pulumi.get(self, "default_move_cost")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Azure resource etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Azure resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionDescription")
def partition_description(self) -> pulumi.Output[Optional[Any]]:
"""
Describes how the service is partitioned.
"""
return pulumi.get(self, "partition_description")
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> pulumi.Output[Optional[str]]:
"""
        The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "(NodeType == blue)".
"""
return pulumi.get(self, "placement_constraints")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceKind")
def service_kind(self) -> pulumi.Output[str]:
"""
The kind of service (Stateless or Stateful).
"""
return pulumi.get(self, "service_kind")
@property
@pulumi.getter(name="serviceLoadMetrics")
def service_load_metrics(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceLoadMetricDescriptionResponse']]]:
"""
The service load metrics is given as an array of ServiceLoadMetricDescription objects.
"""
return pulumi.get(self, "service_load_metrics")
@property
@pulumi.getter(name="servicePackageActivationMode")
def service_package_activation_mode(self) -> pulumi.Output[Optional[str]]:
"""
The activation Mode of the service package
"""
return pulumi.get(self, "service_package_activation_mode")
@property
@pulumi.getter(name="servicePlacementPolicies")
def service_placement_policies(self) -> pulumi.Output[Optional[Sequence['outputs.ServicePlacementPolicyDescriptionResponse']]]:
"""
        A list that describes the placement policies of the service.
"""
return pulumi.get(self, "service_placement_policies")
@property
@pulumi.getter(name="serviceTypeName")
def service_type_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the service type
"""
return pulumi.get(self, "service_type_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
c0680485e5008a6554b28a45fbd927848f84b0a4 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/bwscrape/basic_twitter_scrapersefton_12.py | 60ef63d50469e74904a0718aad24bf8abe06d460 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'STEVEN Patton MANN'
RESULTS_PER_PAGE = '50'
LANGUAGE = 'en'
NUM_PAGES = 1500
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
        print 'Oh dear, failed to scrape %s' % base_url
break
| [
"[email protected]"
] | |
9960074f78a8ff9d0d36b41eb50fb4ad3968e291 | f0adf5afb93b7f0a67802e876a02e898cd92a172 | /Tencent/GNN/metapath2vec/Generate_metapaths.py | 18ff11473dcf9de3cb3004299cde11057d87f76b | [
"Apache-2.0"
] | permissive | orange-eng/internship | 9a2f746b3d50673038481392100d375f6eec82d3 | c8c566df453d3a4bdf692338f74916ae15792fa1 | refs/heads/main | 2023-07-18T11:46:36.659858 | 2021-08-31T09:39:10 | 2021-08-31T09:39:10 | 358,230,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import networkx as nx
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch import GraphConv  # DGL's built-in GCN layer (imported but not used in this script)
import dgl
import matplotlib.pyplot as plt
import random
import time
import tqdm
import sys
import os
def construct_graph():
file_user = './data/user_features.csv'
file_item = './data/item_features.csv'
file_edge = './data/JData_Action_201602.csv'
f_user = pd.read_csv(file_user)
f_item = pd.read_csv(file_item)
f_edge = pd.read_csv(file_edge)
    # keep a random sample of 10,000 interactions so the demo graph stays small
    f_edge = f_edge.sample(10000)
users = set()
items = set()
for index, row in f_edge.iterrows():
users.add(row['user_id'])
items.add(row['sku_id'])
    user_ids_index_map = {x: i for i, x in enumerate(users)}  # user id -> index
    item_ids_index_map = {x: i for i, x in enumerate(items)}  # item id -> index
    user_index_id_map = {i: x for i, x in enumerate(users)}  # index -> user id
    item_index_id_map = {i: x for i, x in enumerate(items)}  # index -> item id
user_item_src = []
user_item_dst = []
for index, row in f_edge.iterrows():
        user_item_src.append(user_ids_index_map.get(row['user_id']))  # look up the user's index
        user_item_dst.append(item_ids_index_map.get(row['sku_id']))  # look up the item's index
    # Build the heterogeneous user-item graph from the re-indexed nodes.
    '''
    Legacy DGL (<0.5) construction, kept for reference:
    ui = dgl.bipartite((user_item_src, user_item_dst), 'user', 'ui', 'item')  # user->item edges
    iu = dgl.bipartite((user_item_dst, user_item_src), 'item', 'iu', 'user')  # item->user edges
    hg = dgl.hetero_from_relations([ui, iu])
    '''
    data_dict = {
        ('user', 'ui', 'item'): (torch.tensor(user_item_src), torch.tensor(user_item_dst)),
        ('item', 'iu', 'user'): (torch.tensor(user_item_dst), torch.tensor(user_item_src)),
    }
    hg = dgl.heterograph(data_dict)
return hg, user_index_id_map, item_index_id_map
def parse_trace(trace, user_index_id_map, item_index_id_map):
s = []
for index in range(trace.size):
if index % 2 == 0:
s.append(user_index_id_map[trace[index]])
else:
s.append(item_index_id_map[trace[index]])
return ','.join(s)
def main():
hg, user_index_id_map, item_index_id_map = construct_graph()
meta_path = ['ui','iu','ui','iu','ui','iu']
num_walks_per_node = 1
f = open("./output/output_path.txt", "w")
    for user_idx in tqdm.trange(hg.number_of_nodes('user')):  # walk metapaths that start from each user node
        traces = dgl.contrib.sampling.metapath_random_walk(
            hg=hg, etypes=meta_path, seeds=[user_idx,], num_traces=num_walks_per_node)
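        # Hedged alternative (an assumption, not in the original script): on DGL >= 0.5 the
        # contrib sampler above is superseded by dgl.sampling.random_walk, which takes the
        # seed nodes plus the metapath and returns traces that already include the seed, e.g.:
        #   traces, _ = dgl.sampling.random_walk(hg, [user_idx], metapath=meta_path)
        #   tr = traces[0].numpy()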
tr = traces[0][0].numpy()
tr = np.insert(tr,0,user_idx)
res = parse_trace(tr, user_index_id_map, item_index_id_map)
f.write(res+'\n')
f.close()
if __name__=='__main__':
main() | [
"[email protected]"
] | |
410778eda359ba00d8f98afb5deb6ac84ae624c1 | 86319aad3690906f614ac1af28b8843529e9e0da | /thwackbin/__init__.py | ab95f9cc0ab6c8b46a7c0f643cb504f8c070fdcc | [] | no_license | sohgoh/thwackbin | b5828783a6179e96784bed0bdb894b179e3bea07 | ba9fedc4bcec598f367aa6d4f2567d1840c65c51 | refs/heads/master | 2021-01-21T03:14:08.261732 | 2014-04-16T03:53:51 | 2014-04-16T04:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """
thwackbin
~~~~~~~~~
Thwackbin is an HTTP request/response test service which exposes the AppThwack REST API.
This service should be used to test/validate clients which wish to consume the actual API endpoint.
"""
__name__ = 'thwackbin'
__version__ = '0.0.1'
__author__ = 'Andrew Hawker <[email protected]>'
import flask
def create_app():
"""
Create the thwackbin WSGI application.
"""
app = flask.Flask(__name__)
#Initialize mock data.
from thwackbin import data
data.init()
#Register blueprints.
from thwackbin import appthwack
app.register_blueprint(appthwack.api)
#Patch exc handlers to always return JSON.
from thwackbin import patch
app = patch.patch_exception_handlers(app)
app.config['DOWNLOAD_FOLDER'] = data.ROOT
return app
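# --- Hedged usage sketch (host/port values are illustrative assumptions). Note that
# __name__ is reassigned to 'thwackbin' at the top of this module, so an in-module
# __main__ guard would never fire; the app is expected to be run from a separate entry point:
#   from thwackbin import create_app
#   app = create_app()
#   app.run(host='127.0.0.1', port=5000, debug=True)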
| [
"[email protected]"
] | |
a6f1df8c8c3dd73bd2c937dd3e0186367e7ecc93 | 19980ea46bb169873f01aaa1e89fc0d8ba488030 | /samples/sampleopenflow/demos/demo11.py | b7138557fd71f15abbc7ceeaa7af146675c781a3 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gaberger/pybvc | b0e4c7fd280d87330fe15e18eecca94f089bf1a4 | bf546c4595a1a6282fca084865c5a0e69194030f | refs/heads/master | 2023-01-13T21:19:01.625744 | 2015-12-01T16:01:00 | 2015-12-01T16:01:00 | 42,198,126 | 0 | 0 | BSD-3-Clause | 2022-12-26T20:18:11 | 2015-09-09T18:53:12 | Python | UTF-8 | Python | false | false | 8,091 | py | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_ICMP,
IP_DSCP_CS2,
IP_ECN_CE)
def of_demo_11():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
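    # Example cfg.yml for this demo (all values are placeholders for your setup):
    #   ctrlIpAddr: 172.22.18.70
    #   ctrlPortNum: 8181
    #   ctrlUname: admin
    #   ctrlPswd: admin
    #   nodeName: openflow:1
    #   rundelay: 5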
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 11 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# ICMPv4 Type
# ICMPv4 Code
# IP DSCP
# IP ECN
# Input Port
# NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
# IP Protocol Type MUST be 1 -> ICMP
eth_type = ETH_TYPE_IPv4
eth_src = "00:00:00:11:23:ae"
eth_dst = "00:ff:20:01:1a:3d"
ipv4_src = "17.1.2.3/8"
ipv4_dst = "172.168.5.6/18"
ip_proto = IP_PROTO_ICMP
ip_dscp = IP_DSCP_CS2 # 'Class Selector' = 'Immediate'
ip_ecn = IP_ECN_CE # Congestion Encountered
icmpv4_type = 6 # Alternate Host Address
icmpv4_code = 3 # Alternate Address for Host
input_port = 10
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
% (ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" Ethernet Source Address (%s)\n"
" Ethernet Destination Address (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" IP Protocol Number (%s)\n"
" IP DSCP (%s)\n"
" IP ECN (%s)\n"
" ICMPv4 Type (%s)\n"
" ICMPv4 Code (%s)\n"
" Input Port (%s)"
% (hex(eth_type), eth_src,
eth_dst, ipv4_src, ipv4_dst,
ip_proto, ip_dscp, ip_ecn,
icmpv4_type, icmpv4_code,
input_port))
print (" Action: Output (NORMAL)")
time.sleep(rundelay)
flow_entry = FlowEntry()
table_id = 0
flow_entry.set_flow_table_id(table_id)
flow_id = 18
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_hard_timeout(0)
flow_entry.set_flow_idle_timeout(0)
flow_entry.set_flow_priority(1009)
# --- Instruction: 'Apply-actions'
# Action: 'Output' NORMAL
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port="NORMAL")
instruction.add_apply_action(action)
flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# IP ECN
# ICMPv4 Type
# ICMPv4 Code
# Input Port
match = Match()
match.set_eth_type(eth_type)
match.set_eth_src(eth_src)
match.set_eth_dst(eth_dst)
match.set_ipv4_src(ipv4_src)
match.set_ipv4_dst(ipv4_dst)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_ip_ecn(ip_ecn)
match.set_icmpv4_type(icmpv4_type)
match.set_icmpv4_code(icmpv4_code)
match.set_in_port(input_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
print json.dumps(flow, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node"
% (flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_11()
| [
"[email protected]"
] | |
e31a7060d75486ec7fd9ef972bacfc4b74111180 | b4f66ebb5084efa6839771b62a1034a82094df6e | /setup.py | d1770422892a97a4591b7399521fb5a79403887d | [] | no_license | mhfowler/howdoispeak | 06f49dab64f62dea727a429557887742d1509265 | 110287dba64ae308943f431f628e528d7c941748 | refs/heads/master | 2016-09-05T14:04:48.605245 | 2015-01-17T23:27:15 | 2015-01-17T23:27:15 | 18,955,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['munging/iphone_backup_upload.py']
DATA_FILES = ["munging/secrets.json"]
OPTIONS = {
'argv_emulation': True,
'iconfile':'green_circles.icns',
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
py_modules=["munging.common"]
)
| [
"[email protected]"
] | |
41ddd091df6ea055f01a6a9169e98ab77a7ceedd | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/app_loader/decorators.py | 49d57664e5a33eceff808d2385710e8a082e19e6 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,256 | py | # 2015.11.10 21:25:12 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/app_loader/decorators.py
from gui.app_loader.loader import g_appLoader
from gui.app_loader.settings import APP_NAME_SPACE as _SPACE
class app_getter(property):
def __init__(self, fget = None, doc = None, space = None):
super(app_getter, self).__init__(fget=fget, doc=doc)
self._space = space
def __get__(self, obj, objType = None):
return g_appLoader.getApp(self._space)
class def_lobby(property):
def __get__(self, obj, objType = None):
return g_appLoader.getDefLobbyApp()
class def_battle(property):
def __get__(self, obj, objType = None):
return g_appLoader.getDefBattleApp()
class sf_lobby(app_getter):
def __init__(self, fget = None, doc = None):
super(sf_lobby, self).__init__(fget, doc, _SPACE.SF_LOBBY)
class sf_battle(app_getter):
def __init__(self, fget = None, doc = None):
super(sf_battle, self).__init__(fget, doc, _SPACE.SF_BATTLE)
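# Usage sketch (illustrative, not from the original file): the descriptors above
# are meant to be declared as class attributes, e.g.
#     class SomeComponent(object):
#         lobbyApp = sf_lobby()
#         def onPopulate(self):
#             app = self.lobbyApp  # resolved through g_appLoader at attribute access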
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\app_loader\decorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:25:12 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
7e0c9c66789b5d70e91d999e13647ddd4b2098ae | e6132244015942c5ec75c8eff4f90cd0e9302470 | /src/wshop/apps/customer/notifications/services.py | 46bab44bef79471157b1207adfc9a79e677340e1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | vituocgia/wshop-core | d3173f603861685b523f6b66af502b9e94b7b0c2 | 5f6d1ec9e9158f13aab136c5bd901c41e69a1dba | refs/heads/master | 2020-03-18T08:25:14.669538 | 2018-05-23T05:55:56 | 2018-05-23T05:55:56 | 134,508,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from wshop.core.loading import get_model
Notification = get_model('customer', 'Notification')
def notify_user(user, subject, **kwargs):
"""
Send a simple notification to a user
"""
Notification.objects.create(recipient=user, subject=subject, **kwargs)
def notify_users(users, subject, **kwargs):
"""
Send a simple notification to an iterable of users
"""
for user in users:
notify_user(user, subject, **kwargs)
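# Illustrative usage (not part of the original module); assumes a Django User
# queryset is available in scope.
# notify_users(User.objects.filter(is_active=True), "Scheduled maintenance tonight")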
| [
"[email protected]"
] | |
9ac659ed774916b83e4235fa8eecb1f0508c3ea5 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_08_01/aio/_storage_management_client.py | 70166161a2d224894dce9e90a48fb8f889e68c78 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 10,437 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import StorageManagementClientConfiguration
from .operations import BlobContainersOperations, BlobInventoryPoliciesOperations, BlobServicesOperations, DeletedAccountsOperations, EncryptionScopesOperations, FileServicesOperations, FileSharesOperations, LocalUsersOperations, ManagementPoliciesOperations, ObjectReplicationPoliciesOperations, Operations, PrivateEndpointConnectionsOperations, PrivateLinkResourcesOperations, QueueOperations, QueueServicesOperations, SkusOperations, StorageAccountsOperations, TableOperations, TableServicesOperations, UsagesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class StorageManagementClient: # pylint: disable=too-many-instance-attributes
"""The Azure Storage Management API.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storage.v2021_08_01.aio.operations.Operations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.storage.v2021_08_01.aio.operations.SkusOperations
:ivar storage_accounts: StorageAccountsOperations operations
:vartype storage_accounts:
azure.mgmt.storage.v2021_08_01.aio.operations.StorageAccountsOperations
:ivar deleted_accounts: DeletedAccountsOperations operations
:vartype deleted_accounts:
azure.mgmt.storage.v2021_08_01.aio.operations.DeletedAccountsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.storage.v2021_08_01.aio.operations.UsagesOperations
:ivar management_policies: ManagementPoliciesOperations operations
:vartype management_policies:
azure.mgmt.storage.v2021_08_01.aio.operations.ManagementPoliciesOperations
:ivar blob_inventory_policies: BlobInventoryPoliciesOperations operations
:vartype blob_inventory_policies:
azure.mgmt.storage.v2021_08_01.aio.operations.BlobInventoryPoliciesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.storage.v2021_08_01.aio.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources:
azure.mgmt.storage.v2021_08_01.aio.operations.PrivateLinkResourcesOperations
:ivar object_replication_policies: ObjectReplicationPoliciesOperations operations
:vartype object_replication_policies:
azure.mgmt.storage.v2021_08_01.aio.operations.ObjectReplicationPoliciesOperations
:ivar local_users: LocalUsersOperations operations
:vartype local_users: azure.mgmt.storage.v2021_08_01.aio.operations.LocalUsersOperations
:ivar encryption_scopes: EncryptionScopesOperations operations
:vartype encryption_scopes:
azure.mgmt.storage.v2021_08_01.aio.operations.EncryptionScopesOperations
:ivar blob_services: BlobServicesOperations operations
:vartype blob_services: azure.mgmt.storage.v2021_08_01.aio.operations.BlobServicesOperations
:ivar blob_containers: BlobContainersOperations operations
:vartype blob_containers:
azure.mgmt.storage.v2021_08_01.aio.operations.BlobContainersOperations
:ivar file_services: FileServicesOperations operations
:vartype file_services: azure.mgmt.storage.v2021_08_01.aio.operations.FileServicesOperations
:ivar file_shares: FileSharesOperations operations
:vartype file_shares: azure.mgmt.storage.v2021_08_01.aio.operations.FileSharesOperations
:ivar queue_services: QueueServicesOperations operations
:vartype queue_services: azure.mgmt.storage.v2021_08_01.aio.operations.QueueServicesOperations
:ivar queue: QueueOperations operations
:vartype queue: azure.mgmt.storage.v2021_08_01.aio.operations.QueueOperations
:ivar table_services: TableServicesOperations operations
:vartype table_services: azure.mgmt.storage.v2021_08_01.aio.operations.TableServicesOperations
:ivar table: TableOperations operations
:vartype table: azure.mgmt.storage.v2021_08_01.aio.operations.TableOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-08-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = StorageManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize
)
self.skus = SkusOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.storage_accounts = StorageAccountsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.deleted_accounts = DeletedAccountsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.management_policies = ManagementPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.blob_inventory_policies = BlobInventoryPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.object_replication_policies = ObjectReplicationPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.local_users = LocalUsersOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.encryption_scopes = EncryptionScopesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.blob_services = BlobServicesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.blob_containers = BlobContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.file_services = FileServicesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.file_shares = FileSharesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.queue_services = QueueServicesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.queue = QueueOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.table_services = TableServicesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.table = TableOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "StorageManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
c36e62063a94a409390144111aa8b1febb637d79 | 1c594498900dd6f25e0a598b4c89b3e33cec5840 | /iqps/search/views.py | c6c5dfb564a3088854e3a4badd988789e7fb6d3b | [
"MIT"
] | permissive | thealphadollar/iqps | cef42ed8c86e4134e724a5f4967e96a83d672fcd | 187f6b134d82e2dce951b356cb0c7151994ca3ab | refs/heads/master | 2023-07-14T04:41:13.190595 | 2020-06-25T14:51:17 | 2020-06-25T14:51:17 | 277,360,692 | 0 | 0 | MIT | 2020-07-05T18:29:17 | 2020-07-05T18:29:16 | null | UTF-8 | Python | false | false | 3,320 | py | from django.shortcuts import render
from django.db import connection
from django.http import JsonResponse
from iqps.settings import DATABASES
#from .processors import SearchCursor
#Use this with sqlite
#db_name = DATABASES['default']['NAME']
def sqlite_search(subject, year=0, department="", paper_type=""):
year_filter = "AND p.year = {}".format(year) if year > 0 else ""
dep_filter = "AND d.code = '{}'".format(department) if department != "" else ""
type_filter = "AND p.paper_type = '{}'".format(paper_type) if paper_type != "" else ""
if subject == "":
return []
query =\
"""SELECT p.subject, p.year, p.department_id, d.id, d.code, p.paper_type, p.link, SIMILARITYSCORE(p.subject, '{}') AS s
FROM papers p JOIN departments d ON p.department_id = d.id
WHERE s > 70 {} {} {} ORDER BY s DESC;""".format(subject, year_filter, dep_filter, type_filter)
results = []
with SearchCursor(db_name) as c:
c.execute(query)
for row in c.fetchall():
results.append(row)
return results
def _search(subject, year=0, department="", paper_type="", keywords=""):
year_filter = "AND p.year = {}".format(year) if year > 0 else ""
dep_filter = "AND d.code = '{}'".format(department) if department != "" else ""
type_filter = "AND p.paper_type = '{}'".format(paper_type) if paper_type != "" else ""
keyword_filter = "AND kt.text IN {}".format(keywords) if keywords != "" else ""
if subject == "":
return []
if keyword_filter == "":
query =\
"""SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id
FROM papers p JOIN departments d ON p.department_id = d.id
WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH('{}'))) = SOUNDEX('{}') {} {} {} ORDER BY year DESC LIMIT 30;""".format(subject, subject, year_filter, dep_filter, type_filter)
else:
query =\
"""SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id, GROUP_CONCAT(kt.text) AS keywords
FROM papers AS p JOIN departments AS d ON p.department_id = d.id
LEFT OUTER JOIN (
SELECT pk.paper_id, k.text FROM papers_keywords AS pk JOIN keywords AS k ON pk.keyword_id = k.id
) AS kt
ON p.id = kt.paper_id
WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH('{}'))) = SOUNDEX('{}')
{} {} {} {}
ORDER BY p.year DESC LIMIT 30;
""".format(subject, subject, year_filter, dep_filter, type_filter, keyword_filter)
results = []
with connection.cursor() as c:
c.execute(query)
for row in c.fetchall():
results.append(row)
return results
def hitSearch(request):
"""
Meant to be an independent API.
Request args:
q -> subject name
year -> year filter
dep -> department filter
typ -> paper_type filter
"""
q = request.GET.get('q', "")
year = request.GET.get('year', 0)
dep = request.GET.get('dep', "")
typ = request.GET.get('typ', "")
keywords = request.GET.get('keys', "")
try:
year = int(year)
except:
year = 0
results = _search(q, year=year, department=dep, paper_type=typ, keywords=keywords)
response = JsonResponse({"papers": results})
response["Access-Control-Allow-Origin"] = "*" #For CORS
return response
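# Example request/response shape (illustrative; the URL prefix depends on the
# project's urls.py):
#   GET /search/?q=algorithms&year=2018&dep=CS&typ=endsem
#   -> {"papers": [[subject, year, department_code, paper_type, link, paper_id], ...]}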
| [
"[email protected]"
] | |
e077f429daff201e907044fe1dafc3a66af86952 | 26fc334777ce27d241c67d97adc1761e9d23bdba | /tests/django_tests/tests/middleware_exceptions/tests.py | 0c39f09f9156cf2b9787fa67ac627a5c7dd4a653 | [
"BSD-3-Clause"
] | permissive | alihoseiny/djongo | 1434c9e78c77025d7e0b3330c3a40e9ea0029877 | e2edf099e398573faa90e5b28a32c3d7f1c5f1e9 | refs/heads/master | 2020-03-27T23:27:02.530397 | 2018-08-30T14:44:37 | 2018-08-30T14:44:37 | 147,317,771 | 2 | 1 | BSD-3-Clause | 2018-09-04T09:00:53 | 2018-09-04T09:00:53 | null | UTF-8 | Python | false | false | 6,887 | py | from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
from . import middleware as mw
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class MiddlewareTests(SimpleTestCase):
def tearDown(self):
mw.log = []
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewNoneMiddleware'])
def test_process_view_return_none(self):
response = self.client.get('/middleware_exceptions/view/')
self.assertEqual(mw.log, ['processed view normal_view'])
self.assertEqual(response.content, b'OK')
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewMiddleware'])
def test_process_view_return_response(self):
response = self.client.get('/middleware_exceptions/view/')
self.assertEqual(response.content, b'Processed view normal_view')
@override_settings(MIDDLEWARE=[
'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
'middleware_exceptions.middleware.LogMiddleware',
])
def test_templateresponse_from_process_view_rendered(self):
"""
TemplateResponses returned from process_view() must be rendered before
being passed to any middleware that tries to access response.content,
such as middleware_exceptions.middleware.LogMiddleware.
"""
response = self.client.get('/middleware_exceptions/view/')
self.assertEqual(response.content, b'Processed view normal_view\nProcessViewTemplateResponseMiddleware')
@override_settings(MIDDLEWARE=[
'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
'middleware_exceptions.middleware.TemplateResponseMiddleware',
])
def test_templateresponse_from_process_view_passed_to_process_template_response(self):
"""
TemplateResponses returned from process_view() should be passed to any
template response middleware.
"""
response = self.client.get('/middleware_exceptions/view/')
expected_lines = [
b'Processed view normal_view',
b'ProcessViewTemplateResponseMiddleware',
b'TemplateResponseMiddleware',
]
self.assertEqual(response.content, b'\n'.join(expected_lines))
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.TemplateResponseMiddleware'])
def test_process_template_response(self):
response = self.client.get('/middleware_exceptions/template_response/')
self.assertEqual(response.content, b'template_response OK\nTemplateResponseMiddleware')
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.LogMiddleware'])
def test_view_exception_converted_before_middleware(self):
response = self.client.get('/middleware_exceptions/permission_denied/')
self.assertEqual(mw.log, [(response.status_code, response.content)])
self.assertEqual(response.status_code, 403)
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
def test_view_exception_handled_by_process_exception(self):
response = self.client.get('/middleware_exceptions/error/')
self.assertEqual(response.content, b'Exception caught')
@override_settings(MIDDLEWARE=[
'middleware_exceptions.middleware.ProcessExceptionLogMiddleware',
'middleware_exceptions.middleware.ProcessExceptionMiddleware',
])
def test_response_from_process_exception_short_circuits_remainder(self):
response = self.client.get('/middleware_exceptions/error/')
self.assertEqual(mw.log, [])
self.assertEqual(response.content, b'Exception caught')
@override_settings(MIDDLEWARE=[
'middleware_exceptions.middleware.LogMiddleware',
'middleware_exceptions.middleware.NotFoundMiddleware',
])
def test_exception_in_middleware_converted_before_prior_middleware(self):
response = self.client.get('/middleware_exceptions/view/')
self.assertEqual(mw.log, [(404, response.content)])
self.assertEqual(response.status_code, 404)
@override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
def test_exception_in_render_passed_to_process_exception(self):
response = self.client.get('/middleware_exceptions/exception_in_render/')
self.assertEqual(response.content, b'Exception caught')
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
with self.assertRaises(AttributeError):
self.client.get("/middleware_exceptions/view/")
class MyMiddleware:
def __init__(self, get_response=None):
raise MiddlewareNotUsed
def process_request(self, request):
pass
class MyMiddlewareWithExceptionMessage:
def __init__(self, get_response=None):
raise MiddlewareNotUsed('spam eggs')
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF='middleware_exceptions.urls',
MIDDLEWARE=['django.middleware.common.CommonMiddleware'],
)
class MiddlewareNotUsedTests(SimpleTestCase):
rf = RequestFactory()
def test_raise_exception(self):
request = self.rf.get('middleware_exceptions/view/')
with self.assertRaises(MiddlewareNotUsed):
MyMiddleware().process_request(request)
@override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddleware'])
def test_log(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
)
@override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'])
def test_log_custom_message(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
)
@override_settings(DEBUG=False)
def test_do_not_log_when_debug_is_false(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 0)
| [
"[email protected]"
] | |
9d72434ff4c42cd9934c292efbbb2cdcf75e5a58 | f719ec76a8417fc05a2d46ada2501052e2bf9469 | /exp_runners/traffic/cent_traffic_runner.py | 2179e0136d393da470ab919a3989f6ab9e970282 | [] | no_license | yang-xy20/DICG | cc31064a3e4a3dd01414161e42b228c2c09bfea7 | c64ba9dbbe0f2b745cd04ce516aa1fed4c2cffc7 | refs/heads/master | 2023-07-04T18:25:18.461196 | 2021-08-19T21:34:06 | 2021-08-19T21:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,351 | py | import sys
import os
current_file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_file_path + '/../../')
import socket
import collections
import numpy as np
import argparse
import joblib
import time
import matplotlib.pyplot as plt
from types import SimpleNamespace
import torch
from torch.nn import functional as F
import akro
import garage
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment.deterministic import set_seed
from envs import TrafficJunctionWrapper
from dicg.torch.baselines import GaussianMLPBaseline
from dicg.torch.algos import CentralizedMAPPO
from dicg.torch.policies import CentralizedCategoricalMLPPolicy
from dicg.experiment.local_runner_wrapper import LocalRunnerWrapper
from dicg.sampler import CentralizedMAOnPolicyVectorizedSampler
def run(args):
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# garage.torch.utils.set_gpu_mode(mode=torch.cuda.is_available())
# print(garage.torch.utils.global_device())
if args.exp_name is None:
exp_layout = collections.OrderedDict([
('cent{}_ppo', ''),
('entcoeff={}', args.ent),
('dim={}', args.dim),
('nagents={}', args.n_agents),
('difficulty={}', args.difficulty),
('curr={}', bool(args.curriculum)),
('steps={}', args.max_env_steps),
('nenvs={}', args.n_envs),
('bs={:0.0e}', args.bs),
('splits={}', args.opt_n_minibatches),
('miniepoch={}', args.opt_mini_epochs),
('seed={}', args.seed)
])
exp_name = '_'.join(
[key.format(val) for key, val in exp_layout.items()]
)
else:
exp_name = args.exp_name
prefix = 'traffic'
id_suffix = ('_' + str(args.run_id)) if args.run_id != 0 else ''
unseeded_exp_dir = './data/' + args.loc +'/' + exp_name[:-7]
exp_dir = './data/' + args.loc +'/' + exp_name + id_suffix
# Enforce
args.center_adv = False if args.entropy_method == 'max' else args.center_adv
if args.mode == 'train':
# making sequential log dir if name already exists
@wrap_experiment(name=exp_name,
prefix=prefix,
log_dir=exp_dir,
snapshot_mode='last',
snapshot_gap=1)
def train_traffic(ctxt=None, args_dict=vars(args)):
args = SimpleNamespace(**args_dict)
set_seed(args.seed)
if args.curriculum:
curr_start = int(0.125 * args.n_epochs)
curr_end = int(0.625 * args.n_epochs)
else:
curr_start = 0
curr_end = 0
args.add_rate_min = args.add_rate_max
env = TrafficJunctionWrapper(
centralized=True, # centralized training and critic
dim=args.dim,
vision=1,
add_rate_min=args.add_rate_min,
add_rate_max=args.add_rate_max,
curr_start=curr_start,
curr_end=curr_end,
difficulty=args.difficulty,
n_agents=args.n_agents,
max_steps=args.max_env_steps
)
env = GarageEnv(env)
runner = LocalRunnerWrapper(
ctxt,
eval=args.eval_during_training,
n_eval_episodes=args.n_eval_episodes,
eval_greedy=args.eval_greedy,
eval_epoch_freq=args.eval_epoch_freq,
save_env=env.pickleable
)
hidden_nonlinearity = F.relu if args.hidden_nonlinearity == 'relu' \
else torch.tanh
policy = CentralizedCategoricalMLPPolicy(
env.spec,
env.n_agents,
hidden_nonlinearity=hidden_nonlinearity,
hidden_sizes=args.hidden_sizes,
name='dec_categorical_mlp_policy'
)
baseline = GaussianMLPBaseline(env_spec=env.spec,
hidden_sizes=(64, 64, 64))
# Set max_path_length <= max_steps
# If max_path_length > max_steps, algo will pad obs
# obs.shape = torch.Size([n_paths, algo.max_path_length, feat_dim])
algo = CentralizedMAPPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=args.max_env_steps, # Notice
discount=args.discount,
center_adv=bool(args.center_adv),
positive_adv=bool(args.positive_adv),
gae_lambda=args.gae_lambda,
policy_ent_coeff=args.ent,
entropy_method=args.entropy_method,
stop_entropy_gradient=True \
if args.entropy_method == 'max' else False,
clip_grad_norm=args.clip_grad_norm,
optimization_n_minibatches=args.opt_n_minibatches,
optimization_mini_epochs=args.opt_mini_epochs,
)
runner.setup(algo, env,
sampler_cls=CentralizedMAOnPolicyVectorizedSampler,
sampler_args={'n_envs': args.n_envs})
runner.train(n_epochs=args.n_epochs,
batch_size=args.bs)
train_traffic(args_dict=vars(args))
elif args.mode in ['restore', 'eval']:
data = joblib.load(exp_dir + '/params.pkl')
env = data['env']
algo = data['algo']
if args.mode == 'restore':
from dicg.experiment.runner_utils import restore_training
restore_training(exp_dir, exp_name, args,
env_saved=env.pickleable, env=env)
elif args.mode == 'eval':
env.eval(algo.policy, n_episodes=args.n_eval_episodes, greedy=args.eval_greedy,
load_from_file=True, max_steps=args.max_env_steps, render=args.render)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Meta
parser.add_argument('--mode', '-m', type=str, default='train')
parser.add_argument('--loc', type=str, default='local')
parser.add_argument('--exp_name', type=str, default=None)
# Train
parser.add_argument('--seed', '-s', type=int, default=1)
parser.add_argument('--n_epochs', type=int, default=1000)
parser.add_argument('--bs', type=int, default=60000)
parser.add_argument('--n_envs', type=int, default=1)
# Eval
parser.add_argument('--run_id', type=int, default=0) # sequential naming
parser.add_argument('--n_eval_episodes', type=int, default=100)
parser.add_argument('--render', type=int, default=0)
parser.add_argument('--inspect_steps', type=int, default=0)
parser.add_argument('--eval_during_training', type=int, default=1)
parser.add_argument('--eval_greedy', type=int, default=1)
parser.add_argument('--eval_epoch_freq', type=int, default=5)
# Env
parser.add_argument('--max_env_steps', type=int, default=20)
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--n_agents', '-n', type=int, default=5)
parser.add_argument('--difficulty', type=str, default='easy')
parser.add_argument('--add_rate_max', type=float, default=0.3)
parser.add_argument('--add_rate_min', type=float, default=0.1)
parser.add_argument('--curriculum', type=int, default=0)
# Algo
# parser.add_argument('--max_algo_path_length', type=int, default=n_steps)
parser.add_argument('--hidden_nonlinearity', type=str, default='tanh')
parser.add_argument('--discount', type=float, default=0.99)
parser.add_argument('--center_adv', type=int, default=1)
parser.add_argument('--positive_adv', type=int, default=0)
parser.add_argument('--gae_lambda', type=float, default=0.97)
parser.add_argument('--ent', type=float, default=0.02) # 0.01 is too small
parser.add_argument('--entropy_method', type=str, default='regularized')
parser.add_argument('--clip_grad_norm', type=float, default=7)
parser.add_argument('--opt_n_minibatches', type=int, default=4,
help='The number of splits of a batch of trajectories for optimization.')
parser.add_argument('--opt_mini_epochs', type=int, default=10,
help='The number of epochs the optimizer runs for each batch of trajectories.')
# Policy
# Example: --encoder_hidden_sizes 12 123 1234
parser.add_argument('--hidden_sizes', nargs='+', type=int)
args = parser.parse_args()
# Enforce values
if args.difficulty == 'hard':
args.max_env_steps = 60
args.dim = 18
args.n_agents = 20
args.add_rate_min = 0.02
args.add_rate_max = 0.05
elif args.difficulty == 'medium':
args.max_env_steps = 40
args.dim = 14
args.n_agents = 10
args.add_rate_min = 0.05
args.add_rate_max = 0.2
elif args.difficulty == 'easy':
args.max_env_steps = 20
args.dim = 8
args.n_agents = 5
args.add_rate_min = 0.1
args.add_rate_max = 0.3
if args.hidden_sizes is None:
args.hidden_sizes = [265, 128, 64]
run(args)
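# Example invocations (illustrative; flags follow the argparse definitions above,
# and eval mode expects a matching ./data/<loc>/<exp_name> run directory):
#   python cent_traffic_runner.py --difficulty easy --seed 1 --n_epochs 1000 --bs 60000
#   python cent_traffic_runner.py -m eval --difficulty easy --n_eval_episodes 100 --render 1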
| [
"[email protected]"
] | |
5b2787c83a0a8eb0caae96635e595e2bc7f9dbed | bc441bb06b8948288f110af63feda4e798f30225 | /database_delivery_sdk/api/sqlpkgs/update_pb2.py | 0f09d354715753abc6b91286cff95f9f6a2d58bf | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 14,764 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from database_delivery_sdk.model.database_delivery import sql_package_version_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2
from database_delivery_sdk.model.database_delivery import app_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2
from database_delivery_sdk.model.database_delivery import dbservice_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='update.proto',
package='sqlpkgs',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0cupdate.proto\x12\x07sqlpkgs\x1aGdatabase_delivery_sdk/model/database_delivery/sql_package_version.proto\x1a\x37\x64\x61tabase_delivery_sdk/model/database_delivery/app.proto\x1a=database_delivery_sdk/model/database_delivery/dbservice.proto\"\xbd\x01\n\x17UpdateSQLPackageRequest\x12\r\n\x05pkgId\x18\x01 \x01(\t\x12\x43\n\x0cupdateSqlpkg\x18\x02 \x01(\x0b\x32-.sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg\x1aN\n\x0cUpdateSqlpkg\x12\r\n\x05\x61ppId\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x62ServiceId\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04memo\x18\x04 \x01(\t\"\xa1\x02\n\x18UpdateSQLPackageResponse\x12\x39\n\x0bversionList\x18\x01 \x03(\x0b\x32$.database_delivery.SQLPackageVersion\x12+\n\x03\x41PP\x18\x02 \x03(\x0b\x32\x1e.database_delivery.Application\x12/\n\tDBSERVICE\x18\x03 \x03(\x0b\x32\x1c.database_delivery.DBService\x12\n\n\x02id\x18\x04 \x01(\t\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x0c\n\x04memo\x18\x06 \x01(\t\x12\x0f\n\x07\x63reator\x18\x07 \x01(\t\x12\r\n\x05\x63time\x18\x08 \x01(\x03\x12\r\n\x05mtime\x18\t \x01(\x03\x12\x15\n\rrepoPackageId\x18\n \x01(\t\"\x84\x01\n\x1fUpdateSQLPackageResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12/\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32!.sqlpkgs.UpdateSQLPackageResponseb\x06proto3')
,
dependencies=[database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2.DESCRIPTOR,database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2.DESCRIPTOR,database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2.DESCRIPTOR,])
_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG = _descriptor.Descriptor(
name='UpdateSqlpkg',
full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='appId', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.appId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dbServiceId', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.dbServiceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.memo', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=408,
)
_UPDATESQLPACKAGEREQUEST = _descriptor.Descriptor(
name='UpdateSQLPackageRequest',
full_name='sqlpkgs.UpdateSQLPackageRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pkgId', full_name='sqlpkgs.UpdateSQLPackageRequest.pkgId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateSqlpkg', full_name='sqlpkgs.UpdateSQLPackageRequest.updateSqlpkg', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=408,
)
_UPDATESQLPACKAGERESPONSE = _descriptor.Descriptor(
name='UpdateSQLPackageResponse',
full_name='sqlpkgs.UpdateSQLPackageResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionList', full_name='sqlpkgs.UpdateSQLPackageResponse.versionList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='APP', full_name='sqlpkgs.UpdateSQLPackageResponse.APP', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DBSERVICE', full_name='sqlpkgs.UpdateSQLPackageResponse.DBSERVICE', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='sqlpkgs.UpdateSQLPackageResponse.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='sqlpkgs.UpdateSQLPackageResponse.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='sqlpkgs.UpdateSQLPackageResponse.memo', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='sqlpkgs.UpdateSQLPackageResponse.creator', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='sqlpkgs.UpdateSQLPackageResponse.ctime', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='sqlpkgs.UpdateSQLPackageResponse.mtime', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repoPackageId', full_name='sqlpkgs.UpdateSQLPackageResponse.repoPackageId', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=700,
)
_UPDATESQLPACKAGERESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateSQLPackageResponseWrapper',
full_name='sqlpkgs.UpdateSQLPackageResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=703,
serialized_end=835,
)
_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG.containing_type = _UPDATESQLPACKAGEREQUEST
_UPDATESQLPACKAGEREQUEST.fields_by_name['updateSqlpkg'].message_type = _UPDATESQLPACKAGEREQUEST_UPDATESQLPKG
_UPDATESQLPACKAGERESPONSE.fields_by_name['versionList'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2._SQLPACKAGEVERSION
_UPDATESQLPACKAGERESPONSE.fields_by_name['APP'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2._APPLICATION
_UPDATESQLPACKAGERESPONSE.fields_by_name['DBSERVICE'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2._DBSERVICE
_UPDATESQLPACKAGERESPONSEWRAPPER.fields_by_name['data'].message_type = _UPDATESQLPACKAGERESPONSE
DESCRIPTOR.message_types_by_name['UpdateSQLPackageRequest'] = _UPDATESQLPACKAGEREQUEST
DESCRIPTOR.message_types_by_name['UpdateSQLPackageResponse'] = _UPDATESQLPACKAGERESPONSE
DESCRIPTOR.message_types_by_name['UpdateSQLPackageResponseWrapper'] = _UPDATESQLPACKAGERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateSQLPackageRequest = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageRequest', (_message.Message,), {
'UpdateSqlpkg' : _reflection.GeneratedProtocolMessageType('UpdateSqlpkg', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGEREQUEST_UPDATESQLPKG,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg)
})
,
'DESCRIPTOR' : _UPDATESQLPACKAGEREQUEST,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageRequest)
})
_sym_db.RegisterMessage(UpdateSQLPackageRequest)
_sym_db.RegisterMessage(UpdateSQLPackageRequest.UpdateSqlpkg)
UpdateSQLPackageResponse = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGERESPONSE,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageResponse)
})
_sym_db.RegisterMessage(UpdateSQLPackageResponse)
UpdateSQLPackageResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGERESPONSEWRAPPER,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageResponseWrapper)
})
_sym_db.RegisterMessage(UpdateSQLPackageResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
788b1114cf8da3899edd4800a1fbc676bf8142ee | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-Quartz-2.5.1/Examples/Programming with Quartz/BasicDrawing/MyAppController.py | 7108ddb749d657bf205c4db6e76aba0164427919 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 4,062 | py | from Cocoa import *
import objc
import PDFHandling
import BitmapContext
import Utilities
# Initial defaults
_dpi = 144
_useQT = False
def getURLToExport(suffix):
savePanel = NSSavePanel.savePanel()
initialFileName = "BasicDrawing.%s"%(suffix,)
if savePanel.runModalForDirectory_file_(None, initialFileName) == NSFileHandlingPanelOKButton:
return savePanel.URL()
return None
class MyAppController (NSObject):
theView = objc.IBOutlet()
currentDPIMenuItem = objc.IBOutlet()
currentExportStyleMenuItem = objc.IBOutlet()
@objc.IBAction
def print_(self, sender):
self.theView.print_(sender)
def updateDPIMenu_(self, sender):
if self.currentDPIMenuItem is not sender:
# Uncheck the previous item.
if self.currentDPIMenuItem is not None:
self.currentDPIMenuItem.setState_(NSOffState)
# Update to the current item.
self.currentDPIMenuItem = sender
# Check new menu item.
self.currentDPIMenuItem.setState_(NSOnState)
def updateExportStyleMenu_(self, sender):
if self.currentExportStyleMenuItem is not sender:
# Uncheck the previous item.
if self.currentExportStyleMenuItem is not None:
self.currentExportStyleMenuItem.setState_(NSOffState)
# Update to the current item.
self.currentExportStyleMenuItem = sender
# Check new menu item.
self.currentExportStyleMenuItem.setState_(NSOnState)
@objc.IBAction
def setExportResolution_(self, sender):
global _dpi
_dpi = sender.tag()
self.updateDPIMenu_(sender)
@objc.IBAction
def setUseQT_(self, sender):
global _useQT
_useQT = True
self.updateExportStyleMenu_(sender)
@objc.IBAction
def setUseCGImageSource_(self, sender):
global _useQT
_useQT = False
self.updateExportStyleMenu_(sender)
def setupExportInfo_(self, exportInfoP):
# Use the printable version of the current command. This produces
# the best results for exporting.
exportInfoP.command = self.theView.currentPrintableCommand()
exportInfoP.fileType = ' ' # unused
exportInfoP.useQTForExport = _useQT
exportInfoP.dpi = _dpi
@objc.IBAction
def exportAsPDF_(self, sender):
url = getURLToExport("pdf")
if url is not None:
exportInfo = Utilities.ExportInfo()
self.setupExportInfo_(exportInfo)
PDFHandling.MakePDFDocument(url, exportInfo)
@objc.IBAction
def exportAsPNG_(self, sender):
url = getURLToExport("png")
if url is not None:
exportInfo = Utilities.ExportInfo()
self.setupExportInfo_(exportInfo)
BitmapContext.MakePNGDocument(url, exportInfo)
@objc.IBAction
def exportAsTIFF_(self, sender):
url = getURLToExport("tif")
if url is not None:
exportInfo = Utilities.ExportInfo()
self.setupExportInfo_(exportInfo)
BitmapContext.MakeTIFFDocument(url, exportInfo)
@objc.IBAction
def exportAsJPEG_(self, sender):
url = getURLToExport("jpg")
if url is not None:
exportInfo = Utilities.ExportInfo()
self.setupExportInfo_(exportInfo)
BitmapContext.MakeJPEGDocument(url, exportInfo)
    def validateMenuItem_(self, menuItem):
        if menuItem.tag() == _dpi:
            self.currentDPIMenuItem = menuItem
            menuItem.setState_(True)
        elif menuItem.action() == 'setUseQT:':
            if _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        elif menuItem.action() == 'setUseCGImageSource:':
            if not _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        return True
| [
"[email protected]"
] | |
01685b4a849a3156658fa0dbdaad10650ff9d148 | b14802e3892a661fa62d9d0772f72becc0abd612 | /evaluation/get_top_socored.py | 0bd0d8919ad1d0eed44022b6a57cbb69617117bb | [] | no_license | gombru/HateSpeech | e4c4b7993354ce2cb49334b814f929364fdcb446 | 7891c7e2835f17ed2a9985abd285e19788685c66 | refs/heads/master | 2022-02-23T08:57:34.909778 | 2022-02-10T12:54:41 | 2022-02-10T12:54:41 | 138,057,409 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import numpy as np
import operator
import shutil
import os
model_name = 'MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_folder_name = 'top_MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_file = open('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '.txt','w')
if not os.path.exists('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name):
os.makedirs('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name)
results = {}
with open('../../../datasets/HateSPic/MMHS/results/' + model_name + '/test.txt') as f:
for line in f:
data = line.split(',')
id = int(data[0])
label = int(data[1])
hate_score = float(data[3])
notHate_score = float(data[2])
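        # Softmax over the two class logits gives the probability assigned to the hate class.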
softmax_hate_score = np.exp(hate_score) / (np.exp(hate_score) + np.exp(notHate_score))
results[id] = softmax_hate_score
results = sorted(results.items(), key=operator.itemgetter(1))
results = list(reversed(results))
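# Keep the 50 highest-scoring images: copy each one and record its id.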
for i,r in enumerate(results):
if i == 50: break
    print(r[1])
    shutil.copyfile('../../../datasets/HateSPic/MMHS/img_resized/' + str(r[0]) + '.jpg', '../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '/' + str(i) + '-' + str(r[0]) + '.jpg')
out_file.write(str(r[0]) + '\n')
out_file.close()
print("Done") | [
"[email protected]"
] | |
429d42c8fd21f8aeed2ca8697dc6fab586d5a1dd | 1fec393454ffe7f65fce3617c14a2fcedf1da663 | /Searching/Searching I/matrix_median.py | 9cab3f6da7a1a3e9e867bcedf81f9997880f980b | [] | no_license | VarmaSANJAY/InterviewBit-Solution-Python | fbeb1d855a5244a89b40fbd2522640dc596c79b6 | ea26394cc1b9d22a9ab474467621d2b61ef15a31 | refs/heads/master | 2022-11-27T22:46:34.966395 | 2020-08-09T14:10:58 | 2020-08-09T14:10:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | from bisect import *
class Solution:
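    # Median of a row-wise sorted matrix: binary search over the value range,
    # counting entries <= mid in each row with bisect_right.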
def binary_search(self,A, min_el, max_el, cnt_before_mid):
s = min_el
e = max_el
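        # Shrink [s, e] to the smallest value with more than cnt_before_mid entries <= it.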
while s < e:
mid = (s+e) // 2
count = 0
for row in A:
count += bisect_right(row, mid)
if count > cnt_before_mid:
e = mid
else:
s = mid + 1
return s
def Solve(self,A):
min_el = float('inf')
max_el = float('-inf')
for i in A:
min_el = min(i[0], min_el)
max_el = max(i[-1], max_el)
m=len(A)
n=len(A[0])
cnt_before_mid = (m*n) // 2
return self.binary_search(A, min_el, max_el,cnt_before_mid)
if __name__ == '__main__':
A = [[1, 3, 5],
[2, 6, 9],
[3, 6, 9]]
B = Solution()
print(B.Solve(A))
| [
"[email protected]"
] | |
4b324a9f9ea99b231e13b55494bd0092b1cf52ec | c3ca0bcea4d1b4013a0891f014928922fc81fe7a | /examples/multi_step_training.py | 605e0ac42e4b43a5d9c9b7ba9d1573554d4f6c74 | [
"MIT"
] | permissive | takuseno/d3rlpy | 47894b17fc21fab570eca39fe8e6925a7b5d7d6f | 4ba297fc6cd62201f7cd4edb7759138182e4ce04 | refs/heads/master | 2023-08-23T12:27:45.305758 | 2023-08-14T12:07:03 | 2023-08-14T12:07:03 | 266,369,147 | 1,048 | 222 | MIT | 2023-09-02T08:12:48 | 2020-05-23T15:51:51 | Python | UTF-8 | Python | false | false | 1,483 | py | import argparse
import gym
import d3rlpy
GAMMA = 0.99
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, default="Pendulum-v1")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--n-steps", type=int, default=1)
parser.add_argument("--gpu", action="store_true")
args = parser.parse_args()
env = gym.make(args.env)
eval_env = gym.make(args.env)
# fix seed
d3rlpy.seed(args.seed)
d3rlpy.envs.seed_env(env, args.seed)
d3rlpy.envs.seed_env(eval_env, args.seed)
# setup algorithm
sac = d3rlpy.algos.SACConfig(
batch_size=256,
gamma=GAMMA,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
temp_learning_rate=3e-4,
action_scaler=d3rlpy.preprocessing.MinMaxActionScaler(),
).create(device=args.gpu)
# multi-step transition sampling
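    # With n_steps > 1 each sampled transition uses an n-step return: rewards over
    # the next n steps are accumulated with discount GAMMA and bootstrapping uses
    # the observation n steps ahead.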
transition_picker = d3rlpy.dataset.MultiStepTransitionPicker(
n_steps=args.n_steps,
gamma=GAMMA,
)
# replay buffer for experience replay
buffer = d3rlpy.dataset.create_fifo_replay_buffer(
limit=100000,
env=env,
transition_picker=transition_picker,
)
# start training
sac.fit_online(
env,
buffer,
eval_env=eval_env,
n_steps=100000,
n_steps_per_epoch=1000,
update_interval=1,
update_start_step=1000,
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c3bbb5738b81da3295cb82f51894e74b8553f71b | 7765c093fbfaebc3328f8500db2e462977ac42a5 | /sqlite/sample.py | f4dc2f38f85c48f038a9b6f853da204c4bf0df63 | [] | no_license | iamkamleshrangi/datascience | e118e41591850f24438aa344100a07737490fd29 | 7add9501c3ac75323e94df5351e2baf6cadb73ae | refs/heads/master | 2022-02-02T20:19:20.986813 | 2018-07-23T13:26:37 | 2018-07-23T13:26:37 | 128,158,552 | 0 | 0 | null | 2022-01-21T04:26:26 | 2018-04-05T04:22:15 | Python | UTF-8 | Python | false | false | 358 | py | # Create engine: engine
from sqlalchemy import create_engine
import pandas as pd
engine = create_engine('sqlite:///Chinook.sqlite')
# Open engine in context manager
with engine.connect() as con:
rs = con.execute('select * from Employee order by BirthDate asc')
df = pd.DataFrame(rs.fetchall())
# Set the DataFrame's column names
df.columns = rs.keys()
# Print head of DataFrame
print(df.head())
| [
"[email protected]"
] | |
28afd10dd4bf86cc9fc12239cac8891a7b46c5df | a9243f735f6bb113b18aa939898a97725c358a6d | /0.12/_downloads/plot_time_frequency_mixed_norm_inverse.py | 65ac593e852afd7ae0cd4471a6c573000a16b131 | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 4,959 | py | """
=============================================
Compute MxNE with time-frequency sparse prior
=============================================
The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
that promotes focal (sparse) sources (such as dipole fitting techniques).
The benefit of this approach is that:
- it is spatio-temporal without assuming stationarity (sources properties
can vary over time)
- activations are localized in space, time and frequency in one step.
- with a built-in filtering process based on a short time Fourier
transform (STFT), data does not need to be low passed (just high pass
to make the signals zero mean).
- the solver solves a convex optimization problem, hence cannot be
trapped in local minima.
References:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
https://doi.org/10.1007/978-3-642-22092-0_49
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.inverse_sparse import tf_mixed_norm
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left visual'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked = mne.pick_channels_evoked(evoked)
# We make the window slightly larger than what you'll eventually be interested
# in ([-0.05, 0.3]) to avoid edge effects.
evoked.crop(tmin=-0.1, tmax=0.4)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
surf_ori=True)
###############################################################################
# Run solver
# alpha_space regularization parameter is between 0 and 100 (100 is high)
alpha_space = 50. # spatial regularization parameter
# alpha_time parameter promotes temporal smoothness
# (0 means no temporal regularization)
alpha_time = 1. # temporal regularization parameter
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
loose=loose, depth=depth)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
method='dSPM')
# Compute TF-MxNE inverse solution
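# wsize and tstep define the STFT dictionary (window length and step, in samples)
# used by the time-frequency prior; weights/weights_min restrict the active set
# to sources with sufficiently large dSPM amplitude.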
stc, residual = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
loose=loose, depth=depth, maxit=200, tol=1e-4,
weights=stc_dspm, weights_min=8., debias=True,
wsize=16, tstep=4, window=0.05,
return_residual=True)
# Crop to remove edges
stc.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
proj=True)
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1, fig_name="TF-MxNE (cond %s)"
% condition, modes=['sphere'], scale_factors=[1.])
time_label = 'TF-MxNE time=%0.2f ms'
clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
brain = stc.plot('sample', 'inflated', 'rh', clim=clim, time_label=time_label,
smoothing_steps=5, subjects_dir=subjects_dir)
brain.show_view('medial')
brain.set_data_time_index(120)
brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
| [
"[email protected]"
] | |
b548eedfdd00fe7c08f5ba00618fbe44e0cba7df | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/pipeline/service_group/number_office/time/fact.py | e3cbfccb649de7dbf84162e340a4f0fe1510ddd6 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | package textTranslator;
import java.io.*;
import java.net.*;
import java.util.*;
import com.google.gson.*;
import com.squareup.okhttp.*;
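// Minimal client for the Microsoft Translator text API (api-version 3.0),
// using OkHttp for the HTTP calls and Gson to handle the JSON responses.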
public class Translate {
    String subscriptionKey = "b58103fec253e2c21b0fdc1a24e16352";
String url = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=";
public Translate(String subscriptionKey) {
this.subscriptionKey = subscriptionKey;
}
// Instantiates the OkHttpClient.
OkHttpClient client = new OkHttpClient();
// This function performs a POST request.
public String Post() throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"Welcome to Microsoft Translator. Guess how many languages I speak!\"\n}]");
Request request = new Request.Builder()
.url(url).post(body)
                .addHeader("Ocp-Apim-Subscription-Key", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
public String Post(String bodyStr, String translateTo) throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"" + bodyStr + "\"\n}]");
Request request = new Request.Builder()
.url(url + translateTo).post(body)
                .addHeader("Ocp-Apim-Subscription-Key", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
// This function prettifies the json response.
public static String prettify(String json_text) {
JsonParser parser = new JsonParser();
JsonElement json = parser.parse(json_text);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
return gson.toJson(json);
}
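    // Walk the response array and return the first "text" value found inside a
    // "translations" entry.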
public static String getTranslatedText(String jsonText) {
JsonParser parser = new JsonParser();
JsonArray json = parser.parse(jsonText).getAsJsonArray();
String translatedText = null;
for (int i = 0; i < json.size(); i++) {
if (translatedText != null)
break;
JsonObject jsonObj = json.get(i).getAsJsonObject();
JsonArray translations = jsonObj.getAsJsonArray("translations");
if (translations == null) return "";
for (int j = 0; j < translations.size(); j++) {
if (translatedText != null) break;
JsonObject translation = translations.get(j).getAsJsonObject();
JsonElement text = translation.get("text");
if (text == null) return "";
translatedText = text.getAsString();
}
}
return translatedText;
}
// public static void main(String[] args) {
// try {
// Translate translateRequest = new Translate(System.getenv("Translator"));
//// String response = translateRequest.Post();
//// System.out.println(prettify(response));
//
// String response = translateRequest.Post("Hello", "fr");
// System.out.println(Translate.prettify(response));
//
// System.out.println(getTranslatedText(response));
//
//
// } catch (Exception e) {
// System.out.println(e);
// }
// }
}
| [
"[email protected]"
] | |
ffe965efd83b48d88452e41df5c8274713eac169 | ca565548206583a58fe8d646bfd9a6f1ba51c673 | /problem2.py | fa5313404ef249962fe28fa2f3edd13684ba5711 | [] | no_license | GLAU-TND/python-programming-assignment2-kirtimansinghcs19 | fbd772f38fa3546e579ffc2bdf99cc2b34e9937b | 5dc16c8b24186a2e00c749e14eecaac426f51e90 | refs/heads/master | 2021-01-13T22:51:02.990390 | 2020-02-23T16:32:51 | 2020-02-23T16:32:51 | 242,519,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from itertools import permutations
def largest(l):
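    # Brute force: concatenate every permutation; all candidates have the same
    # length, so the lexicographic maximum is also the largest number.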
lst=[]
for i in permutations(l, len(l)):
lst.append(''.join(map(str,i)))
return max(lst)
ls=[]
n = int(input('Enter the number of elements: '))
for i in range(0,n):
ls.append(int(input()))
print(largest(ls))
| [
"[email protected]"
] | |
142f68111255fe08b0cfa29a4378494361ef2c57 | 8ee5dcbdbd407eb5f294d430813b16eca22f571c | /data/HW5/hw5_253.py | 628a39851ed1f06194065eadcb2c20d9da276de9 | [] | no_license | MAPLE-Robot-Subgoaling/IPT | 5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9 | f512ea3324bfdceff8df63b4c7134b5fcbb0514e | refs/heads/master | 2021-01-11T12:31:00.939051 | 2018-08-13T23:24:19 | 2018-08-13T23:24:19 | 79,373,489 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | def main():
width = int(input("please enter the width of the box "))
    height = int(input("please enter the height of the box "))
sym = input("please enter a symbol for the outline ")
fill = input("please enter a fill symbol ")
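    # Draw the box row by row: border cells get the outline symbol, interior cells get the fill.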
for h in range(height):
for w in range(width):
            print(sym if h in (0, height-1) or w in (0, width-1) else fill, end=' ')
print()
main()
| [
"[email protected]"
] | |
0fe08899b3a8f27f944baf7bfb39b3fcdf8ebdff | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/aio/__init__.py | 8eafa989fcbc836fcc407acd2ea0859726442db7 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 558 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._access_control_client import AccessControlClient
__all__ = ['AccessControlClient']
| [
"[email protected]"
] |