content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k bytes) | lang (string, 1 class) |
---|---|---|---|---|---|---|---|---|
from cardboard.cards.core import cards, card
| 22.5 | 44 | 0.822222 | [
"MIT"
] | Julian/cardboard | cardboard/cards/__init__.py | 45 | Python |
"""
Listas
Listas em Python funcionam como vetores/matrizes (arrays) em outras linguagens, com a diferença
de serem DINÂMICO e também de podermos colocar QUALQUER tipo de dado.
Linguagens C/Java: Arrays
- Possuem tamanho e tipo de dado fixo;
Ou seja, nestas linguagens se você criar um array do tipo int e com tamanho 5, este array
sera SEMPRE do tipo inteiro e poderá ter SEMPRE no máximo 5 valores.
Já em Python:
- Dinâmico: Não possui tamanho fixo; Ou seja, podemos criar a lista e simplesmente ir adicionando elementos;
- Qualquer tipo de dado; Não possuem tipo de dado fixo; Ou seja, podemos colocar qualquer tipo de dado;
As listas são mutáveis!
As listas em Python são representadas por colchetes: []
type([])
lista1 = [1, 99, 4, 27, 15, 22, 3, 1, 44, 42, 27]
lista2 = ['G', 'e', 'e', 'k', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'y']
lista3 = []
lista4 = list(range(11))
lista5 = list('Geek University')
# Podemos facilmente checar se determinado valor está contido na lista
num = 18
if num in lista4:
print(f'Encontrei o número {num}')
else:
print(f'Não encontrei o número {num}')
# Podemos facilmente ordenar uma lista
print(lista1)
lista1.sort()
print(lista1)
# Podemos facilmente contar o número de ocorrências de um valor em uma lista
print(lista1)
print(lista1.count(1))
print(lista5)
print(lista5.count('e'))
# Adicionar elementos em listas
# Para adicionar elementos em listas, utilizamos a função append
print(lista1)
lista1.append(42)
print(lista1)
# OBS: Com append, nós só conseguimos adicionar um (1) elementos por vez
# lista1.append(12, 14, 56) # Erro
lista1.append([8, 3, 1]) # Coloca a lista como elemento único (sublista)
print(lista1)
if [8, 3, 1] in lista1:
print('Encontrei a lista')
else:
print('Nao encontrei a lista')
lista1.extend([123, 44, 67]) # Coloca cada elemento da lista como valor adicional á lista
print(lista1)
# Podemos inserir um novo elemento na lista informando a posição do índice
# Isso nao substitui o valor inicial. O mesmo será deslocado para a direita da lista.
lista1.insert(2, 'Novo Valor')
print(lista1)
# Podemos facilmente juntar duas listas
lista1 = lista1 + lista2
# lista1.extend(lista2)
print(lista1)
# Podemos facilmente inverter uma lista
# Forma 1
lista1.reverse()
lista2.reverse()
print(lista1)
print(lista2)
# Forma 2
print(lista1[::-1])
print(lista2[::-1])
# Copiar uma lista
lista6 = lista2.copy()
print(lista6)
# Podemos contar quantos elementos existem dentro da lista
print(len(lista1))
# Podemos remover facilmente o último elemento de uma lista
# O pop não somente remove o último elemento, mas também o retorna
print(lista5)
lista5.pop()
print(lista5)
# Podemos remover um elemento pelo índice
# OBS: Os elementos á direita deste índice serão deslocados para a esquerda.
# OBS: Se não houver elemento no índice informado, teremos o erro IndexError
lista5.pop(2)
print(lista5)
# Podemos remover todos os elementos (Zerar a lista)
print(lista5)
lista5.clear()
print(lista5)
# Podemos facilmente repetir elementos em uma lista
nova = [1, 2, 3]
print(nova)
nova = nova * 3
print(nova)
# Podemos facilmente converter uma string para uma lista
# Exemplo 1
curso = 'Programação em Python Essencial'
print(curso)
curso = curso.split()
print(curso)
# OBS: Por padrão, o split separa os elementos da lista pelo espaço entre elas.
# Exemplo 2
curso = 'Programação,em,Python, Essencial'
print(curso)
curso = curso.split(',')
print(curso)
# Convertendo uma lista em uma string
lista6 = ['Programação', 'em', 'Python', 'Essencial']
print(lista6)
# Abaixo estamos falando: Pega a lista6, coloca o cifrão entre cada elemento e transforma em uma string
curso = ' '.join(lista6)
print(curso)
curso = '$'.join(lista6)
print(curso)
# Podemos realmente colocar qualquer tipo de dado em uma lista, inclusive misturando esses dados
lista6 = [1, 2.34, True, 'Geek', 'd', [1, 2, 3], 45345345345]
print(lista6)
print(type(lista6))
# Iterando sobre listas
# Exemplo 1 - Utilizando for
soma = 0
for elemento in lista1:
print(elemento)
soma = soma + elemento
print(soma)
# Exemplo 2 - Utlizando while
carrinho = []
produto = ''
while produto != 'sair':
print("Adicione um produto na lista ou digite 'sair' para sair: ")
produto = input()
if produto != 'sair':
carrinho.append(produto)
for produto in carrinho:
print(produto)
# Utilizando variáveis em listas
numeros = [1, 2, 3, 4, 5]
print(numeros)
num1 = 1
num2 = 2
num3 = 3
num4 = 4
num5 = 5
numeros = [num1, num2, num3, num4, num5]
print(numeros)
# Fazemos acessos aos elementos de forma indexada
cores = ['verde', 'amarelo', 'azul', 'branco']
print(cores[0]) # verde
print(cores[1]) # amarelo
print(cores[2]) # azul
print(cores[3]) # branco
# Fazer acesso aos elementos de forma indexada inversa
# Para entender melhor o índice negativo, pense na lista como um círculo, onde
# o final de um elemento está ligado ao início da lista
print(cores[-1]) # branco
print(cores[-2]) # azul
print(cores[-3]) # amarelo
print(cores[-4]) # verde
for cor in cores:
print(cor)
indice = 0
while indice < len(cores):
print(cores[indice])
indice = indice + 1
cores = ['verde', 'amarelo', 'azul', 'branco']
# Gerar índice em um for
for indice, cor in enumerate(cores):
print(indice, cor)
# Listas aceitam valores repetidos
lista = []
lista.append(42)
lista.append(42)
lista.append(33)
lista.append(33)
lista.append(42)
# Outros métodos não tão importantes mas também úteis
# Encontrar o índice de um elemento na lista
numeros = [5, 6, 7, 5, 8, 9, 10]
# Em qual índice da lista está o valor 6?
print(numeros.index(6))
# Em qual índice da lista está o valor 9??
print(numeros.index(9))
# print(numeros.index(19)) # Gera ValueError
# OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError
# OBS: Retorna o índice do primeiro elemento encontrado
print(numeros.index(5))
# Podemos fazer busca dentro de um range, ou seja, qual índice começar a buscar
print(numeros.index(5, 1)) # Buscando a partir do índice 1
print(numeros.index(5, 2)) # Buscando a partir do índice 2
print(numeros.index(5, 3)) # Buscando a partir do índice 3
# print(numeros.index(5, 4)) # Buscando a partir do índice 4
# OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError
# Podemos fazer busca dentro de um range, início/fim
print(numeros.index(8, 3, 6)) # Buscar o índice do valor 8, entre os índices 3 a 6
# Revisão do slicing
# lista[inicio:fim:passo]
# range(inicio:fim:passo)
# Trabalhando com slice de listas com o parâmetro 'início'
lista = [1, 2, 3, 4]
print(lista[1:]) # Iniciando no índice 1 e pegando todos os elementos restantes
# Trabalhando com slice de listas com o parâmetro 'fim'
print(lista[:2]) # Começa em 0, pega até o índice 2 - 1
print(lista[:4]) # Começa em 0, pega até o índice 4 - 1
print(lista[1:3]) # Começa em 1, pega até o índice 3 - 1
# Trabalhando com slice de listas com o parâmetro 'passo'
print(lista[1::2]) # Começa em 1, vai até o final, de 2 em 2
print(lista[::2]) # Começa em 0, vai até o final, de 2 em 2
# Invertendo valores em uma lista
nomes = ['Geek', 'University']
nomes[0], nomes[1] = nomes[1], nomes[0]
print(nomes)
nomes = ['Geek', 'University']
nomes.reverse()
print(nomes)
# Soma*, Valor Máximo*, Valor Mínimo*, Tamanho
# * Se os valores forem todos inteiros ou reais
lista = [1, 2, 3, 4, 5, 6]
print(sum(lista)) # Soma
print(max(lista)) # Máximo Valor
print(min(lista)) # Mínimo Valor
print(len(lista)) # Tamanho da Lista
# Transformar uma lista em tupla
lista = [1, 2, 3, 4, 5, 6]
print(lista)
print(type(lista))
tupla = tuple(lista)
print(tupla)
print(type(tupla))
# Desempacotamento de listas
lista = [1, 2, 3]
num1, num2, num3 = lista
print(num1)
print(num2)
print(num3)
# OBS: Se tivermos um número diferente de elementos na lista ou variáveis para receber os dados, teremos ValueError
# Copiando uma lista para outra (Shallow Copy e Deep Copy)
# Forma 1 - usando copy() (na verdade uma cópia rasa / shallow copy)
lista = [1, 2, 3]
print(lista)
nova = lista.copy() # Cópia
print(nova)
nova.append(4)
print(lista)
print(nova)
# Veja que ao utilizarmos lista.copy() copiamos os dados da lista para uma nova lista, e elas
# ficaram independentes: modificando uma lista, não afetamos a outra. Atenção: copy() faz na
# verdade uma cópia RASA (shallow copy); para copiar também listas aninhadas, usa-se copy.deepcopy()
# Forma 2 - Atribuição (não é uma cópia de verdade, apenas outra referência para a mesma lista)
lista = [1, 2, 3]
print(lista)
nova = lista # Cópia
print(nova)
nova.append(4)
print(lista)
print(nova)
# Veja que, ao utilizar atribuição (nova = lista), NÃO criamos uma cópia: as duas variáveis
# passam a referenciar o MESMO objeto, e por isso a modificação feita em uma "aparece" na outra.
# Em Python isso é apenas uma nova referência (alias), e não uma cópia de fato.
"""
| 25.703812 | 116 | 0.712949 | [
"MIT"
] | vdonoladev/aprendendo-programacao | Python/Programação_em_Python_Essencial/5- Coleções/listas.py | 8,882 | Python |
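One caveat worth adding to the copy discussion in the file above: `list.copy()` only looks fully independent because the example list holds plain integers. A minimal sketch of the difference between an alias, a shallow copy, and a true deep copy, using only the standard-library `copy` module:

```python
import copy

original = [1, 2, [3, 4]]       # note the nested inner list
alias = original                # not a copy: both names point at the same list
shallow = original.copy()       # new outer list, but the inner list is shared
deep = copy.deepcopy(original)  # new outer list and a new inner list

original[2].append(5)           # mutate the nested list in place

print(alias)    # [1, 2, [3, 4, 5]]  the alias sees every change
print(shallow)  # [1, 2, [3, 4, 5]]  the shallow copy shares the inner list
print(deep)     # [1, 2, [3, 4]]     only deepcopy is fully independent
```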
import sys
import numpy as np
import cv2
def overlay(img, glasses, pos):
    sx = pos[0]
    ex = pos[0] + glasses.shape[1]
    sy = pos[1]
    ey = pos[1] + glasses.shape[0]
    if sx < 0 or sy < 0 or ex > img.shape[1] or ey > img.shape[0]:
        return
    img1 = img[sy:ey, sx:ex]
    img2 = glasses[:, :, 0:3]
    alpha = 1. - (glasses[:, :, 3] / 255.)
    img1[..., 0] = (img1[..., 0] * alpha + img2[..., 0] * (1. - alpha)).astype(np.uint8)
    img1[..., 1] = (img1[..., 1] * alpha + img2[..., 1] * (1. - alpha)).astype(np.uint8)
    img1[..., 2] = (img1[..., 2] * alpha + img2[..., 2] * (1. - alpha)).astype(np.uint8)
# cam open
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print('cam not opened')
    sys.exit()
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 30, (w, h))
# XML file load
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
eye_classifier = cv2.CascadeClassifier('haarcascade_eye.xml')
if face_classifier.empty() or eye_classifier.empty():
    print('xml load error')
    sys.exit()
glasses = cv2.imread('glasses.png', cv2.IMREAD_UNCHANGED)
if glasses is None:
    print('png file load error')
    sys.exit()
ew, eh = glasses.shape[:2]
ex1, ey1 = 240, 300
ex2, ey2 = 660, 300
# Video process
while True:
    ret, frame = cap.read()
    if not ret:
        break
    faces = face_classifier.detectMultiScale(frame, scaleFactor=1.2, minSize=(100, 100), maxSize=(400, 400))
    for (x, y, w, h) in faces:
        faceROI = frame[y: y+h//2, x: x+w]
        eyes = eye_classifier.detectMultiScale(faceROI)
        if len(eyes) != 2:
            continue
        x1 = x + eyes[0][0] + (eyes[0][2] // 2)
        y1 = y + eyes[0][1] + (eyes[0][3] // 2)
        x2 = x + eyes[1][0] + (eyes[1][2] // 2)
        y2 = y + eyes[1][1] + (eyes[1][3] // 2)
        if x1 > x2:
            x1, y1, x2, y2 = x2, y2, x1, y1
        fx = (x2 - x1) / (ex2 - ex1)
        glasses2 = cv2.resize(glasses, (0, 0), fx=fx, fy=fx, interpolation=cv2.INTER_AREA)
        pos = (x1 - int(ex1 * fx), y1 - int(ey1 * fx))
        overlay(frame, glasses2, pos)
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
out.release()
cv2.destroyAllWindows() | 25.666667 | 106 | 0.566401 | [
"MIT"
] | FLY-CODE77/opencv | project/snowapp/snow.py | 2,387 | Python |
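The `overlay` helper in the file above blends the BGRA glasses image onto the frame channel by channel. A vectorized sketch of the same alpha-compositing idea, using broadcasting instead of three per-channel assignments (the array names here are illustrative, not taken from the project):

```python
import numpy as np

def blend_bgra_onto_bgr(background, overlay_bgra):
    # split the overlay into colour and a (H, W, 1) alpha weight in [0, 1]
    colour = overlay_bgra[:, :, :3].astype(np.float32)
    alpha = overlay_bgra[:, :, 3:4].astype(np.float32) / 255.0
    base = background.astype(np.float32)
    # standard "over" compositing: overlay weighted by its alpha
    blended = colour * alpha + base * (1.0 - alpha)
    return blended.astype(np.uint8)

# toy usage with random data in place of a camera frame and a PNG
frame_roi = np.random.randint(0, 256, (40, 60, 3), dtype=np.uint8)
glasses_bgra = np.random.randint(0, 256, (40, 60, 4), dtype=np.uint8)
print(blend_bgra_onto_bgr(frame_roi, glasses_bgra).shape)  # (40, 60, 3)
```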
import numpy as np
import pandas as pd
from tidyframe import nvl
def test_nvl_series():
    test_list = [0, 1, None, np.nan]
    test_series = pd.Series(test_list)
    nvl(test_series, 10)
def test_nvl_list():
    test_list = [0, 1, None, np.nan]
    nvl(test_list, 10)
def test_nvl_int():
    nvl(None, 10)
def test_nvl_str():
    nvl(None, 'abc')
def test_nvl_int_v2():
    nvl(1, 10)
| 14.769231 | 39 | 0.643229 | [
"MIT"
] | Jhengsh/tidyframe | tests/test_nvl.py | 384 | Python |
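The tests above only exercise `nvl`'s call signature, and the library itself is not shown. A rough stand-in with the null-coalescing behaviour the test names suggest (an assumption about the semantics, not tidyframe's actual implementation):

```python
import math
import pandas as pd

def nvl_sketch(value, default):
    # replace None/NaN with `default`, element-wise for lists and Series
    def fill(x):
        if x is None or (isinstance(x, float) and math.isnan(x)):
            return default
        return x
    if isinstance(value, pd.Series):
        return value.map(fill)
    if isinstance(value, list):
        return [fill(x) for x in value]
    return fill(value)

print(nvl_sketch([0, 1, None, float("nan")], 10))  # [0, 1, 10, 10]
print(nvl_sketch(None, "abc"))                     # abc
print(nvl_sketch(1, 10))                           # 1
```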
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from builtins import object
from builtins import str
except ImportError:
from __builtin__ import object
from __builtin__ import str
import targets_cfg
def pretty_list(lst, indent=8):
if lst is None or len(lst) == 0:
return ""
if len(lst) == 1:
return "\"%s\"" % lst[0]
separator = "\",\n%s\"" % (" " * indent)
res = separator.join(sorted(lst))
res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 4))
return res
class TARGETSBuilder(object):
def __init__(self, path):
self.path = path
self.targets_file = open(path, 'w')
self.targets_file.write(targets_cfg.rocksdb_target_header)
self.total_lib = 0
self.total_bin = 0
self.total_test = 0
self.tests_cfg = ""
def __del__(self):
self.targets_file.close()
def add_library(self, name, srcs, deps=None, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers,
deps=pretty_list(deps)))
self.total_lib = self.total_lib + 1
def add_rocksdb_library(self, name, srcs, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.rocksdb_library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers))
self.total_lib = self.total_lib + 1
def add_binary(self, name, srcs, deps=None):
self.targets_file.write(targets_cfg.binary_template % (
name,
pretty_list(srcs),
pretty_list(deps)))
self.total_bin = self.total_bin + 1
def register_test(self,
test_name,
src,
is_parallel,
extra_deps,
extra_compiler_flags):
exec_mode = "serial"
if is_parallel:
exec_mode = "parallel"
self.tests_cfg += targets_cfg.test_cfg_template % (
test_name,
str(src),
str(exec_mode),
extra_deps,
extra_compiler_flags)
self.total_test = self.total_test + 1
def flush_tests(self):
self.targets_file.write(targets_cfg.unittests_template % self.tests_cfg)
self.tests_cfg = ""
| 31.978495 | 80 | 0.599866 | [
"BSD-3-Clause"
] | 2acoin/2acoin | external/rocksdb/buckifier/targets_builder.py | 2,974 | Python |
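For readers skimming the builder above, it may help to see what `pretty_list` actually emits; a small illustration with made-up dependency names (the import path is assumed from the file location in the repository):

```python
from targets_builder import pretty_list  # assumed: run from the buckifier/ directory

print(repr(pretty_list([])))          # '' for an empty or missing list
print(repr(pretty_list(["folly"])))   # '"folly"' for a single item, already quoted
print(pretty_list(["rocksdb_lib", "bucket_lib"]))
# multi-item lists come back sorted, quoted, and indented one per line,
# ready to be dropped inside a Buck TARGETS list literal, roughly:
#
#         "bucket_lib",
#         "rocksdb_lib",
#
```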
# Copyright 2017 Bruno Ribeiro, Mayank Kakodkar, Pedro Savarese
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from bean.phase import Phase
def parse_top_level_arguments():
parser = argparse.ArgumentParser(description='Fit RBM to MNIST using different gradient estimators')
parser.add_argument('--local', '-l', dest='LOCAL', action='store_const',
const=True, default=False,
help='Enables Local run')
parser.add_argument('--basefolder', '-b', dest='BASE_FOLDER', action='store'
, default='/Users/mkakodka/Code/Research/RBM_V1/',
help='Base Folder for all directory paths')
parser.add_argument('--phase', '-p', dest='PHASE', action='store'
, default='DATA',
help=str(Phase.__dict__))
parser.add_argument('-n', dest='RUNS', action='store'
, default='1',
help='Number of runs')
parser.add_argument('-iteration', dest='iteration', action='store'
, default='-1',
help='iteration')
parser.add_argument('--method', '-m', dest='method', action='store',
default="MCLV",
help='Method to use')
parser.add_argument('-sfs', dest='sample_from_supernode', action='store_const',
const=True, default=False,
help='Sample from supernode for tour distribution')
parser.add_argument('-cdk', dest='cdk', action='store',
default=1,
help='contrastive divergence steps limit')
parser.add_argument('-mclvk', dest='mclvk', action='store',
default=1,
help='tour length limit')
parser.add_argument('-wm', dest='warmup', action='store',
default=2,
help='warmup epochs')
parser.add_argument('-tot', '--total-epochs', dest='total_epochs', action='store',
default=100,
help='total epochs')
parser.add_argument('-mbs', '--mini-batch-size', dest='mini_batch_size', action='store',
default=128,
help='mini batch size')
parser.add_argument('--learning-rate', '-lr', dest='learning_rate', action='store',
default=0.1,
help='learning rate')
parser.add_argument('--weight-decay', '-wd', dest='weight_decay', action='store',
default=0.0,
help='weight decay')
parser.add_argument('--momentum', '-mm', dest='momentum', action='store',
default=0.0,
help='momentum')
parser.add_argument('--plateau', '-pt', dest='plateau', action='store',
default=1000,
help='Robbins Munro Schedule plateau length')
parser.add_argument('--hidden', dest='num_hidden', action='store',
default=16,
help='Number of hidden units')
parser.add_argument('--supernode-samples', '-ss', dest='supernode_samples', action='store',
default=1,
help='Number of samples to include in the supernode')
parser.add_argument('--gpu-id', dest='gpu_id', action='store',
default=-1,
help='gpu_id')
parser.add_argument('--gpu-limit', dest='gpu_limit', action='store',
default=18,
help='gpu_limit')
parser.add_argument('--filename', dest='filename', action='store',
default='temp_local',
help='filename')
parser.add_argument('--final-likelihood', dest='final_likelihood', action='store_const',
const=True, default=False,
help='compute final likelihood')
parser.add_argument('--log-tour', dest='LOG_TOUR', action='store_const',
const=True, default=False,
help='LOG_TOUR')
parser.add_argument('--name', dest='name', action='store',
default=None,
help='Name this run')
args = parser.parse_args()
return args.LOCAL, args.BASE_FOLDER, args
LOCAL, BASE_FOLDER, ARGS = parse_top_level_arguments()
print("Config.BASE_FOLDER=%s" % BASE_FOLDER)
print("Config.LOCAL=%s" % LOCAL)
DATA_FOLDER = BASE_FOLDER + 'data/'
MODEL_FOLDER = BASE_FOLDER + 'data/model/'
OUTPUT_FOLDER = BASE_FOLDER + 'output/'
MNIST_FOLDER = BASE_FOLDER + 'py/MNIST_data/'
PLOT_OUTPUT_FOLDER = BASE_FOLDER + 'plots/'
SQLITE_FILE = DATA_FOLDER + 'results.db'
SERVER_SQLITE_FILE = DATA_FOLDER + 'results_server.db' if LOCAL else SQLITE_FILE
GPU_LIMIT = int(ARGS.gpu_limit)
USE_GPU = torch.cuda.is_available() and not LOCAL
LOG_TOUR = ARGS.LOG_TOUR
TOUR_LENGTHS_TABLE = "TOUR_LENGTH_DISTRIBUTIONS"
# These are hardcoded for the MNIST dataset
WIDTH = 28
HEIGHT = 28
# These options do not work right now, we'll fix them soon
PIN = False
GPU_ID = int(ARGS.gpu_id) if int(ARGS.gpu_id) >= 0 else None
| 44.346154 | 104 | 0.580225 | [
"Apache-2.0"
] | PurdueMINDS/MCLV-RBM | py/util/config.py | 5,765 | Python |
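A short sketch of how flags like `USE_GPU` and `GPU_ID` would typically be consumed once the GPU path is wired up (the file notes these options are not functional yet, so this is purely illustrative and not the repository's actual usage):

```python
import torch

USE_GPU = torch.cuda.is_available()   # stands in for util.config.USE_GPU
GPU_ID = 0                            # stands in for util.config.GPU_ID

device = torch.device(f"cuda:{GPU_ID}" if USE_GPU and GPU_ID is not None else "cpu")
weights = torch.zeros(16, 784, device=device)  # e.g. an RBM weight matrix (hidden x visible)
print(device, tuple(weights.shape))
```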
# The Core of Toby
from flask import Flask, request, jsonify, g
import os
import logging
from ax.log import trace_error
from ax.connection import DatabaseConnection
from ax.datetime import now
from ax.tools import load_function, get_uuid, decrypt
from ax.exception import InvalidToken
logger = logging.getLogger('werkzeug')
debug_flg = True if os.getenv('TOBY_DEBUG', 'True') == 'True' else False
token = os.environ['TOBY_TOKEN']
app = Flask('Toby')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.logger.setLevel(logging.DEBUG if debug_flg else logging.INFO)
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    if not hasattr(g, 'db'):
        g.db = DatabaseConnection(os.getenv('TOBY_DB_USER', 'toby'), os.environ['TOBY_DB_PASSWORD'])
    return g.db
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    if hasattr(g, 'db'):
        g.db.disconnect()
    if error:
        logger.error('Database connection closed because of :' + str(error))
@app.route("/")
def ping():
    return "<h1 style='color:blue'>Hello There! This is Toby</h1>"
@app.route("/process")
def process():
    request_id = None
    try:
        in_param = request.get_json(force=True, silent=False, cache=False)
        if decrypt(in_param['request_token']) != token:
            # verify token
            raise InvalidToken(in_param)
        if 'request_id' not in in_param:
            request_id = get_uuid()
            in_param['request_id'] = request_id
        else:
            request_id = in_param['request_id']
        if 'request_timestamp' not in in_param:
            in_param['request_timestamp'] = now()
        in_param['logger'] = logger
        in_param['get_db_connection'] = get_db
        func = load_function(in_param)
        resp = func()
    except:
        e = trace_error(logger)
        resp = {'request_id': request_id, 'request_status': 'error', 'request_error': str(e[-1])}
    return jsonify(resp)
if __name__ == "__main__":
    app.run()
| 30.157143 | 100 | 0.658456 | [
"MIT"
] | axxiao/toby | toby.py | 2,111 | Python |
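A hedged sketch of what a caller of the `/process` endpoint might send. The `request_token` value and the keys consumed by `load_function` are placeholders: producing a valid token depends on the `encrypt`/`decrypt` pair in `ax.tools`, which is not shown here, and the route as written only registers GET, so the JSON body rides on a GET request.

```python
import requests  # third-party HTTP client, used only for this illustration

payload = {
    "request_token": "<encrypted token matching TOBY_TOKEN>",  # placeholder
    "function": "some.module.entrypoint",  # hypothetical key read by load_function
}
resp = requests.get("http://localhost:5000/process", json=payload)
print(resp.json())  # e.g. {"request_id": "...", "request_status": "error", ...}
```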
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
Response message for an feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
| 40.96281 | 107 | 0.632705 | [
"Apache-2.0"
] | JakobSteixner/google-ads-python | google/ads/googleads/v10/services/services/feed_service/client.py | 19,826 | Python |
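A rough usage sketch for the generated client above, grounded in the methods shown in this file (the import path is inferred from the file location, and the service-account file, customer ID, and feed ID are placeholders; a real create operation would need more fields than a remove):

```python
from google.ads.googleads.v10.services.services.feed_service import FeedServiceClient
from google.ads.googleads.v10.services.types import feed_service

client = FeedServiceClient.from_service_account_file("service_account.json")

operation = feed_service.FeedOperation()
# remove takes a resource name; feed_path() builds one from customer and feed IDs
operation.remove = client.feed_path("1234567890", "987654321")

response = client.mutate_feeds(customer_id="1234567890", operations=[operation])
for result in response.results:
    print(result.resource_name)
```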
# Standard Library
import unittest
# YouTubeTimestampRedditBot
from src.utils.youtube import is_youtube_url_without_timestamp
class Youtube(unittest.TestCase):
    def test_is_youtube_url_without_timestamp(self):
        dicts = [
            # no timestamps
            {"input": "https://youtube.com/asdf", "expected_output": True},
            {"input": "wwww.youtube.com/asdf", "expected_output": True},
            {"input": "wwww.youtu.be/asdf", "expected_output": True},
            # has timestamps
            {"input": "https://youtube.com/asdf?t=1m", "expected_output": False},
            {"input": "wwww.youtube.com?watch=asdf&t=1m", "expected_output": False},
            {"input": "wwww.youtu.be/asdf?t=12s", "expected_output": False},
            # not youtube
            {"input": "wwww.asdf.com", "expected_output": False},
            {"input": "https://youfoo.com", "expected_output": False},
        ]
        for (i, d) in enumerate(dicts):
            with self.subTest(i=i):
                assert (
                    is_youtube_url_without_timestamp(d["input"]) == d["expected_output"]
                )
| 39.172414 | 88 | 0.582746 | [
"MIT"
] | ConorSheehan1/YouTubeTimestampRedditBot | tests/unit/utils/test_youtube.py | 1,136 | Python |
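The `src.utils.youtube` module under test is not shown. One possible implementation that satisfies every case in the table above (an assumption, not necessarily the project's real code):

```python
from urllib.parse import urlparse, parse_qs

def is_youtube_url_without_timestamp(url: str) -> bool:
    # urlparse only fills netloc when a scheme is present, so add one if missing
    parsed = urlparse(url if "://" in url else f"https://{url}")
    host = parsed.netloc.lower()
    if "youtube.com" not in host and "youtu.be" not in host:
        return False  # not a YouTube link at all
    return "t" not in parse_qs(parsed.query)  # True only when no t= timestamp

assert is_youtube_url_without_timestamp("wwww.youtube.com/asdf")
assert not is_youtube_url_without_timestamp("wwww.youtu.be/asdf?t=12s")
assert not is_youtube_url_without_timestamp("https://youfoo.com")
```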
'''
Harness Toolset
Copyright (c) 2015 Rich Kelley
Contact:
@RGKelley5
RK5DEVMAIL[A T]gmail[D O T]com
www.frogstarworldc.com
License: MIT
'''
import threading
import builtins
import sys
from random import randint
from harness.core import framework
from harness.core import threads
from collections import namedtuple
from queue import Queue
class ModuleFrame(framework.Framework):
def __init__(self, about):
# -----------------------------------------------------
# Thread Events must be initialized before framework
# due to print function thread controls in ModuleFrame
# -----------------------------------------------------
self.stopper = threading.Event()
self.stopper.clear()
self.allow_print = threading.Event()
self.allow_print.isSet()
self.stdin_q = Queue()
self.FORCE_THREAD = False
# -----------------------------------------------------
framework.Framework.__init__(self)
self.prompt = "H_MOD(" + about["name"] + ") "
self.thread_to_return = None
self.module_id = randint(1, 100000)
# TODO: add exception handling for undeclared keys
self.name = about['name']
self.author = about['author']
self.info = about['info']
self.contact = about['contact']
self.version = about['version']
def isrunning(self):
if self.stopper.isSet():
return False
return True
def print(self, *objects, sep=' ', end='\n', file=sys.stdout, flush=False):
if self.allow_print.isSet():
return builtins.print(*objects, sep=sep, end=end, file=file, flush=flush)
def print_error(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_error(self, outstr)
def print_output(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_output(self, outstr)
def print_debug(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_debug(self, outstr)
def add_session(self, remote_conn_info=None, local_conn_info=None, stype=None):
return framework.Framework.add_session(self, remote_conn_info=remote_conn_info, local_conn_info=local_conn_info, id=self.module_id, stype=stype)
def go(self, _globals):
self.framework_globals = _globals
self.cmdloop()
return self.thread_to_return, self.framework_globals # Return thread back to base for management
def do_back(self, args=None):
return True
def do_run(self, args=None):
if args:
_args = framework.parse_args(args)
else:
_args = (" ")
if not self.options.required_set():
self.allow_print.set()
self.print_error("Required options not set")
self.print_error("Check 'Required' column\n")
self.show_options()
self.allow_print.clear()
return
self.stopper.clear()
self.allow_print.set()
# Wrap the module in a Thread object and return to base
if self.FORCE_THREAD or _args[0].lower() in ('job', 'thread', 'j', 't'):
if self.FORCE_THREAD:
self.print_output("Module must be run in background!")
self.allow_print.clear()
t = threads.ModuleThread(target=self, args=[self.stopper, self.allow_print, self.module_id, self.stdin_q])
t.daemon = True
self.thread_to_return = t
return True
else:
# Normal run in foreground
try:
self.run_module()
# Exit the module cleanly without exiting framework
except KeyboardInterrupt:
pass
finally:
self.cleanup_exit()
def show_info(self, args=None):
print("\n\tModule Name: ", self.name)
print("\tAuthors: ", self.author)
print("\tContact: ", self.contact)
print("\tInfo: ", self.info)
print("\tVersion: ", self.version)
print()
def pre_run(self, args=None):
pass
def run_module(self, args=None):
pass
def post_run(self, args=None):
pass
def cleanup_exit(self):
self.print_debug("Cleaning up...")
self.stopper.clear()
self.post_run()
self.allow_print.clear()
self.print_output("Exiting module...")
return True
| 20.919786 | 146 | 0.674335 | [
"MIT"
] | Rich5/Harness | harness/core/module.py | 3,912 | Python |
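A minimal sketch of what a concrete Harness module built on `ModuleFrame` might look like. The `about` keys mirror the ones read in `__init__`; the class name and greeting are illustrative rather than taken from the toolset:

```python
from harness.core import module

class Module(module.ModuleFrame):
    def __init__(self):
        about = {
            "name": "hello_world",
            "author": "example author",
            "info": "prints a greeting when run",
            "contact": "n/a",
            "version": "0.1",
        }
        module.ModuleFrame.__init__(self, about)

    def run_module(self, args=None):
        # print_output only emits while allow_print is set (see do_run above)
        self.print_output("Hello from a Harness module!")
```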
"""
load the dataset example and return the maximum image size, which is used to definite the spike generation network;
images with different size are focused onto the center of the spike generation network;
the generated poisson spikes are recorded and saved for further use.
"""
"""
on 12th November
by xiaoquinNUDT
version 0.0
"""
"""
test: no
"""
"""
optimization record:
"""
##-----------------------------------------------------------------------------------------
## module import
##-----------------------------------------------------------------------------------------
import brian2 as b2
from brian2 import *
import numpy as np
import cPickle as pickle
import os
import sys
from struct import unpack
np.set_printoptions(threshold = np.inf)
##-----------------------------------------------------------------------------------------
## code generation device setup
##-----------------------------------------------------------------------------------------
b2.defaultclock.dt = 0.2*b2.ms
b2.core.default_float_dtype = float64 ### reconsider
b2.core.default_integer_dtype = int16 ### retest
codegen.target = 'cython' # default 'auto', other setting include numpy, weave, cython
#clear_cache('cython') #clear the disk cache manually, or use the clear_cache function
codegen.cpp_compiler = 'gcc'
codegen.cpp_extra_compile_args_gcc = ['-ffast-math -march=native']
## Cython runtime codegen preferences
'''
Location of the cache directory for Cython files. By default,
will be stored in a brian_extensions subdirectory
where Cython inline stores its temporary files (the result of get_cython_cache_dir()).
'''
codegen.runtime_cython_cache_dir = None
codegen.runtime_cython_delete_source_files = True
codegen.runtime_cython_multiprocess_safe = True
##-----------------------------------------------------------------------------------------
## self-definition method
##-----------------------------------------------------------------------------------------
def get_dataset_example_mnist(path_dataset, name_dataset, using_test_dataset):
"""
read input images (vector), dump into
'.pickle' format for next load, and return it as a numpy array.
"""
flag_dataloaded = 0
if name_dataset != 'mnist_test_example' and name_dataset != 'mnist_train_example':
raise Exception('You have provide the wrong dataset name or path, please check carefully')
else:
dataset_path_name = path_dataset + name_dataset
if os.path.isfile('%s.pickle' % dataset_path_name):
example = pickle.load(open('%s.pickle' % dataset_path_name))
flag_dataloaded = 1
else:
flag_datasetsource = os.path.isfile(path_dataset+'train-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'train-labels.idx1-ubyte') & \
os.path.isfile(path_dataset+'t10k-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'t10k-labels.idx1-ubyte')
if flag_datasetsource == False:
raise Exception("You haven't downloaded the dataset into the %s!" % path_dataset)
else:
if using_test_dataset:
image = open(path_dataset+'t10k-images.idx3-ubyte', 'rb')
else:
image = open(path_dataset+'train-images.idx3-ubyte', 'rb')
# get metadata for images
image.read(4) # skip the magic number
num_image = unpack('>I', image.read(4))[0]
height_image = unpack('>I', image.read(4))[0]
length_image = unpack('>I', image.read(4))[0]
example = np.zeros((num_image, height_image, length_image), dtype = np.uint8)
for i in xrange(num_image):
example[i] = [[unpack('>B', image.read(1))[0] for m in xrange(length_image)] for n in xrange(height_image)]
pickle.dump(example, open('%s.pickle' % dataset_path_name, 'wb'))
# the dataset has been readed and processed
flag_dataloaded = 1
if flag_dataloaded == 0:
raise Exception('Failed to load the required dataset, please check the name_dataset and other printed information!')
else:
return example
## file system
path_dataset = '../dataset_mnist/'
spike_record_path = './'
## input parameter
using_test_dataset = bool(int(sys.argv[1]))
print(using_test_dataset)
num_example = int(sys.argv[2])
print(num_example)
num_iteration = int(sys.argv[3])
print(num_iteration)
height_receptive_field = 28
length_receptive_field = 28
if using_test_dataset:
num_per_dataset = 10000
name_dataset = 'mnist_test_example'
name_spike_record = 'mnist_spike_record_test'
else:
num_per_dataset = 60000
name_dataset = 'mnist_train_example'
name_spike_record = 'mnist_spike_record_train'
## network setting parameters
input_intensity = 2.0
population_IN = height_receptive_field * length_receptive_field
working_time = 350 * b2.ms
resting_time = 150 * b2.ms
neuron_group_record = {}
spike_monitor_record = {}
name_neuron_group = 'Poisson_spike'
## create input poisson spike train
neuron_group_record[name_neuron_group] = b2.PoissonGroup(population_IN, 0*Hz)
spike_monitor_record[name_neuron_group] = b2.SpikeMonitor(neuron_group_record[name_neuron_group])
network_record = b2.Network()
for obj_sim in [neuron_group_record, spike_monitor_record]:
for key in obj_sim:
network_record.add(obj_sim[key])
## dataset loading and record the input poisson spike
input_example = get_dataset_example_mnist(path_dataset, name_dataset, using_test_dataset)
number_example = 0
while number_example < num_example:
input_image = input_example[(number_example + num_iteration * num_example) % num_per_dataset]
height_example, length_example = input_image.shape
length_margin = int((length_receptive_field - length_example)/2)
height_margin = int((height_receptive_field - height_example)/2)
input_rate = np.zeros((height_receptive_field, length_receptive_field), dtype = np.float32)
for i in xrange(height_example):
for j in xrange(length_example):
input_rate[i + height_margin, j + length_margin] = input_image[i, j]
neuron_group_record[name_neuron_group].rates = input_rate.flatten() / 8.0 * input_intensity * Hz
network_record.run(working_time, report = 'text')
neuron_group_record[name_neuron_group].rates = 0*Hz
network_record.run(resting_time)
number_example += 1
spike_index = np.asarray(spike_monitor_record[name_neuron_group].i, dtype = np.int16)
spike_time = np.asarray(spike_monitor_record[name_neuron_group].t, dtype = np.float64)
if using_test_dataset:
spike_record_path_name = spike_record_path + name_spike_record + '_' + str(num_example)
else:
spike_record_path_name = spike_record_path + name_spike_record + '_' + str(num_example) + '_' + str(num_iteration)
file_spike_record = open('%s.pickle' % spike_record_path_name, 'wb')
pickle.dump(spike_index, file_spike_record)
pickle.dump(spike_time, file_spike_record)
file_spike_record.close()
| 45.909677 | 127 | 0.657532 | [
"BSD-2-Clause"
] | Mary-Shi/Three-SNN-learning-algorithms-in-Brian2 | Spike generation/spike_recorder_focal.py | 7,116 | Python |
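Because the recorder pickles the index array and then the time array into the same file, they must be read back in that order. A small loader sketch (the file name is just an example matching the writer's naming scheme):

```python
import cPickle as pickle  # the recorder above targets Python 2

record_name = 'mnist_spike_record_train_10000_0.pickle'  # example output of the recorder
with open(record_name, 'rb') as file_spike_record:
    spike_index = pickle.load(file_spike_record)  # neuron indices (int16)
    spike_time = pickle.load(file_spike_record)   # spike times in seconds (float64)
print(spike_index.shape)
print(spike_time.shape)
```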
# https://repl.it/@thakopian/day-4-2-exercise#main.py
# write a program which will select a random name from a list of names
# name selected will pay for everyone's bill
# cannot use choice() function
# inputs for the names - Angela, Ben, Jenny, Michael, Chloe
# import modules
import random
# set varialbles for input and another to modify the input to divide strings by comma
names_string = input("Give me everybody's names, separated by a comma. ")
names = names_string.split(", ")
# get name at index of list (example)
print(names[0])
# you can also print len of the names to get their range
print(len(names))
# set random module for the index values
# > this is standard format > random.randint(0, x)
# using the len as a substitute for x in the randint example with a variable set to len(names)
num_items = len(names)
# num_items - 1 in place of x to get the offset of the len length to match a starting 0 position on the index values
# set the function to a variable
choice = random.randint(0, num_items - 1)
# assign the mutable name variable with an index of the choice variable to another variable for storing the index value of the name based on the index vaule
person_who_pays = names[choice]
# print that stored named variable out with a message
print(person_who_pays + " is going to buy the meal today")
#######
# This exercise isn't a practical application of random choice since it doesn't use the .choice() function
# the idea is to replace variables, learn by retention and problem solve
# create your own random choice function to understand how the code can facilitate that withouth the .choice() function
# that way you learn how to go through problem challenges and how to create your own workaround in case the out of the box content isn't everything you need for a given problem
| 41.295455 | 176 | 0.760044 | [
"MIT"
] | thakopian/100-DAYS-OF-PYTHON-PROJECT | BEGIN/DAY_04/04.1-day-4-2-exercise-solution.py | 1,817 | Python |
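For contrast with the manual index approach the exercise practices, the `choice()` function it deliberately avoids collapses the whole selection into one line:

```python
import random

names = ["Angela", "Ben", "Jenny", "Michael", "Chloe"]
# what randint-on-an-index is emulating by hand
person_who_pays = random.choice(names)
print(person_who_pays + " is going to buy the meal today!")
```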
# -*- coding: utf-8 -*-
from .expr import *
def_Topic(
Title("Legendre polynomials"),
Section("Particular values"),
Entries(
"9bdf22",
"217521",
"d77f0a",
"9b7f05",
"a17386",
"13f971",
"a7ac51",
"3df748",
"674afa",
"85eebc",
),
Section("Recurrence and functional equations"),
Entries(
"0010f3",
"367ac2",
"27688e",
"925fdf",
),
Section("Generating functions"),
Entries(
"d84519",
),
Section("Rodrigues' formula"),
Entries(
"4cfeac",
),
Section("Integrals"),
Entries(
"e36542",
),
Section("Sum representations"),
Entries(
"c5dd9b",
"f0569a",
"7a85b7",
),
Section("Hypergeometric representations"),
Entries(
"9395fc",
"f55f0a",
"3c87b9",
"6cd4a1",
"859445",
),
Section("Bounds and inequalities"),
Entries(
"1ba9a5",
"155343",
"ef4b53",
"b786ad",
"60ac50",
"59e5df",
"3b175b",
"6476bd",
),
Section("Analytic properties"),
Entries(
"40fa59",
"d36fd7",
"99e62f",
"7680d3",
"22a42f",
"415911",
"df439e",
"0745ee",
"b2d723",
),
Section("Gauss-Legendre quadrature"),
SeeTopics("Gaussian quadrature"),
Entries(
"ea4754",
"47b181",
),
Section("Bounds and inequalities"),
Subsection("Turán's inequalities"),
Entries(
"c8d10e",
"227d60",
),
)
make_entry(ID("0010f3"),
Formula(Equal(LegendrePolynomial(n,-z), (-1)**n * LegendrePolynomial(n,z))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("367ac2"),
Formula(Equal((n+1)*LegendrePolynomial(n+1,z) - (2*n+1)*z*LegendrePolynomial(n,z) + n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("27688e"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,2)) - 2*z*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*(n+1)*LegendrePolynomial(n,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("925fdf"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*z*LegendrePolynomial(n,z) - n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("9bdf22"),
Formula(Equal(LegendrePolynomial(0,z), 1)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("217521"),
Formula(Equal(LegendrePolynomial(1,z), z)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("d77f0a"),
Formula(Equal(LegendrePolynomial(2,z), Div(1,2)*(3*z**2 - 1))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("9b7f05"),
Formula(Equal(LegendrePolynomial(3,z), Div(1,2)*(5*z**3 - 3*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a17386"),
Formula(Equal(LegendrePolynomial(4,z), Div(1,8)*(35*z**4 - 30*z**2 + 3))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("13f971"),
Formula(Equal(LegendrePolynomial(5,z), Div(1,8)*(63*z**5 - 70*z**3 + 15*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a7ac51"),
Formula(Equal(LegendrePolynomial(n,1), 1)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("3df748"),
Formula(Equal(LegendrePolynomial(n,-1), (-1)**n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("674afa"),
Formula(Equal(LegendrePolynomial(2*n,0), ((-1)**n / 4**n) * Binomial(2*n,n))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("85eebc"),
Formula(Equal(LegendrePolynomial(2*n+1,0), 0)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("d84519"),
Formula(Equal(Sum(LegendrePolynomial(n,x) * z**n, Tuple(n, 0, Infinity)),
1 / Sqrt(1 - 2*x*z + z**2))),
Variables(x, z),
Assumptions(And(Element(x, ClosedInterval(-1,1)), Element(z, CC), Less(Abs(z), 1))))
make_entry(ID("4cfeac"),
Formula(Equal(LegendrePolynomial(n,z),
Div(1,2**n * Factorial(n)) * Derivative((t**2-1)**n, Tuple(t, z, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0))), Element(z, CC)))
make_entry(ID("e36542"),
Formula(Equal(Integral(LegendrePolynomial(n, x) * LegendrePolynomial(m, x), Tuple(x, -1, 1)), Div(2,2*n+1) * KroneckerDelta(n, m))),
Variables(n, m),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(m, ZZGreaterEqual(0)))))
make_entry(ID("c5dd9b"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum(Binomial(n,k)**2 * (z-1)**(n-k) * (z+1)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f0569a"),
Formula(Equal(LegendrePolynomial(n, z), Sum(Binomial(n,k) * Binomial(n+k,k) * Div(z-1,2)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("7a85b7"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum((-1)**k * Binomial(n,k) * Binomial(2*n-2*k,n) * z**(n-2*k), Tuple(k, 0, Floor(n/2))))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("9395fc"),
Formula(Equal(LegendrePolynomial(n, z), Hypergeometric2F1(-n, n+1, 1, (1-z)/2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f55f0a"),
Formula(Equal(LegendrePolynomial(n, z), Binomial(2*n,n) * (z/2)**n * Hypergeometric2F1(-(n/2), (1-n)/2, Div(1,2)-n, 1/z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("3c87b9"),
Formula(Equal(LegendrePolynomial(n, z), Div(z-1,2)**n * Hypergeometric2F1(-n, -n, 1, (z+1)/(z-1)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(1))))))
make_entry(ID("6cd4a1"),
Formula(Equal(LegendrePolynomial(2*n, z), Div((-1)**n, 4**n) * Binomial(2*n,n) * Hypergeometric2F1(-n, n+Div(1,2), Div(1,2), z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC)))))
make_entry(ID("859445"),
Formula(Equal(LegendrePolynomial(2*n+1, z), Div((-1)**n, 4**n) * (2*n+1) * Binomial(2*n,n) * z * Hypergeometric2F1(-n, n+Div(3,2), Div(3,2), z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC)))))
make_entry(ID("1ba9a5"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 1)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
# todo: also valid on CC?
make_entry(ID("155343"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 2*BesselI(0,2*n*Sqrt(Abs(x-1)/2)), 2*Exp(2*n*Sqrt(Abs(x-1)/2)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(x, RR))))
make_entry(ID("ef4b53"),
Formula(LessEqual(Abs(LegendrePolynomial(n,z)), Abs(LegendrePolynomial(n, Abs(z)*ConstI)), (Abs(z)+Sqrt(1+Abs(z)**2))**n)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("b786ad"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (n*(n+1))/2)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("60ac50"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (2**Div(3,2) / Sqrt(ConstPi)) * (n**Div(1,2) / (1 - x**2)**Div(3,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("59e5df"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), ((n-1)*n*(n+1)*(n+2))/8)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("3b175b"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), (2**Div(5,2) / Sqrt(ConstPi)) * (n**Div(3,2) / (1 - x**2)**Div(5,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("6476bd"),
    Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, r))), (2**(r+Div(1,2)) / Sqrt(ConstPi)) * (n**(r-Div(1,2)) / (1 - x**2)**((2*r+1)/4)))),
Variables(n, r, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(r, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("40fa59"),
Formula(Equal(HolomorphicDomain(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), CC)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("d36fd7"),
Formula(Equal(Poles(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set(UnsignedInfinity))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("99e62f"),
Formula(Equal(EssentialSingularities(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("7680d3"),
Formula(Equal(BranchPoints(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("22a42f"),
Formula(Equal(BranchCuts(LegendrePolynomial(n,z), z, CC), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("415911"),
Formula(Equal(Cardinality(Zeros(LegendrePolynomial(n,z), z, Element(z, CC))), n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("df439e"),
Formula(Subset(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), OpenInterval(-1,1))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("0745ee"),
Formula(Equal(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), SetBuilder(LegendrePolynomialZero(n,k), k, Element(k, ZZBetween(1, n))))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("b2d723"),
Formula(Equal(LegendrePolynomial(n, Conjugate(z)), Conjugate(LegendrePolynomial(n, z)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
# Bounds and inequalities
make_entry(ID("c8d10e"),
Formula(GreaterEqual(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, ClosedInterval(-1, 1)))))
make_entry(ID("227d60"),
Formula(Greater(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, OpenInterval(-1, 1)))))
| 35.141956 | 172 | 0.621095 | [
"MIT"
] | pascalmolin/fungrim | formulas/legendre_polynomial.py | 11,141 | Python |
import asyncio
import time
import pytest
from async_lru import alru_cache
pytestmark = pytest.mark.asyncio
async def test_expiration(check_lru, loop):
@alru_cache(maxsize=4, expiration_time=2, loop=loop)
async def coro(val):
return val
inputs = [1, 2, 3]
coros = [coro(v) for v in inputs]
ret = await asyncio.gather(*coros, loop=loop)
assert ret == inputs
check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=4)
    time.sleep(1)  # ~1s old, still inside the 2-second expiration window: next call is a hit
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=3, cache=3, tasks=0, maxsize=4)
    time.sleep(3)  # past the 2-second expiration window: the entry is stale, so this is a miss
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=4, cache=3, tasks=0, maxsize=4)
| 23.363636 | 66 | 0.658885 | [
"MIT"
] | vera1118/async_lru | tests/test_expiration.py | 771 | Python |
from pathlib import Path
def dir_touch(path_file) -> None:
Path(path_file).mkdir(parents=True, exist_ok=True)
def file_touch(path_file) -> None:
p = Path(path_file)
p.parents[0].mkdir(parents=True, exist_ok=True)
p.touch()
def index_or_default(lst, val, default=-1):
return lst.index(val) if val in lst else default
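# Illustrative values (not from the project): index_or_default(['a', 'b'], 'b') == 1,
# while a missing value falls back to the default, index_or_default(['a', 'b'], 'z') == -1.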
def print_info(logger, message):
print(message)
logger.info(message)
| 20.047619 | 54 | 0.703088 | [
"MIT"
] | r3w0p/memeoff | src/tools.py | 421 | Python |
from django.db import models
import os
def get_image_path(instance, filename):
return os.path.join('pics', str(instance.id), filename)
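# Illustrative only: for a saved instance with id 3 and an upload named "rex.jpg",
# this returns os.path.join('pics', '3', 'rex.jpg'), i.e. 'pics/3/rex.jpg' on POSIX paths.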
# Create your models here.
class Pets(models.Model):
pet_foto = models.ImageField(upload_to=get_image_path, blank=True, null=True)
DOG = 'C'
CAT = 'G'
ESPECIE_CHOICES = (
(DOG, 'Cachorro'),
(CAT, 'Gato')
)
especie = models.CharField(max_length=1, choices=ESPECIE_CHOICES, default=DOG)
PEQ = 'Pq'
MED = 'Md'
GDE = 'Gd'
PORTE_CHOICES = (
(PEQ, 'Pequeno'),
(MED, 'Médio'),
(GDE, 'Grande')
)
porte = models.CharField(max_length=2, choices=PORTE_CHOICES, default=GDE)
FILHOTE = 'F'
ADULTO = 'A'
IDADE_CHOICES = (
(FILHOTE, 'Filhote'),
(ADULTO, 'Adulto')
)
nome = models.CharField(max_length=50, null=False)
idade = models.CharField(max_length=1, choices=IDADE_CHOICES, default=ADULTO)
raca = models.CharField(max_length=100, null=False)
obs = models.TextField(max_length=500, null=True, blank=True)
def __str__(self):
return "pet_foto: {}\nEspecie: {}\nPorte: {}\nNome: {}\nIdade: {}\nRaça: {}\nObs.: {}"\
.format(self.pet_foto, self.especie, self.porte, self.nome, self.idade, self.raca, self.obs)
| 30.431818 | 101 | 0.605676 | [
"MIT"
] | JuniorGunner/ConcilBackendTest | src/doghouse/models.py | 1,341 | Python |
# coding: utf-8
"""
TextMagic API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReopenChatsBulkInputObject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ids': 'str',
'all': 'bool'
}
attribute_map = {
'ids': 'ids',
'all': 'all'
}
def __init__(self, ids=None, all=None): # noqa: E501
"""ReopenChatsBulkInputObject - a model defined in Swagger""" # noqa: E501
self._ids = None
self._all = None
self.discriminator = None
if ids is not None:
self.ids = ids
if all is not None:
self.all = all
@property
def ids(self):
"""Gets the ids of this ReopenChatsBulkInputObject. # noqa: E501
Entity ID(s), separated by comma # noqa: E501
:return: The ids of this ReopenChatsBulkInputObject. # noqa: E501
:rtype: str
"""
return self._ids
@ids.setter
def ids(self, ids):
"""Sets the ids of this ReopenChatsBulkInputObject.
Entity ID(s), separated by comma # noqa: E501
:param ids: The ids of this ReopenChatsBulkInputObject. # noqa: E501
:type: str
"""
self._ids = ids
@property
def all(self):
"""Gets the all of this ReopenChatsBulkInputObject. # noqa: E501
Entity ID(s), separated by comma # noqa: E501
:return: The all of this ReopenChatsBulkInputObject. # noqa: E501
:rtype: bool
"""
return self._all
@all.setter
def all(self, all):
"""Sets the all of this ReopenChatsBulkInputObject.
Entity ID(s), separated by comma # noqa: E501
:param all: The all of this ReopenChatsBulkInputObject. # noqa: E501
:type: bool
"""
self._all = all
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReopenChatsBulkInputObject, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReopenChatsBulkInputObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.034247 | 119 | 0.558905 | [
"MIT"
] | imissyouso/textmagic-rest-python | TextMagic/models/reopen_chats_bulk_input_object.py | 3,947 | Python |
import os
import unittest
import abc
from funkyvalidate.examples.existing_directory import ExistingDirectory
from funkyvalidate.examples.existing_file import ExistingFile
from funkyvalidate import InterfaceType, meets
form_path = lambda *parts: os.path.abspath(os.path.join(*parts))
test_dir = form_path(__file__, '..')
example_dir = form_path(test_dir, 'test_example_dir')
nonexistant_dir = form_path(test_dir, 'nonexistant')
test_init_file = form_path(test_dir, '__init__.py')
# test_dir = os.path.abspath(os.path.join(__file__, '..', 'tests'))
# example_dir = os.path.abspath(os.path.join(test_dir, 'test_example_dir'))
# nonexistant_dir = os.path.abspath(os.path.join(test_dir, 'nonexistant'))
# test_init_file = os.path.abspath(os.path.join(test))
# Rebase current directory
os.chdir(test_dir)
class ExamplesTests(unittest.TestCase):
def setUp(self):
self.assertTrue(os.path.exists(test_init_file))
def test_existingdirectory(self):
self.assertTrue(isinstance(example_dir, ExistingDirectory))
self.assertFalse(isinstance(nonexistant_dir, ExistingDirectory))
self.assertFalse(isinstance(test_init_file, ExistingDirectory))
# Test constructor
self.assertTrue(ExistingDirectory(example_dir) == example_dir)
self.assertRaises(TypeError, ExistingDirectory, 321.321)
self.assertRaises(TypeError, ExistingDirectory, [example_dir])
self.assertRaises(IOError, ExistingDirectory, nonexistant_dir)
self.assertRaises(IOError, ExistingDirectory, test_init_file)
def test_existingfile(self):
"""Test the value-type interface for existing files."""
self.assertTrue(isinstance(test_init_file, ExistingFile))
self.assertFalse(isinstance(example_dir, ExistingFile))
# Test constructor
self.assertTrue(ExistingFile(test_init_file) == test_init_file)
self.assertRaises(TypeError, ExistingFile, 12)
self.assertRaises(IOError, ExistingFile, 'wargarbl')
self.assertRaises(IOError, ExistingFile, nonexistant_dir)
class MyInterface(InterfaceType):
@abc.abstractproperty
def first_name(self):
pass
class YesClass(object):
def __init__(self):
pass
first_name = "foo"
yes = YesClass()
class AlsoClass(object):
def __init__(self):
self.first_name = "bar"
also = AlsoClass()
class NoClass(object):
pass
no = NoClass()
class WeirdClass(object):
def __init__(self):
self.first_name = abc.abstractmethod(lambda self: NotImplemented)
first_name = "bazinga"
weird = WeirdClass()
class FirstChild(MyInterface):
def __init__(self):
self.other_stuff = "boo"
# can't instantiate FirstChild
class SecondChild(FirstChild):
first_name = "fixed"
second_child = SecondChild()
# class Weirder(MyInterface):
# first_name = abc.abstractmethod(lambda self: NotImplemented)
# def __init__(self):
# self.first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeFirst(InterfaceType):
first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeSecond(CommutativeFirst):
def __init__(self):
pass
first_name = "booo"
commutative = CommutativeSecond()
class CommutativeFails(CommutativeFirst):
"""This cannot be instantiated, even though the instance
overrides first_name. I believe this to be buggy behavior, however,
it is shared by abc.ABCMeta. (IE its not my fault).
"""
def __init__(self):
self.first_name = "boo"
class InterfaceTests(unittest.TestCase):
"""These test __instancecheck__ and __subclasscheck__, which depend on the meets function.
"""
def test_myinterface_itself(self):
self.assertFalse(meets(MyInterface, MyInterface))
self.assertFalse(issubclass(MyInterface, MyInterface))
self.assertRaises(TypeError, MyInterface)
def test_also_class(self):
"""
AlsoClass does not meet the interface as a class, but does once instantiated.
"""
self.assertFalse(meets(AlsoClass, MyInterface))
self.assertTrue(meets(also, MyInterface))
self.assertTrue(isinstance(also, MyInterface))
self.assertFalse(issubclass(AlsoClass, MyInterface))
def test_yes_class(self):
"""Meets interface"""
self.assertTrue(meets(YesClass, MyInterface))
self.assertTrue(meets(yes, MyInterface))
self.assertTrue(isinstance(yes, MyInterface))
self.assertTrue(issubclass(YesClass, MyInterface))
def test_no_class(self):
"""Does not meet interface."""
self.assertFalse(meets(NoClass, MyInterface))
self.assertFalse(meets(no, MyInterface))
self.assertFalse(isinstance(no, MyInterface))
self.assertFalse(issubclass(NoClass, MyInterface))
def test_weird_class(self):
"""Meets interface as class, but not as instance.
This is strange - not something that would normally ever happen."""
self.assertTrue(meets(WeirdClass, MyInterface))
self.assertFalse(meets(weird, MyInterface))
self.assertFalse(isinstance(weird, MyInterface))
self.assertTrue(issubclass(WeirdClass, MyInterface))
def test_first_child_class(self):
"""First child inherits MyInterface, but does not implement
it at all - so it can't be implemented."""
self.assertFalse(meets(FirstChild, MyInterface))
self.assertFalse(issubclass(FirstChild, MyInterface))
self.assertRaises(TypeError, FirstChild)
def test_second_child_class(self):
"""Meets the interface inherited from its parent."""
self.assertTrue(meets(SecondChild, MyInterface))
self.assertTrue(meets(second_child, MyInterface))
self.assertTrue(isinstance(second_child, MyInterface))
self.assertTrue(issubclass(SecondChild, MyInterface))
def test_commutative(self):
"""
        CommutativeFirst only declares the abstract attribute, so it does not meet
        the interface; CommutativeSecond supplies first_name and does, both as a
        class and once instantiated.
"""
self.assertFalse(meets(CommutativeFirst, MyInterface))
self.assertTrue(meets(CommutativeSecond, MyInterface))
self.assertTrue(meets(commutative, MyInterface))
self.assertTrue(isinstance(commutative, MyInterface))
self.assertFalse(issubclass(CommutativeFirst, MyInterface))
self.assertTrue(issubclass(CommutativeSecond, MyInterface))
self.assertRaises(TypeError, CommutativeFails)
if __name__ == "__main__":
unittest.main()
| 35.950276 | 94 | 0.716306 | [
"MIT"
] | OaklandPeters/funkyvalidate | funkyvalidate/tests/test_interfaces.py | 6,507 | Python |
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_consts
from neutron_lib import exceptions as n_exc
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.db import db as nsx_db
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas import lb_helper
from vmware_nsx.services.lbaas import lb_translators
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import constants as oct_const
LOG = logging.getLogger(__name__)
class NotImplementedManager(object):
"""Helper class to make any subclass of LoadBalancerBaseDriver explode if
it is missing any of the required object managers.
"""
def create(self, context, obj):
raise NotImplementedError()
def update(self, context, old_obj, obj):
raise NotImplementedError()
def delete(self, context, obj):
raise NotImplementedError()
class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
@log_helpers.log_method_call
def __init__(self):
super(EdgeLoadbalancerDriverV2, self).__init__()
# Init all LBaaS objects
# Note(asarfaty): self.lbv2_driver is not yet defined at init time
# so lambda is used to retrieve it later.
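        # Illustrative only: each wrapper below receives e.g.
        #     lambda: self.lbv2_driver.pool
        # rather than self.lbv2_driver.pool itself, so the attribute lookup is
        # deferred until the wrapper is actually called, by which time the
        # plugin has set self.lbv2_driver.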
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
"loadbalancer",
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
lb_translators.lb_loadbalancer_obj_to_dict,
lambda: self.lbv2_driver.load_balancer)
self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
"listener",
listener_mgr.EdgeListenerManagerFromDict(),
lb_translators.lb_listener_obj_to_dict,
lambda: self.lbv2_driver.listener)
self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
"pool",
pool_mgr.EdgePoolManagerFromDict(),
lb_translators.lb_pool_obj_to_dict,
lambda: self.lbv2_driver.pool)
self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
"member",
member_mgr.EdgeMemberManagerFromDict(),
lb_translators.lb_member_obj_to_dict,
lambda: self.lbv2_driver.member)
self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
"healthmonitor",
healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
lb_translators.lb_hm_obj_to_dict,
lambda: self.lbv2_driver.health_monitor)
self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7policy",
l7policy_mgr.EdgeL7PolicyManagerFromDict(),
lb_translators.lb_l7policy_obj_to_dict,
lambda: self.lbv2_driver.l7policy)
self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7rule",
l7rule_mgr.EdgeL7RuleManagerFromDict(),
lb_translators.lb_l7rule_obj_to_dict,
lambda: self.lbv2_driver.l7rule)
self._subscribe_router_delete_callback()
def _subscribe_router_delete_callback(self):
# Check if there is any LB attachment for the NSX router.
# This callback is subscribed here to prevent router/GW/interface
# deletion if it still has LB service attached to it.
        # Note(asarfaty): Those callbacks are used by Octavia as well, even
# though they are bound only here
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _unsubscribe_router_delete_callback(self):
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _get_lb_ports(self, context, subnet_ids):
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
'fixed_ips': {'subnet_id': subnet_ids}}
return self.loadbalancer.core_plugin.get_ports(
context, filters=filters)
def _check_lb_service_on_router(self, resource, event, trigger,
payload=None):
"""Prevent removing a router GW or deleting a router used by LB"""
router_id = payload.resource_id
context = payload.context
nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return
nsxlib = self.loadbalancer.core_plugin.nsxlib
service_client = nsxlib.load_balancer.service
# Check if there is any lb service on nsx router
lb_service = service_client.get_router_lb_service(nsx_router_id)
if lb_service:
msg = _('Cannot delete a %s as it still has lb service '
'attachment') % resource
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
# Also check if there are any loadbalancers attached to this router
# subnets
core_plugin = self.loadbalancer.core_plugin
router_subnets = core_plugin._load_router_subnet_cidrs_from_db(
context.elevated(), router_id)
subnet_ids = [subnet['id'] for subnet in router_subnets]
if subnet_ids and self._get_lb_ports(context.elevated(), subnet_ids):
msg = (_('Cannot delete a %s as it used by a loadbalancer') %
resource)
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
def _check_lb_service_on_router_interface(
self, resource, event, trigger, payload=None):
# Prevent removing the interface of an LB subnet from a router
router_id = payload.resource_id
subnet_id = payload.metadata.get('subnet_id')
if not router_id or not subnet_id:
return
nsx_router_id = nsx_db.get_nsx_router_id(payload.context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return
# get LB ports and check if any loadbalancer is using this subnet
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
msg = _('Cannot delete a router interface as it used by a '
'loadbalancer')
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
class DummyLoadbalancerDriverV2(object):
@log_helpers.log_method_call
def __init__(self):
self.loadbalancer = NotImplementedManager()
self.listener = NotImplementedManager()
self.pool = NotImplementedManager()
self.member = NotImplementedManager()
self.health_monitor = NotImplementedManager()
self.l7policy = NotImplementedManager()
self.l7rule = NotImplementedManager()
| 44.190955 | 78 | 0.690243 | [
"Apache-2.0"
] | yebinama/vmware-nsx | vmware_nsx/services/lbaas/nsx_v3/v2/lb_driver_v2.py | 8,794 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-26 15:51
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20171126_1058'),
]
operations = [
migrations.RemoveField(
model_name='emitido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_lancamento',
),
migrations.AlterField(
model_name='banco',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='cliente',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='emitido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
migrations.AlterField(
model_name='fornecedor',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='recebido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
]
| 32.381818 | 108 | 0.585065 | [
"MIT"
] | gabrielnaoto/checkapp | check/core/migrations/0010_auto_20171126_1351.py | 1,781 | Python |
A, B, C, D = map(int, input().split())
s1 = set(range(A, B + 1))
s2 = set(range(C, D + 1))
print(len(s1) * len(s2) - len(s1.intersection(s2)))
| 20.714286 | 51 | 0.551724 | [
"MIT"
] | c-yan/yukicoder | yc208/799.py | 145 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
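    # On disk each shard follows the "{split}.{src}-{tgt}.{lang}" naming scheme,
    # e.g. "train.de-en.de" (the language pair here is only illustrative); for the
    # binarized dataset formats this corresponds to an .idx/.bin file pair, and the
    # loop below stops at the first shard index k that has no matching files.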
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLUE scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| 40.028953 | 95 | 0.587492 | [
"MIT"
] | 227514/Supervised-Simultaneous-MT | fairseq/tasks/translation.py | 17,973 | Python |
from typing import List, Optional
import scrapy
from scrapy import Item
from jedeschule.items import School
from jedeschule.spiders.school_spider import SchoolSpider
def first_or_none(item: List) -> Optional[str]:
try:
return item[0]
except IndexError:
return None
class BrandenburgSpider(SchoolSpider):
name = "brandenburg"
start_urls = ['https://bildung-brandenburg.de/schulportraets/index.php?id=uebersicht']
def parse(self, response):
for link in response.xpath('/html/body/div/div[5]/div[2]/div/div[2]/table/tbody/tr/td/a/@href').getall():
yield scrapy.Request(response.urljoin(link), callback=self.parse_details)
def parse_details(self, response):
table = response.xpath('//*[@id="c"]/div/table')
data = {
# extract the school ID from the URL
'id': response.url.rsplit('=', 1)[1],
'data_url': response.url
}
for tr in table.css('tr:not(:first-child)'):
key = tr.css('th ::text').get().replace(':', '').strip()
value = tr.css('td ::text').getall()
data[key] = [self.fix_data(part) for part in value]
yield data
def fix_data(self, string):
"""
fix wrong tabs, spaces and backslashes
fix @ in email addresses
"""
if string is None:
return None
string = ' '.join(string.split())
return string.replace('\\', '').replace('|at|','@').strip()
@staticmethod
def normalize(item: Item) -> School:
*name, street, place = item.get('Adresse')
zip_code, *city_parts = place.split(" ")
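        # e.g. place == "14467 Potsdam" (values illustrative) gives
        # zip_code == "14467" and city_parts == ["Potsdam"].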
return School(name=' '.join(name),
id='BB-{}'.format(item.get('id')),
address=street,
zip=zip_code,
city=' '.join(city_parts),
website=first_or_none(item.get('Internet')),
email=first_or_none(item.get('E-Mail')),
school_type=first_or_none(item.get('Schulform')),
provider=first_or_none(item.get('Schulamt')),
fax=first_or_none(item.get('Fax')),
phone=first_or_none(item.get('Telefon')),
director=first_or_none(item.get('Schulleiter/in')))
| 36.953125 | 113 | 0.556871 | [
"MIT"
] | MartinGer/jedeschule-scraper | jedeschule/spiders/brandenburg.py | 2,365 | Python |
#!/usr/bin/env python
"""Base test classes for API handlers tests."""
# pylint:mode=test
import json
import logging
import os
import threading
import portpicker
import requests
from google.protobuf import json_format
from grr import gui
from grr_api_client.connectors import http_connector
from grr.gui import api_auth_manager
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.gui import http_api
from grr.gui import wsgiapp_testlib
from grr.lib import flags
from grr.lib import utils
from grr.server import data_store
from grr.test_lib import test_lib
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")
_HTTP_ENDPOINTS = {}
_HTTP_ENDPOINTS_LOCK = threading.RLock()
class HttpApiRegressionTestMixinBase(object):
"""Load only API E2E test cases."""
api_version = None
read_from_relational_db = False
_get_connector_lock = threading.RLock()
@staticmethod
def GetConnector(api_version):
if api_version not in [1, 2]:
raise ValueError("api_version may be 1 or 2 only")
with _HTTP_ENDPOINTS_LOCK:
if api_version not in _HTTP_ENDPOINTS:
port = portpicker.PickUnusedPort()
logging.info("Picked free AdminUI port %d.", port)
# Force creation of new APIAuthorizationManager.
api_auth_manager.APIACLInit.InitApiAuthManager()
trd = wsgiapp_testlib.ServerThread(port)
trd.StartAndWaitUntilServing()
_HTTP_ENDPOINTS[api_version] = "http://localhost:%d" % port
return http_connector.HttpConnector(
api_endpoint=_HTTP_ENDPOINTS[api_version])
def setUp(self):
super(HttpApiRegressionTestMixinBase, self).setUp()
self.connector = self.GetConnector(self.__class__.api_version)
if (not getattr(self, "aff4_only_test", False) and
self.__class__.read_from_relational_db):
self.db_config_overrider = test_lib.ConfigOverrider({
"Database.useForReads": True
})
self.db_config_overrider.Start()
else:
self.db_config_overrider = None
def tearDown(self):
super(HttpApiRegressionTestMixinBase, self).tearDown()
if self.db_config_overrider:
self.db_config_overrider.Stop()
def _ParseJSON(self, json_str):
"""Parses response JSON."""
xssi_prefix = ")]}'\n"
if json_str.startswith(xssi_prefix):
json_str = json_str[len(xssi_prefix):]
return json.loads(json_str)
def _PrepareV1Request(self, method, args=None):
"""Prepares API v1 request for a given method and args."""
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
request.url = request.url.replace("/api/v2/", "/api/")
if args and request.data:
body_proto = args.__class__().AsPrimitiveProto()
json_format.Parse(request.data, body_proto)
body_args = args.__class__()
body_args.ParseFromString(body_proto.SerializeToString())
request.data = json.dumps(
api_value_renderers.StripTypeInfo(
api_value_renderers.RenderValue(body_args)),
cls=http_api.JSONEncoderWithRDFPrimitivesSupport)
prepped_request = request.prepare()
return request, prepped_request
def _PrepareV2Request(self, method, args=None):
"""Prepares API v2 request for a given method and args."""
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
prepped_request = request.prepare()
return request, prepped_request
def HandleCheck(self, method_metadata, args=None, replace=None):
"""Does regression check for given method, args and a replace function."""
if not replace:
raise ValueError("replace can't be None")
if self.__class__.api_version == 1:
request, prepped_request = self._PrepareV1Request(
method_metadata.name, args=args)
elif self.__class__.api_version == 2:
request, prepped_request = self._PrepareV2Request(
method_metadata.name, args=args)
else:
raise ValueError("api_version may be only 1 or 2, not %d",
flags.FLAGS.api_version)
session = requests.Session()
response = session.send(prepped_request)
check_result = {
"url": replace(prepped_request.path_url),
"method": request.method
}
if request.data:
request_payload = self._ParseJSON(replace(request.data))
if request_payload:
check_result["request_payload"] = request_payload
if (method_metadata.result_type ==
api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE):
check_result["response"] = replace(utils.SmartUnicode(response.content))
else:
check_result["response"] = self._ParseJSON(replace(response.content))
if self.__class__.api_version == 1:
stripped_response = api_value_renderers.StripTypeInfo(
check_result["response"])
if stripped_response != check_result["response"]:
check_result["type_stripped_response"] = stripped_response
return check_result
class HttpApiV1RegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v1 protocol."""
connection_type = "http_v1"
skip_legacy_dynamic_proto_tests = False
api_version = 1
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-docs-examples.json")
class HttpApiV2RegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v2 protocol."""
connection_type = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
class HttpApiV2RelationalDBRegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v2 protocol with Database.useForReads=True."""
read_from_relational_db = True
connection_type = "http_v2_rel_db"
use_golden_files_of = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsEnabled(self):
if not getattr(self, "aff4_only_test", False):
self.assertTrue(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
| 31.14486 | 79 | 0.722731 | [
"Apache-2.0"
] | nickamon/grr | grr/gui/api_regression_http.py | 6,665 | Python |
#encoding=utf-8
from __future__ import unicode_literals
from django.apps import AppConfig
class CourseConfig(AppConfig):
name = 'course'
    verbose_name = u"课程管理"  # i.e. "Course Management"
| 17.1 | 39 | 0.760234 | [
"Apache-2.0"
] | wyftddev/MXOline | apps/course/apps.py | 179 | Python |
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, array, array_ty, void, i32
class ArraySwapContract(Contract):
def specification(self):
a0 = self.fresh_var(i32, "a0")
a1 = self.fresh_var(i32, "a1")
a = self.alloc(array_ty(2, i32),
points_to=array(a0, a1))
self.execute_func(a)
self.points_to(a[0], a1)
self.points_to(a[1], a0)
self.returns(void)
class LLVMArraySwapTest(unittest.TestCase):
def test_llvm_array_swap(self):
connect(reset_server=True)
if __name__ == "__main__": view(LogResults())
bcname = str(Path('tests','saw','test-files', 'llvm_array_swap.bc'))
mod = llvm_load_module(bcname)
result = llvm_verify(mod, 'array_swap', ArraySwapContract())
self.assertIs(result.is_success(), True)
if __name__ == "__main__":
unittest.main()
| 27.794118 | 76 | 0.639153 | [
"BSD-3-Clause"
] | GaloisInc/saw-script | saw-remote-api/python/tests/saw/test_llvm_array_swap.py | 945 | Python |
import docker
if __name__ == '__main__':
client = docker.from_env()
i = -1
name = 'evtd_'
    while True:
try:
i += 1
container = client.containers.get('{}{}'.format(name,i))
print(container.logs(tail=1))
# container.stop()
# container.remove()
# print('free {}{} succeed'.format(name, i))
        except docker.errors.NotFound:
            # Gaps below index 10 are tolerated; the scan stops at the first
            # missing container once the index reaches 10.
            if i >= 10:
                break
| 26.111111 | 68 | 0.485106 | [
"MIT"
] | Laighno/evt | nettests/monitor.py | 470 | Python |
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
If you want to do exoteric augmentations, you can just re-write this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
        data_dir (Optional[Union[str, Path]]): path where to download/locate the dataset.
        train_dir (Optional[Union[str, Path]]): subpath where the training data is located.
        val_dir (Optional[Union[str, Path]]): subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
dataset (str): dataset name.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
Tuple[DataLoader, DataLoader]: prepared training and validation dataloader;.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
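# Minimal usage sketch (dataset name, paths and sizes below are placeholders, not
# part of this module):
#
#     train_loader, val_loader = prepare_data(
#         "cifar10", data_dir="./datasets", batch_size=256, num_workers=4
#     )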
| 31.492908 | 99 | 0.596329 | [
"MIT"
] | fariasfc/solo-learn | solo/utils/classification_dataloader.py | 8,881 | Python |
import collections
class Solution:
"""
@param board: a board
@param click: the position
@return: the new board
"""
def updateBoard(self, board, click):
# Write your code here
b = []
for s in board:
temp = []
for c in s:
temp.append(c)
b.append(temp)
row, col = click
if b[row][col] == 'M':
b[row][col] = 'X'
else:
m, n = len(board), len(board[0])
Q = collections.deque([(row, col)])
b[row][col] = 'B'
while Q:
r, c = Q.popleft()
count = 0
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'M':
count += 1
if count > 0:
b[r][c] = str(count)
else:
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'E':
Q.append((nr, nc))
b[nr][nc] = 'B'
return [''.join(row) for row in b]
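# Worked example (board contents are illustrative): for board = ["EE", "EM"] and
# click = [0, 0], the clicked cell borders exactly one mine, so only that cell is
# revealed and updateBoard returns ["1E", "EM"].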
| 34.794872 | 121 | 0.342668 | [
"MIT"
] | jiadaizhao/LintCode | 1101-1200/1189-Minesweeper/1189-Minesweeper.py | 1,357 | Python |
#!/usr/bin/env python
#
#===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===#
from __future__ import print_function
import argparse
import collections
import difflib
import json
import logging
import os
import re
#===-----------------------------------------------------------------------===#
# These data structures represent a deserialized ExplodedGraph.
#===-----------------------------------------------------------------------===#
# A helper function for finding the difference between two dictionaries.
def diff_dicts(curr, prev):
removed = [k for k in prev if k not in curr or curr[k] != prev[k]]
added = [k for k in curr if k not in prev or curr[k] != prev[k]]
return (removed, added)
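# For example (values illustrative), diff_dicts({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
# returns (removed, added) == (['b', 'c'], ['a', 'b']) in insertion order on Python 3:
# 'c' disappeared, 'a' is new, and 'b' changed value, so it appears on both sides.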
# Represents any program state trait that is a dictionary of key-value pairs.
class GenericMap(object):
def __init__(self, items):
self.generic_map = collections.OrderedDict(items)
def diff(self, prev):
return diff_dicts(self.generic_map, prev.generic_map)
def is_different(self, prev):
removed, added = self.diff(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized source location.
class SourceLocation(object):
def __init__(self, json_loc):
super(SourceLocation, self).__init__()
logging.debug('json: %s' % json_loc)
self.line = json_loc['line']
self.col = json_loc['column']
self.filename = os.path.basename(json_loc['file']) \
if 'file' in json_loc else '(main file)'
self.spelling = SourceLocation(json_loc['spelling']) \
if 'spelling' in json_loc else None
def is_macro(self):
return self.spelling is not None
# A deserialized program point.
class ProgramPoint(object):
def __init__(self, json_pp):
super(ProgramPoint, self).__init__()
self.kind = json_pp['kind']
self.tag = json_pp['tag']
self.node_id = json_pp['node_id']
self.is_sink = bool(json_pp['is_sink'])
self.has_report = bool(json_pp['has_report'])
if self.kind == 'Edge':
self.src_id = json_pp['src_id']
self.dst_id = json_pp['dst_id']
elif self.kind == 'Statement':
logging.debug(json_pp)
self.stmt_kind = json_pp['stmt_kind']
self.cast_kind = json_pp['cast_kind'] \
if 'cast_kind' in json_pp else None
self.stmt_point_kind = json_pp['stmt_point_kind']
self.stmt_id = json_pp['stmt_id']
self.pointer = json_pp['pointer']
self.pretty = json_pp['pretty']
self.loc = SourceLocation(json_pp['location']) \
if json_pp['location'] is not None else None
elif self.kind == 'BlockEntrance':
self.block_id = json_pp['block_id']
# A single expression acting as a key in a deserialized Environment.
class EnvironmentBindingKey(object):
def __init__(self, json_ek):
super(EnvironmentBindingKey, self).__init__()
# CXXCtorInitializer is not a Stmt!
self.stmt_id = json_ek['stmt_id'] if 'stmt_id' in json_ek \
else json_ek['init_id']
self.pretty = json_ek['pretty']
self.kind = json_ek['kind'] if 'kind' in json_ek else None
def _key(self):
return self.stmt_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# Deserialized description of a location context.
class LocationContext(object):
def __init__(self, json_frame):
super(LocationContext, self).__init__()
self.lctx_id = json_frame['lctx_id']
self.caption = json_frame['location_context']
self.decl = json_frame['calling']
self.loc = SourceLocation(json_frame['location']) \
if json_frame['location'] is not None else None
def _key(self):
return self.lctx_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A group of deserialized Environment bindings that correspond to a specific
# location context.
class EnvironmentFrame(object):
def __init__(self, json_frame):
super(EnvironmentFrame, self).__init__()
self.location_context = LocationContext(json_frame)
self.bindings = collections.OrderedDict(
[(EnvironmentBindingKey(b),
b['value']) for b in json_frame['items']]
if json_frame['items'] is not None else [])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized Environment. This class can also hold other entities that
# are similar to Environment, such as Objects Under Construction.
class GenericEnvironment(object):
def __init__(self, json_e):
super(GenericEnvironment, self).__init__()
self.frames = [EnvironmentFrame(f) for f in json_e]
def diff_frames(self, prev):
# TODO: It's difficult to display a good diff when frame numbers shift.
if len(self.frames) != len(prev.frames):
return None
updated = []
for i in range(len(self.frames)):
f = self.frames[i]
prev_f = prev.frames[i]
if f.location_context == prev_f.location_context:
if f.is_different(prev_f):
updated.append(i)
else:
# We have the whole frame replaced with another frame.
# TODO: Produce a nice diff.
return None
# TODO: Add support for added/removed.
return updated
def is_different(self, prev):
updated = self.diff_frames(prev)
return updated is None or len(updated) > 0
# A single binding key in a deserialized RegionStore cluster.
class StoreBindingKey(object):
def __init__(self, json_sk):
super(StoreBindingKey, self).__init__()
self.kind = json_sk['kind']
self.offset = json_sk['offset']
def _key(self):
return (self.kind, self.offset)
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A single cluster of the deserialized RegionStore.
class StoreCluster(object):
def __init__(self, json_sc):
super(StoreCluster, self).__init__()
self.base_region = json_sc['cluster']
self.bindings = collections.OrderedDict(
[(StoreBindingKey(b), b['value']) for b in json_sc['items']])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized RegionStore.
class Store(object):
def __init__(self, json_s):
super(Store, self).__init__()
self.ptr = json_s['pointer']
self.clusters = collections.OrderedDict(
[(c['pointer'], StoreCluster(c)) for c in json_s['items']])
def diff_clusters(self, prev):
removed = [k for k in prev.clusters if k not in self.clusters]
added = [k for k in self.clusters if k not in prev.clusters]
updated = [k for k in prev.clusters if k in self.clusters
and prev.clusters[k].is_different(self.clusters[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_clusters(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# Deserialized messages from a single checker in a single program state.
# Basically a list of raw strings.
class CheckerLines(object):
def __init__(self, json_lines):
super(CheckerLines, self).__init__()
self.lines = json_lines
def diff_lines(self, prev):
lines = difflib.ndiff(prev.lines, self.lines)
return [l.strip() for l in lines
if l.startswith('+') or l.startswith('-')]
def is_different(self, prev):
return len(self.diff_lines(prev)) > 0
# Deserialized messages of all checkers, separated by checker.
class CheckerMessages(object):
def __init__(self, json_m):
super(CheckerMessages, self).__init__()
self.items = collections.OrderedDict(
[(m['checker'], CheckerLines(m['messages'])) for m in json_m])
def diff_messages(self, prev):
removed = [k for k in prev.items if k not in self.items]
added = [k for k in self.items if k not in prev.items]
updated = [k for k in prev.items if k in self.items
and prev.items[k].is_different(self.items[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_messages(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# A deserialized program state.
class ProgramState(object):
def __init__(self, state_id, json_ps):
super(ProgramState, self).__init__()
logging.debug('Adding ProgramState ' + str(state_id))
if json_ps is None:
json_ps = {
'store': None,
'environment': None,
'constraints': None,
'dynamic_types': None,
'constructing_objects': None,
'checker_messages': None
}
self.state_id = state_id
self.store = Store(json_ps['store']) \
if json_ps['store'] is not None else None
self.environment = \
GenericEnvironment(json_ps['environment']['items']) \
if json_ps['environment'] is not None else None
self.constraints = GenericMap([
(c['symbol'], c['range']) for c in json_ps['constraints']
]) if json_ps['constraints'] is not None else None
self.dynamic_types = GenericMap([
(t['region'], '%s%s' % (t['dyn_type'],
' (or a sub-class)'
if t['sub_classable'] else ''))
for t in json_ps['dynamic_types']]) \
if json_ps['dynamic_types'] is not None else None
self.constructing_objects = \
GenericEnvironment(json_ps['constructing_objects']) \
if json_ps['constructing_objects'] is not None else None
self.checker_messages = CheckerMessages(json_ps['checker_messages']) \
if json_ps['checker_messages'] is not None else None
# A deserialized exploded graph node. Has a default constructor because it
# may be referenced as part of an edge before its contents are deserialized,
# and in this moment we already need a room for predecessors and successors.
class ExplodedNode(object):
def __init__(self):
super(ExplodedNode, self).__init__()
self.predecessors = []
self.successors = []
def construct(self, node_id, json_node):
logging.debug('Adding ' + node_id)
self.ptr = node_id[4:]
self.points = [ProgramPoint(p) for p in json_node['program_points']]
self.node_id = self.points[-1].node_id
self.state = ProgramState(json_node['state_id'],
json_node['program_state']
if json_node['program_state'] is not None else None);
assert self.node_name() == node_id
def node_name(self):
return 'Node' + self.ptr
# A deserialized ExplodedGraph. Constructed by consuming a .dot file
# line-by-line.
class ExplodedGraph(object):
# Parse .dot files with regular expressions.
node_re = re.compile(
'^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$')
edge_re = re.compile(
'^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$')
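    # The two line shapes matched above look roughly like this (schematic,
    # heavily shortened examples, not taken from a real dump):
    #   Node0x1234 [shape=record,label="{ ...node JSON... \l}"];
    #   Node0x1234 -> Node0x5678;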
def __init__(self):
super(ExplodedGraph, self).__init__()
self.nodes = collections.defaultdict(ExplodedNode)
self.root_id = None
self.incomplete_line = ''
def add_raw_line(self, raw_line):
if raw_line.startswith('//'):
return
# Allow line breaks by waiting for ';'. This is not valid in
# a .dot file, but it is useful for writing tests.
if len(raw_line) > 0 and raw_line[-1] != ';':
self.incomplete_line += raw_line
return
raw_line = self.incomplete_line + raw_line
self.incomplete_line = ''
# Apply regexps one by one to see if it's a node or an edge
# and extract contents if necessary.
logging.debug('Line: ' + raw_line)
result = self.edge_re.match(raw_line)
if result is not None:
logging.debug('Classified as edge line.')
pred = result.group(1)
succ = result.group(2)
self.nodes[pred].successors.append(succ)
self.nodes[succ].predecessors.append(pred)
return
result = self.node_re.match(raw_line)
if result is not None:
logging.debug('Classified as node line.')
node_id = result.group(1)
if len(self.nodes) == 0:
self.root_id = node_id
# Note: when writing tests you don't need to escape everything,
# even though in a valid dot file everything is escaped.
node_label = result.group(2).replace('\\l', '') \
.replace(' ', '') \
.replace('\\"', '"') \
.replace('\\{', '{') \
.replace('\\}', '}') \
.replace('\\\\', '\\') \
.replace('\\|', '|') \
.replace('\\<', '\\\\<') \
.replace('\\>', '\\\\>') \
.rstrip(',')
logging.debug(node_label)
json_node = json.loads(node_label)
self.nodes[node_id].construct(node_id, json_node)
return
logging.debug('Skipping.')
#===-----------------------------------------------------------------------===#
# Visitors traverse a deserialized ExplodedGraph and do different things
# with every node and edge.
#===-----------------------------------------------------------------------===#
# A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
# syntax highlighting.
class DotDumpVisitor(object):
def __init__(self, do_diffs, dark_mode, gray_mode,
topo_mode, dump_dot_only):
super(DotDumpVisitor, self).__init__()
self._do_diffs = do_diffs
self._dark_mode = dark_mode
self._gray_mode = gray_mode
self._topo_mode = topo_mode
self._dump_dot_only = dump_dot_only
self._output = []
def _dump_raw(self, s):
if self._dump_dot_only:
print(s, end='')
else:
self._output.append(s)
def output(self):
assert not self._dump_dot_only
return ''.join(self._output)
def _dump(self, s):
s = s.replace('&', '&') \
.replace('{', '\\{') \
.replace('}', '\\}') \
.replace('\\<', '<') \
.replace('\\>', '>') \
.replace('\\l', '<br />') \
.replace('|', '\\|')
if self._gray_mode:
s = re.sub(r'<font color="[a-z0-9]*">', '', s)
s = re.sub(r'</font>', '', s)
self._dump_raw(s)
@staticmethod
def _diff_plus_minus(is_added):
if is_added is None:
return ''
if is_added:
return '<font color="forestgreen">+</font>'
return '<font color="red">-</font>'
@staticmethod
def _short_pretty(s):
if s is None:
return None
if len(s) < 20:
return s
left = s.find('{')
right = s.rfind('}')
if left == -1 or right == -1 or left >= right:
return s
candidate = s[0:left + 1] + ' ... ' + s[right:]
if len(candidate) >= len(s):
return s
return candidate
@staticmethod
def _make_sloc(loc):
if loc is None:
return '<i>Invalid Source Location</i>'
def make_plain_loc(loc):
return '%s:<b>%s</b>:<b>%s</b>' \
% (loc.filename, loc.line, loc.col)
if loc.is_macro():
return '%s <font color="royalblue1">' \
'(<i>spelling at </i> %s)</font>' \
% (make_plain_loc(loc), make_plain_loc(loc.spelling))
return make_plain_loc(loc)
def visit_begin_graph(self, graph):
self._graph = graph
self._dump_raw('digraph "ExplodedGraph" {\n')
if self._dark_mode:
self._dump_raw('bgcolor="gray10";\n')
self._dump_raw('label="";\n')
def visit_program_point(self, p):
if p.kind in ['Edge', 'BlockEntrance', 'BlockExit']:
color = 'gold3'
elif p.kind in ['PreStmtPurgeDeadSymbols',
'PostStmtPurgeDeadSymbols']:
color = 'red'
elif p.kind in ['CallEnter', 'CallExitBegin', 'CallExitEnd']:
color = 'dodgerblue' if self._dark_mode else 'blue'
elif p.kind in ['Statement']:
color = 'cyan4'
else:
color = 'forestgreen'
self._dump('<tr><td align="left">%s.</td>' % p.node_id)
if p.kind == 'Statement':
# This avoids pretty-printing huge statements such as CompoundStmt.
# Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols
skip_pretty = 'PurgeDeadSymbols' in p.stmt_point_kind
stmt_color = 'cyan3'
self._dump('<td align="left" width="0">%s:</td>'
'<td align="left" width="0"><font color="%s">'
'%s</font> </td>'
'<td align="left"><i>S%s</i></td>'
'<td align="left"><font color="%s">%s</font></td>'
'<td align="left">%s</td></tr>'
% (self._make_sloc(p.loc), color,
'%s (%s)' % (p.stmt_kind, p.cast_kind)
if p.cast_kind is not None else p.stmt_kind,
p.stmt_id, stmt_color, p.stmt_point_kind,
self._short_pretty(p.pretty)
if not skip_pretty else ''))
elif p.kind == 'Edge':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td><td align="left">'
'[B%d] -\\> [B%d]</td></tr>'
% (color, 'BlockEdge', p.src_id, p.dst_id))
elif p.kind == 'BlockEntrance':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td>'
'<td align="left">[B%d]</td></tr>'
% (color, p.kind, p.block_id))
else:
# TODO: Print more stuff for other kinds of points.
self._dump('<td width="0"></td>'
'<td align="left" width="0" colspan="2">'
'<font color="%s">%s</font></td></tr>'
% (color, p.kind))
if p.tag is not None:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<b>Tag: </b> <font color="crimson">'
'%s</font></td></tr>' % p.tag)
if p.has_report:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="red"><b>Bug Report Attached'
'</b></font></td></tr>')
if p.is_sink:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="cornflowerblue"><b>Sink Node'
'</b></font></td></tr>')
def visit_environment(self, e, prev_e=None):
self._dump('<table border="0">')
def dump_location_context(lc, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><b>%s</b></td>'
'<td align="left" colspan="2">'
'<font color="gray60">%s </font>'
'%s</td></tr>'
% (self._diff_plus_minus(is_added),
lc.caption, lc.decl,
('(%s)' % self._make_sloc(lc.loc))
if lc.loc is not None else ''))
def dump_binding(f, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><i>S%s</i></td>'
'%s'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
b.stmt_id,
'<td align="left"><font color="%s"><i>'
'%s</i></font></td>' % (
'lavender' if self._dark_mode else 'darkgreen',
('(%s)' % b.kind) if b.kind is not None else ' '
),
self._short_pretty(b.pretty), f.bindings[b]))
frames_updated = e.diff_frames(prev_e) if prev_e is not None else None
if frames_updated:
for i in frames_updated:
f = e.frames[i]
prev_f = prev_e.frames[i]
dump_location_context(f.location_context)
bindings_removed, bindings_added = f.diff_bindings(prev_f)
for b in bindings_removed:
dump_binding(prev_f, b, False)
for b in bindings_added:
dump_binding(f, b, True)
else:
for f in e.frames:
dump_location_context(f.location_context)
for b in f.bindings:
dump_binding(f, b)
self._dump('</table>')
def visit_environment_in_state(self, selector, title, s, prev_s=None):
e = getattr(s, selector)
prev_e = getattr(prev_s, selector) if prev_s is not None else None
if e is None and prev_e is None:
return
self._dump('<hr /><tr><td align="left"><b>%s: </b>' % title)
if e is None:
self._dump('<i> Nothing!</i>')
else:
if prev_e is not None:
if e.is_different(prev_e):
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e, prev_e)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e)
self._dump('</td></tr>')
def visit_store(self, s, prev_s=None):
self._dump('<table border="0">')
def dump_binding(s, c, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
s.clusters[c].base_region, b.offset,
'(<i>Default</i>)' if b.kind == 'Default'
else '',
s.clusters[c].bindings[b]))
if prev_s is not None:
clusters_removed, clusters_added, clusters_updated = \
s.diff_clusters(prev_s)
for c in clusters_removed:
for b in prev_s.clusters[c].bindings:
dump_binding(prev_s, c, b, False)
for c in clusters_updated:
bindings_removed, bindings_added = \
s.clusters[c].diff_bindings(prev_s.clusters[c])
for b in bindings_removed:
dump_binding(prev_s, c, b, False)
for b in bindings_added:
dump_binding(s, c, b, True)
for c in clusters_added:
for b in s.clusters[c].bindings:
dump_binding(s, c, b, True)
else:
for c in s.clusters:
for b in s.clusters[c].bindings:
dump_binding(s, c, b)
self._dump('</table>')
def visit_store_in_state(self, s, prev_s=None):
st = s.store
prev_st = prev_s.store if prev_s is not None else None
if st is None and prev_st is None:
return
self._dump('<hr /><tr><td align="left"><b>Store: </b>')
if st is None:
self._dump('<i> Nothing!</i>')
else:
if self._dark_mode:
self._dump(' <font color="gray30">(%s)</font>' % st.ptr)
else:
self._dump(' <font color="gray">(%s)</font>' % st.ptr)
if prev_st is not None:
if s.store.is_different(prev_st):
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st, prev_st)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st)
self._dump('</td></tr>')
def visit_generic_map(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_pair(m, k, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
k, m.generic_map[k]))
if prev_m is not None:
removed, added = m.diff(prev_m)
for k in removed:
dump_pair(prev_m, k, False)
for k in added:
dump_pair(m, k, True)
else:
for k in m.generic_map:
dump_pair(m, k, None)
self._dump('</table>')
def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
m = getattr(s, selector)
prev_m = getattr(prev_s, selector) if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>%s: </b>' % title)
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m)
self._dump('</td></tr>')
def visit_checker_messages(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_line(l, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added), l))
def dump_chk(chk, is_added=None):
dump_line('<i>%s</i>:' % chk, is_added)
if prev_m is not None:
removed, added, updated = m.diff_messages(prev_m)
for chk in removed:
dump_chk(chk, False)
for l in prev_m.items[chk].lines:
dump_line(l, False)
for chk in updated:
dump_chk(chk)
for l in m.items[chk].diff_lines(prev_m.items[chk]):
dump_line(l[1:], l.startswith('+'))
for chk in added:
dump_chk(chk, True)
for l in m.items[chk].lines:
dump_line(l, True)
else:
for chk in m.items:
dump_chk(chk)
for l in m.items[chk].lines:
dump_line(l)
self._dump('</table>')
def visit_checker_messages_in_state(self, s, prev_s=None):
m = s.checker_messages
prev_m = prev_s.checker_messages if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>Checker State: </b>')
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m)
self._dump('</td></tr>')
def visit_state(self, s, prev_s):
self.visit_store_in_state(s, prev_s)
self.visit_environment_in_state('environment', 'Expressions',
s, prev_s)
self.visit_generic_map_in_state('constraints', 'Ranges',
s, prev_s)
self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
s, prev_s)
self.visit_environment_in_state('constructing_objects',
'Objects Under Construction',
s, prev_s)
self.visit_checker_messages_in_state(s, prev_s)
def visit_node(self, node):
self._dump('%s [shape=record,'
% (node.node_name()))
if self._dark_mode:
self._dump('color="white",fontcolor="gray80",')
self._dump('label=<<table border="0">')
self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
% ("gray20" if self._dark_mode else "gray70",
node.state.state_id
if node.state is not None else 'Unspecified'))
if not self._topo_mode:
self._dump('<tr><td align="left" width="0">')
if len(node.points) > 1:
self._dump('<b>Program points:</b></td></tr>')
else:
self._dump('<b>Program point:</b></td></tr>')
self._dump('<tr><td align="left" width="0">'
'<table border="0" align="left" width="0">')
for p in node.points:
self.visit_program_point(p)
self._dump('</table></td></tr>')
if node.state is not None and not self._topo_mode:
prev_s = None
# Do diffs only when we have a unique predecessor.
# Don't do diffs on the leaf nodes because they're
# the important ones.
if self._do_diffs and len(node.predecessors) == 1 \
and len(node.successors) > 0:
prev_s = self._graph.nodes[node.predecessors[0]].state
self.visit_state(node.state, prev_s)
self._dump_raw('</table>>];\n')
def visit_edge(self, pred, succ):
self._dump_raw('%s -> %s%s;\n' % (
pred.node_name(), succ.node_name(),
' [color="white"]' if self._dark_mode else ''
))
def visit_end_of_graph(self):
self._dump_raw('}\n')
if not self._dump_dot_only:
import sys
import tempfile
def write_temp_file(suffix, data):
fd, filename = tempfile.mkstemp(suffix=suffix)
print('Writing "%s"...' % filename)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
print('Done! Please remember to remove the file.')
return filename
try:
import graphviz
except ImportError:
# The fallback behavior if graphviz is not installed!
print('Python graphviz not found. Please invoke')
print(' $ pip install graphviz')
print('in order to enable automatic conversion to HTML.')
print()
print('You may also convert DOT to SVG manually via')
print(' $ dot -Tsvg input.dot -o output.svg')
print()
write_temp_file('.dot', self.output())
return
svg = graphviz.pipe('dot', 'svg', self.output())
filename = write_temp_file(
'.html', '<html><body bgcolor="%s">%s</body></html>' % (
'#1a1a1a' if self._dark_mode else 'white', svg))
if sys.platform == 'win32':
os.startfile(filename)
elif sys.platform == 'darwin':
os.system('open "%s"' % filename)
else:
os.system('xdg-open "%s"' % filename)
#===-----------------------------------------------------------------------===#
# Explorers know how to traverse the ExplodedGraph in a certain order.
# They would invoke a Visitor on every node or edge they encounter.
#===-----------------------------------------------------------------------===#
# BasicExplorer explores the whole graph in no particular order.
class BasicExplorer(object):
def __init__(self):
super(BasicExplorer, self).__init__()
def explore(self, graph, visitor):
visitor.visit_begin_graph(graph)
for node in sorted(graph.nodes):
logging.debug('Visiting ' + node)
visitor.visit_node(graph.nodes[node])
for succ in sorted(graph.nodes[node].successors):
logging.debug('Visiting edge: %s -> %s ' % (node, succ))
visitor.visit_edge(graph.nodes[node], graph.nodes[succ])
visitor.visit_end_of_graph()
#===-----------------------------------------------------------------------===#
# Trimmers cut out parts of the ExplodedGraph so as to focus on other parts.
# Trimmers can be combined together by applying them sequentially.
#===-----------------------------------------------------------------------===#
# SinglePathTrimmer keeps only a single path - the leftmost path from the root.
# Useful when the trimmed graph is still too large.
class SinglePathTrimmer(object):
def __init__(self):
super(SinglePathTrimmer, self).__init__()
def trim(self, graph):
visited_nodes = set()
node_id = graph.root_id
while True:
visited_nodes.add(node_id)
node = graph.nodes[node_id]
if len(node.successors) > 0:
succ_id = node.successors[0]
succ = graph.nodes[succ_id]
node.successors = [succ_id]
succ.predecessors = [node_id]
if succ_id in visited_nodes:
break
node_id = succ_id
else:
break
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
# TargetedTrimmer keeps paths that lead to specific nodes and discards all
# other paths. Useful when you cannot use -trim-egraph (e.g. when debugging
# a crash).
class TargetedTrimmer(object):
def __init__(self, target_nodes):
super(TargetedTrimmer, self).__init__()
self._target_nodes = target_nodes
@staticmethod
def parse_target_node(node, graph):
if node.startswith('0x'):
ret = 'Node' + node
assert ret in graph.nodes
return ret
else:
for other_id in graph.nodes:
other = graph.nodes[other_id]
if other.node_id == int(node):
return other_id
@staticmethod
def parse_target_nodes(target_nodes, graph):
return [TargetedTrimmer.parse_target_node(node, graph)
for node in target_nodes.split(',')]
def trim(self, graph):
queue = self._target_nodes
visited_nodes = set()
while len(queue) > 0:
node_id = queue.pop()
visited_nodes.add(node_id)
node = graph.nodes[node_id]
for pred_id in node.predecessors:
if pred_id not in visited_nodes:
queue.append(pred_id)
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
for node_id in graph.nodes:
node = graph.nodes[node_id]
node.successors = [succ_id for succ_id in node.successors
if succ_id in visited_nodes]
node.predecessors = [succ_id for succ_id in node.predecessors
if succ_id in visited_nodes]
#===-----------------------------------------------------------------------===#
# The entry point to the script.
#===-----------------------------------------------------------------------===#
def main():
parser = argparse.ArgumentParser(
description='Display and manipulate Exploded Graph dumps.')
parser.add_argument('filename', type=str,
help='the .dot file produced by the Static Analyzer')
parser.add_argument('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
default=logging.WARNING,
help='enable info prints')
parser.add_argument('-d', '--diff', action='store_const', dest='diff',
const=True, default=False,
help='display differences between states')
parser.add_argument('-t', '--topology', action='store_const',
dest='topology', const=True, default=False,
help='only display program points, omit states')
parser.add_argument('-s', '--single-path', action='store_const',
dest='single_path', const=True, default=False,
help='only display the leftmost path in the graph '
'(useful for trimmed graphs that still '
'branch too much)')
parser.add_argument('--to', type=str, default=None,
help='only display execution paths from the root '
'to the given comma-separated list of nodes '
'identified by a pointer or a stable ID; '
'compatible with --single-path')
parser.add_argument('--dark', action='store_const', dest='dark',
const=True, default=False,
help='dark mode')
parser.add_argument('--gray', action='store_const', dest='gray',
const=True, default=False,
help='black-and-white mode')
parser.add_argument('--dump-dot-only', action='store_const',
dest='dump_dot_only', const=True, default=False,
help='instead of writing an HTML file and immediately '
'displaying it, dump the rewritten dot file '
'to stdout')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
graph = ExplodedGraph()
with open(args.filename) as fd:
for raw_line in fd:
raw_line = raw_line.strip()
graph.add_raw_line(raw_line)
trimmers = []
if args.to is not None:
trimmers.append(TargetedTrimmer(
TargetedTrimmer.parse_target_nodes(args.to, graph)))
if args.single_path:
trimmers.append(SinglePathTrimmer())
explorer = BasicExplorer()
visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
args.dump_dot_only)
for trimmer in trimmers:
trimmer.trim(graph)
explorer.explore(graph, visitor)
if __name__ == '__main__':
main()
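# Typical invocation (the .dot file name below is illustrative): rewrite an
# ExplodedGraph dump with state diffs enabled and a dark background:
#
#     python exploded-graph-rewriter.py ExprEngine.dot --diff --dark
#
# Pass --dump-dot-only to print the rewritten dot file to stdout instead of
# generating and opening an HTML page.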
| 38.535917 | 79 | 0.522921 | [
"Apache-2.0"
] | 0xmmalik/clang | utils/analyzer/exploded-graph-rewriter.py | 40,771 | Python |
import numpy as np
from pddlgym.core import get_successor_states, InvalidAction
from pddlgym.inference import check_goal
def get_all_reachable(s, A, env, reach=None):
reach = {} if not reach else reach
reach[s] = {}
for a in A:
try:
succ = get_successor_states(s,
a,
env.domain,
raise_error_on_invalid_action=True,
return_probs=True)
except InvalidAction:
succ = {s: 1.0}
reach[s][a] = {s_: prob for s_, prob in succ.items()}
for s_ in succ:
if s_ not in reach:
reach.update(get_all_reachable(s_, A, env, reach))
return reach
def vi(S, succ_states, A, V_i, G_i, goal, env, gamma, epsilon):
V = np.zeros(len(V_i))
P = np.zeros(len(V_i))
pi = np.full(len(V_i), None)
print(len(S), len(V_i), len(G_i), len(P))
print(G_i)
P[G_i] = 1
i = 0
diff = np.inf
while True:
print('Iteration', i, diff)
V_ = np.copy(V)
P_ = np.copy(P)
for s in S:
if check_goal(s, goal):
continue
Q = np.zeros(len(A))
Q_p = np.zeros(len(A))
cost = 1
for i_a, a in enumerate(A):
succ = succ_states[s, a]
probs = np.fromiter(iter(succ.values()), dtype=float)
succ_i = [V_i[succ_s] for succ_s in succ_states[s, a]]
Q[i_a] = cost + np.dot(probs, gamma * V_[succ_i])
Q_p[i_a] = np.dot(probs, P_[succ_i])
V[V_i[s]] = np.min(Q)
P[V_i[s]] = np.max(Q_p)
pi[V_i[s]] = A[np.argmin(Q)]
diff = np.linalg.norm(V_ - V, np.inf)
if diff < epsilon:
break
i += 1
return V, pi
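# The inner update is the usual Bellman backup for a cost-minimising MDP,
# V(s) <- min_a [ 1 + gamma * sum_{s'} P(s'|s,a) * V(s') ], with unit step cost
# and goal states skipped (their value stays at zero). Illustrative wiring of
# the two functions above (variable names are assumptions, not part of this
# module):
#
#     reach = get_all_reachable(s0, A, env)
#     S = list(reach)
#     V_i = {s: i for i, s in enumerate(S)}
#     G_i = [V_i[s] for s in S if check_goal(s, goal)]
#     succ_states = {(s, a): reach[s][a] for s in reach for a in reach[s]}
#     V, pi = vi(S, succ_states, A, V_i, G_i, goal, env, 0.99, 1e-3)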
| 29.625 | 75 | 0.477848 | [
"MIT"
] | GCrispino/vi-pddlgym | mdp.py | 1,896 | Python |
# pylint: disable=invalid-name
# Requires Python 3.6+
# Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html
"""Configuration for the Sphinx documentation generator."""
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve() # pylint: disable=no-member
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, str(PROJECT_ROOT_DIR))
# Make in-tree extension importable in non-tox setups/envs, like RTD.
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str((Path(__file__).parent / '_ext').resolve()))
# -- Project information -----------------------------------------------------
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
github_sponsors_url = f'{github_url}/sponsors'
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author # pylint: disable=redefined-builtin
# The short X.Y version
version = '.'.join(
get_scm_version(
local_scheme='no-local-version',
).split('.')[:3],
)
# The full version, including alpha/beta/rc tags
release = get_scm_version()
rst_epilog = f"""
.. |project| replace:: {project}
"""
# -- General configuration ---------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
# Third-party extensions:
'myst_parser', # extended markdown; https://pypi.org/project/myst-parser/
'sphinxcontrib.apidoc',
]
# Conditional third-party extensions:
try:
import sphinxcontrib.spelling as _sphinxcontrib_spelling
except ImportError:
extensions.append('spelling_stub_ext')
else:
del _sphinxcontrib_spelling
extensions.append('sphinxcontrib.spelling')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'changelog-fragments.d/**', # Towncrier-managed change notes
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {
}
html_context = {
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = f'{project} Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = 'Documentation'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = f'https://{github_repo_name.replace(".", "")}.readthedocs.io/en/latest/'
# The master toctree document.
root_doc = master_doc = 'index' # Sphinx 4+ / 3- # noqa: WPS429
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'myst': ('https://myst-parser.rtfd.io/en/latest', None),
'python': ('https://docs.python.org/3', None),
'python2': ('https://docs.python.org/2', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for sphinxcontrib.apidoc extension ------------------------------
apidoc_excluded_paths = [
'plugin/cache/*',
'testing/*.py',
]
apidoc_extra_args = [
'--implicit-namespaces',
'--private', # include “_private” modules
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
# -- Options for sphinxcontrib.spelling extension ----------------------------
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
# -- Options for extlinks extension ------------------------------------------
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'), # noqa: WPS323
'pr': (f'{github_repo_url}/pull/%s', 'PR #'), # noqa: WPS323
'commit': (f'{github_repo_url}/commit/%s', ''), # noqa: WPS323
'gh': (f'{github_url}/%s', 'GitHub: '), # noqa: WPS323
'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
}
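# With the mapping above, writing ``:issue:`123``` or ``:user:`abhinavsingh```
# in the docs expands to the corresponding GitHub URL, captioned with the
# configured prefix (e.g. "#123", "@abhinavsingh"); the role names are the
# keys of the dict.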
# -- Options for linkcheck builder -------------------------------------------
linkcheck_ignore = [
r'http://localhost:\d+/', # local URLs
]
linkcheck_workers = 25
# -- Options for myst_parser extension ------------------------------------------
myst_enable_extensions = [
'colon_fence', # allow to optionally use ::: instead of ```
'deflist',
'html_admonition', # allow having HTML admonitions
'html_image', # allow HTML <img> in Markdown
# FIXME: `linkify` turns "Proxy.Py` into a link so it's disabled now
# Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
# "linkify", # auto-detect URLs @ plain text, needs myst-parser[linkify]
'replacements', # allows Jinja2-style replacements
'smartquotes', # use "cursive" quotes
'substitution', # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
'project': project,
}
# -- Strict mode -------------------------------------------------------------
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
nitpick_ignore = [
(_any_role, '<proxy.HttpProxyBasePlugin>'),
(_any_role, '__init__'),
(_any_role, 'Client'),
(_any_role, 'event_queue'),
(_any_role, 'fd_queue'),
(_any_role, 'flag.flags'),
(_any_role, 'flags.work_klass'),
(_any_role, 'flush'),
(_any_role, 'httpx'),
(_any_role, 'HttpParser.state'),
(_any_role, 'HttpProtocolHandler'),
(_any_role, 'multiprocessing.Manager'),
(_any_role, 'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler'),
(_any_role, 'work_klass'),
(_py_class_role, '_asyncio.Task'),
(_py_class_role, 'asyncio.events.AbstractEventLoop'),
(_py_class_role, 'CacheStore'),
(_py_class_role, 'HttpParser'),
(_py_class_role, 'HttpProtocolHandlerPlugin'),
(_py_class_role, 'HttpProxyBasePlugin'),
(_py_class_role, 'HttpWebServerBasePlugin'),
(_py_class_role, 'multiprocessing.context.Process'),
(_py_class_role, 'multiprocessing.synchronize.Lock'),
(_py_class_role, 'NonBlockingQueue'),
(_py_class_role, 'paramiko.channel.Channel'),
(_py_class_role, 'proxy.http.parser.parser.T'),
(_py_class_role, 'proxy.plugin.cache.store.base.CacheStore'),
(_py_class_role, 'proxy.core.pool.AcceptorPool'),
(_py_class_role, 'proxy.core.executors.ThreadlessPool'),
(_py_class_role, 'proxy.core.acceptor.threadless.T'),
(_py_class_role, 'queue.Queue[Any]'),
(_py_class_role, 'TcpClientConnection'),
(_py_class_role, 'TcpServerConnection'),
(_py_class_role, 'unittest.case.TestCase'),
(_py_class_role, 'unittest.result.TestResult'),
(_py_class_role, 'UUID'),
(_py_class_role, 'Url'),
(_py_class_role, 'WebsocketFrame'),
(_py_class_role, 'Work'),
(_py_obj_role, 'proxy.core.acceptor.threadless.T'),
]
| 33.427119 | 96 | 0.673461 | [
"BSD-3-Clause"
] | JerryKwan/proxy.py | docs/conf.py | 9,865 | Python |
import pyJHTDB
# M1Q4
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200)
ii.write_coefficients()
# M2Q8
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 3)
ii.write_coefficients()
# M2Q14
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 6)
ii.write_coefficients()
| 22.75 | 87 | 0.785714 | [
"Apache-2.0"
] | idies/pyJHTDB | examples/get_channel_spline_coefficients.py | 364 | Python |
from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
from flask_mail import Mail, Message
import bcrypt
import re
from validate_email import validate_email
from validate_docbr import CPF
from sqlalchemy.ext.declarative import declarative_base
from flask_marshmallow import Marshmallow
from flask_cors import CORS, cross_origin
from marshmallow import fields
Base = declarative_base()
app = Flask(__name__)
mail= Mail(app)
CORS(app, support_credentials=True)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['SECRET_KEY'] = 'thisissecret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = ''
app.config['MAIL_PASSWORD'] = ''
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
public_id = db.Column(db.String(50), unique=True)
name = db.Column(db.String(50))
cpf = db.Column(db.String(11))
birthdate = db.Column(db.String(10))
gender = db.Column(db.String(1))
phone = db.Column(db.String(11))
email = db.Column(db.String(50))
password = db.Column(db.String(80))
passwordResetToken = db.Column(db.String(250))
passwordResetExpires = db.Column(db.String(100))
class Product(db.Model, Base):
__tablename__ = 'products'
product_id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String(250))
price = db.Column(db.Float)
installments = db.Column(db.Integer)
sizes = db.Column(db.String(50))
availableSizes = db.Column(db.String(50))
gender = db.Column(db.String(1))
material = db.Column(db.String(50))
color = db.Column(db.String(50))
brand = db.Column(db.String(50))
carts = db.relationship('Cart',secondary='cart_products')
class Image(db.Model, Base):
__tablename__ = 'products_imgs'
img_id = db.Column(db.Integer, primary_key=True)
url = db.Column(db.String(300))
product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
product = db.relationship('Product', backref='images')
class Cart(db.Model, Base):
__tablename__ = 'cart'
cart_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
total_amount = db.Column(db.Float)
create_dttm = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship('User', backref='images')
products = db.relationship('Product', secondary = 'cart_products')
class CP(db.Model, Base):
__tablename__ = 'cart_products'
id = db.Column(db.Integer, primary_key=True)
product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
cart_id = db.Column(db.Integer, db.ForeignKey('cart.cart_id'))
quantity = db.Column(db.Integer)
size = db.Column(db.String(5))
product = db.relationship(Product, backref=db.backref("cart_products", cascade="all, delete-orphan"))
cart = db.relationship(Cart, backref=db.backref("cart_products", cascade="all, delete-orphan"))
class ImageSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Image
include_fk = True
class ProductSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Product
images = fields.Nested(ImageSchema, many=True, only=['url'])
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
return jsonify({'message' : 'Token is missing!'}), 401
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = User.query.filter_by(public_id=data['public_id']).first()
except:
return jsonify({'message' : 'Token is invalid!'}), 401
return f(current_user, *args, **kwargs)
return decorated
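# Routes wrapped with @token_required expect the JWT issued by /login in an
# 'x-access-token' header. Illustrative request shape (token value is fake):
#
#   GET /user/<public_id>
#   x-access-token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...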
@app.route('/user/<public_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
@token_required
def get_one_user(current_user, public_id):
user = User.query.filter_by(public_id=public_id).first()
if not user:
return jsonify({'message' : 'Usuário não encontrado!'}), 400
user_data = {}
user_data['public_id'] = user.public_id
user_data['name'] = user.name
user_data['cpf'] = user.cpf
user_data['birthdate'] = user.birthdate
user_data['gender'] = user.gender
user_data['phone'] = user.phone
user_data['email'] = user.email
return jsonify({'user' : user_data}), 200
@app.route('/users', methods=['POST'])
@cross_origin(supports_credentials=True)
def create_user():
cpf = CPF()
data = request.get_json()
if not all(x.isalpha() or x.isspace() for x in str(data['name'])) or len(str(data['name'])) < 3 or len(str(data['name'])) > 100:
return jsonify({'message' : 'Nome inválido!'}), 400
elif not cpf.validate(str(data['cpf'])):
return jsonify({'message' : 'CPF inválido!'}), 400
elif datetime.date.today().year - datetime.datetime.strptime(str(data['birthdate']), "%d/%m/%Y").year < 18:
return jsonify({'message' : 'Usuário menor de idade!'}), 400
elif str(data['gender']) != "M" and str(data['gender']) != "F":
return jsonify({'message' : 'Gênero inválido!'}), 400
elif not str(data['phone']).isdigit() or len(str(data['phone'])) < 10:
return jsonify({'message' : 'Telefone inválido!'}), 400
elif not validate_email(str(data['email'])):
return jsonify({'message' : 'Email inválido!'}), 400
elif len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
return jsonify({'message' : 'Senha inválida!'}), 400
prospect_cpf = User.query.filter_by(cpf=data['cpf']).first()
prospect_email = User.query.filter_by(email=data['email']).first()
if prospect_cpf:
return jsonify({'message' : 'CPF já cadastrado!'}), 400
elif prospect_email:
return jsonify({'message' : 'Email já cadastrado!'}), 400
hashed_password = generate_password_hash(data['password'], method='sha256')
new_user = User(public_id=str(uuid.uuid4()), name=data['name'], cpf=data['cpf'], birthdate=data['birthdate'],
gender=data['gender'], phone=data['phone'], email=data['email'], password=hashed_password, passwordResetToken=None, passwordResetExpires=None)
db.session.add(new_user)
db.session.commit()
return jsonify({'message' : 'Usuário cadastrado com sucesso!'}), 200
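# Example JSON body accepted by create_user above (values are illustrative;
# 'birthdate' must match the %d/%m/%Y format checked during validation):
#
#   {
#     "name": "Maria Silva",
#     "cpf": "52998224725",
#     "birthdate": "01/01/1990",
#     "gender": "F",
#     "phone": "11987654321",
#     "email": "[email protected]",
#     "password": "strongpass123"
#   }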
@app.route('/users/<public_id>', methods=['DELETE'])
@cross_origin(supports_credentials=True)
@token_required
def delete_user(current_user, public_id):
user = User.query.filter_by(public_id=public_id).first()
if not user:
return jsonify({'message' : 'Usuário não encontrado'}), 400
db.session.delete(user)
db.session.commit()
return jsonify({'message' : 'Usuário apagado com sucesso!'}), 200
@app.route('/login', methods=['POST'])
@cross_origin(supports_credentials=True)
def login():
auth = request.get_json()
if not auth or not auth['email'] or not auth['password']:
return jsonify({'message' : 'Email ou senha não foram preenchidos!'}), 401
user = User.query.filter_by(email=auth['email']).first()
if not user:
return jsonify({'message' : 'Email não existe!'}), 401
if check_password_hash(user.password, auth['password']):
token = jwt.encode({'public_id' : user.public_id, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8'), 'id' : user.public_id, 'name' : user.name, 'email' : user.email}), 200
return jsonify({'message' : 'Senha incorreta'}), 401
@app.route("/forgot-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def send_email():
data = request.get_json()
user = User.query.filter_by(email=data['email']).first()
if not user:
return jsonify({'message' : "Email não encontrado!"}), 400
password = str(user.email).encode('UTF-8')
passToken = bcrypt.hashpw(password, bcrypt.gensalt())
passToken = re.sub('\W+','', str(passToken))
passExpires = str(datetime.datetime.utcnow() + datetime.timedelta(minutes=15))
user.passwordResetToken = passToken
user.passwordResetExpires = passExpires
db.session.commit()
msg = Message('Recuperação de senha - Gama Sports', sender = app.config['MAIL_USERNAME'], recipients = [user.email])
msg.body = "Olá " + str(user.email) + ", \n\n" + "Acesse o link a seguir para trocar sua senha: \n\n" + "http://localhost:4200/users/recover-password?token=" + str(passToken)
mail.send(msg)
return jsonify({'message' : "Email disparado!"}), 200
@app.route("/reset-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def change_password():
data = request.get_json()
user = User.query.filter_by(passwordResetToken=str(data['token'])).first()
if not user:
return jsonify({'message' : "Token inválido!"}), 400
date_time_exp = datetime.datetime.strptime(user.passwordResetExpires, '%Y-%m-%d %H:%M:%S.%f')
if datetime.datetime.utcnow() > date_time_exp:
return jsonify({'message' : "Token expirado, gere um novo!"}), 400
if len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
return jsonify({'message' : 'Senha inválida!'}), 400
hashed_newpassword = generate_password_hash(data['password'], method='sha256')
user.password = hashed_newpassword
user.passwordResetToken = None
user.passwordResetExpires = None
db.session.commit()
return jsonify({'message' : "Senha trocada com sucesso!"}), 200
@app.route('/products', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_all_products():
search = request.args.get("search", None)
if not search:
products = Product.query.all()
else:
search = "%{}%".format(search)
products = Product.query.filter(Product.description.like(search)).all()
if not products:
return jsonify([]), 200
product_schema = ProductSchema(many=True)
output = product_schema.dump(products)
return jsonify(output), 200
@app.route('/products/<product_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_product(product_id):
product = Product.query.filter_by(product_id=product_id).first()
if not product:
return jsonify({'message' : 'Produto não encontrado!'}), 400
product_schema = ProductSchema()
output = product_schema.dump(product)
return jsonify(output), 200
@app.route('/cart', methods=['POST'])
@cross_origin(supports_credentials=True)
@token_required
def create_cart(current_user):
data = request.get_json()
cart = Cart(total_amount=data['total'], user_id=data['clientId'])
db.session.add(cart)
db.session.commit()
if not cart:
return jsonify({'message' : 'Problema na inclusão do carrinho'}), 400
for product in data['products']:
if not product:
return jsonify({'message' : 'Problema na inclusão do produto'}), 400
add_product = CP(product_id=product['id'], cart_id=cart.cart_id, quantity=product['quantity'], size=product['size'])
db.session.add(add_product)
db.session.commit()
return jsonify({'message' : 'Carrinho salvo com sucesso!'}), 200
if __name__ == '__main__':
app.run(debug=True) | 34.849558 | 178 | 0.675047 | [
"MIT"
] | lucas-almeida-silva/gama-sports | server/API.py | 11,841 | Python |
from discord.ext import commands
import discord
class EphemeralCounterBot(commands.Bot):
def __init__(self):
super().__init__()
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
# Define a simple View that gives us a counter button
class Counter(discord.ui.View):
# Define the actual button
# When pressed, this increments the number displayed until it hits 5.
# When it hits 5, the counter button is disabled and it turns green.
# note: The name of the function does not matter to the library
@discord.ui.button(label='0', style=discord.ButtonStyle.red)
async def count(self, button: discord.ui.Button, interaction: discord.Interaction):
number = int(button.label) if button.label else 0
if number + 1 >= 5:
button.style = discord.ButtonStyle.green
button.disabled = True
button.label = str(number + 1)
# Make sure to update the message with our updated selves
await interaction.response.edit_message(view=self)
# Define a View that will give us our own personal counter button
class EphemeralCounter(discord.ui.View):
# When this button is pressed, it will respond with a Counter view that will
# give the button presser their own personal button they can press 5 times.
@discord.ui.button(label='Click', style=discord.ButtonStyle.blurple)
async def receive(self, button: discord.ui.Button, interaction: discord.Interaction):
# ephemeral=True makes the message hidden from everyone except the button presser
await interaction.response.send_message('Enjoy!', view=Counter(), ephemeral=True)
bot = EphemeralCounterBot()
@bot.slash()
async def counter(ctx: commands.Context):
"""Starts a counter for pressing."""
await ctx.send('Press!', view=EphemeralCounter())
bot.run('token')
| 39.520833 | 89 | 0.702688 | [
"MIT"
] | NextChai/discord.py | examples/views/ephemeral.py | 1,897 | Python |
"""Test the creation of all inventories."""
import stewi
from stewi.globals import paths, STEWI_VERSION, config
year = 2018
def test_inventory_generation():
# Create new local path
paths.local_path = paths.local_path + "_" + STEWI_VERSION
error_list = []
for inventory in config()['databases']:
        # skip RCRAInfo due to browser download
if inventory in ['RCRAInfo']:
continue
df = stewi.getInventory(inventory, year)
error = df is None
if not error:
error = len(df) == 0
if error:
error_list.append(inventory)
assert len(error_list) == 0, f"Generation of {','.join(error_list)} unsuccessful"
if __name__ == "__main__":
test_inventory_generation()
| 24.612903 | 85 | 0.63827 | [
"CC0-1.0"
] | matthewlchambers/standardizedinventories | tests/test_inventory_generation.py | 763 | Python |
# define a function, which accepts 2 arguments
def cheese_and_crackers(cheese_count, boxes_of_crackers):
# %d is for digit
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
# go to a new line after the end
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
# call the function defined above
# by passing plain numbers,
# also called numeric constants
# or numeric literals
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
# a variable definition
# doesn't need a 'def' beforehand
amount_of_cheese = 10
amount_of_crackers = 50
# call (use, invoke, run) the function by passing the above variables
# or vars, for short
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
# python interpreter first calculates the math
# then passes the results as arguments
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
# python substitutes the vars with their values, then does the math,
# and finally passes the calculated results to the function
# literals (consts), variables, math - all of these are called expressions
# calculating the math and substituting vars with their values is called 'expression evaluation'
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
#################################################################
# another way to call a function is using result of calling another function
# which could be a built-in or custom
# also, don't forget about so-called "splats", when a function can accept any number of args
def pass_any_two(*args):
print "There are %d arguments" % len(args)
print "First: %r" % args[0]
print "Second: %r" % args[1]
return "%r %r" % (args[0], args[1])
# 1: constants
pass_any_two(1, 2)
# 2: variables
first = "f"
second = "s"
pass_any_two(first, second)
# 3: math of consts
pass_any_two(4 + 6, 5 + 8)
# 4: math of vars
a = 5
b = 6
pass_any_two(a + 8, b * 2)
# 5: more than two args
pass_any_two(1, 2, 3, 4)
# 6: built-in function call results
txt = "what is my length?"
pass_any_two(len(txt), txt)
# 7: custom (same) function call results
pass_any_two(0, pass_any_two)
# 8: call by alias (just another name)
pass_any_2 = pass_any_two
pass_any_2("alias", "called")
# 9: call by invoking buil-in __call__ method
pass_any_two.__call__("__call__", "invoked")
# 10: call by passing a list, converted to multiple arguments
pass_any_two(*["list", "converted", 3, 4])
| 27.178947 | 92 | 0.717661 | [
"MIT"
] | python-practice/lpthw | ex19/ex19-sd.py | 2,582 | Python |
from setuptools import setup, find_packages
setup(
name="squirrel-and-friends",
version="0.1",
packages=find_packages(),
install_requires=[
"emoji==0.5.4", "nltk==3.5", "pyspellchecker==0.5.4",
"numerizer==0.1.5", "lightgbm==2.3.1",
"albumentations==0.5.2", "opencv-python==4.5.1.48",
"opencv-python-headless==4.5.1.48",
"torch==1.7.1", "imgaug==0.4.0",
"numpy==1.19.5", "pandas==0.25.1",
"tensorboard==2.4.1", "tensorboard-plugin-wit==1.8.0",
"tensorflow-estimator==2.4.0", "tensorflow-gpu==2.4.1"
]
)
| 32.722222 | 62 | 0.568761 | [
"MIT"
] | JacobXPX/squirrel-and-friends | setup.py | 589 | Python |
#!/usr/bin/env python
import os
import sys
import warnings
if __name__ == "__main__":
here = os.path.dirname(__file__)
there = os.path.join(here, '..')
there = os.path.abspath(there)
sys.path.insert(0, there)
print "NOTE Using jingo_offline_compressor from %s" % there
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 26.111111 | 71 | 0.725532 | [
"BSD-3-Clause"
] | peterbe/django-jingo-offline-compressor | example/manage.py | 470 | Python |
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
#  3.  Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#   OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
The FSM spec exploits only single input and single output. As a side
product, a list of output patterns are also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
This test will check 2 and MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
This test will test when maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
| 39.842027 | 79 | 0.625533 | [
"BSD-3-Clause"
] | AbinMM/PYNQ | pynq/lib/logictools/tests/test_fsm_generator.py | 26,734 | Python |
# Bit Manipulation
# Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.
#
# Example 1:
#
# Input: ["abcw","baz","foo","bar","xtfn","abcdef"]
# Output: 16
# Explanation: The two words can be "abcw", "xtfn".
# Example 2:
#
# Input: ["a","ab","abc","d","cd","bcd","abcd"]
# Output: 4
# Explanation: The two words can be "ab", "cd".
# Example 3:
#
# Input: ["a","aa","aaa","aaaa"]
# Output: 0
# Explanation: No such pair of words.
class Solution:
def maxProduct(self, words):
"""
:type words: List[str]
:rtype: int
"""
wordsDict = {}
for word in words:
wordsDict[word] = set(word)
output = 0
for i in range(len(words)):
for j in range(i+1, len(words)):
if not wordsDict[words[i]]&wordsDict[words[j]]:
output = max(output, len(words[i])*len(words[j]))
return output
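# The header above tags this problem as "Bit Manipulation", while the accepted
# solution uses set intersection. For reference, a minimal bitmask sketch of
# the same idea (this class name and code are illustrative additions, not part
# of the original file): encode each word as a 26-bit mask; two words share no
# letters exactly when the AND of their masks is 0.
class SolutionBitmask:
    def maxProduct(self, words):
        """
        :type words: List[str]
        :rtype: int
        """
        masks = [0] * len(words)
        for i, word in enumerate(words):
            for ch in word:
                masks[i] |= 1 << (ord(ch) - ord('a'))
        output = 0
        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                if masks[i] & masks[j] == 0:
                    output = max(output, len(words[i]) * len(words[j]))
        return output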
| 29.513514 | 244 | 0.581502 | [
"MIT"
] | gesuwen/Algorithms | LeetCode/318 Maximum Product of Word Lengths.py | 1,092 | Python |
from flask_mail import Message
from flask import render_template
from . import mail
subject_pref = 'Pitches'
sender_email = "[email protected]"
def mail_message(subject,template,to,**kwargs):
sender_email = '[email protected]'
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email) | 31.928571 | 66 | 0.740492 | [
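# Illustrative usage only; the template name and keyword argument below are
# placeholders, not values taken from this project:
#   mail_message("Welcome to Pitches", "email/welcome", user.email, user=user)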
"MIT"
] | ruthjelimo/Pitch-app | app/email.py | 447 | Python |
"""Test mysensors MQTT gateway with unittest."""
import os
import tempfile
import time
from unittest import TestCase, main, mock
from mysensors import ChildSensor, Sensor
from mysensors.gateway_mqtt import MQTTGateway
class TestMQTTGateway(TestCase):
"""Test the MQTT Gateway."""
def setUp(self):
"""Set up gateway."""
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = MQTTGateway(self.mock_pub, self.mock_sub)
def tearDown(self):
"""Stop MQTTGateway if alive."""
if self.gateway.is_alive():
self.gateway.stop()
def _add_sensor(self, sensorid):
"""Add sensor node. Return sensor node instance."""
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_send(self):
"""Test send method."""
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
def test_send_empty_string(self):
"""Test send method with empty string."""
self.gateway.send('')
self.assertFalse(self.mock_pub.called)
def test_send_error(self):
"""Test send method with error on publish."""
self.mock_pub.side_effect = ValueError(
'Publish topic cannot contain wildcards.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
self.assertEqual(
# only check first line of error log
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Publish to /1/1/1/0/1 failed: '
'Publish topic cannot contain wildcards.')
def test_recv(self):
"""Test recv method."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
def test_recv_wrong_prefix(self):
"""Test recv method with wrong topic prefix."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('wrong/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, None)
def test_presentation(self):
"""Test handle presentation message."""
self._add_sensor(1)
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
def test_presentation_no_sensor(self):
"""Test handle presentation message without sensor."""
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
self.assertFalse(self.mock_sub.called)
def test_subscribe_error(self):
"""Test subscribe throws error."""
self._add_sensor(1)
self.mock_sub.side_effect = ValueError(
'No topic specified, or incorrect topic type.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
self.assertEqual(
# only check first line of error log
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Subscribe to /1/1/1/+/+ failed: '
'No topic specified, or incorrect topic type.')
def test_start_stop_gateway(self):
"""Test start and stop of MQTT gateway."""
self.assertFalse(self.gateway.is_alive())
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.recv('/1/1/1/0/1', '30', 0)
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.start()
self.assertTrue(self.gateway.is_alive())
calls = [
mock.call('/+/+/0/+/+', self.gateway.recv, 0),
mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
time.sleep(0.05)
calls = [
mock.call('/1/1/1/0/1', '20', 0, True),
mock.call('/1/1/1/0/1', '30', 0, True)]
self.mock_pub.assert_has_calls(calls)
self.gateway.stop()
self.gateway.join(timeout=0.5)
self.assertFalse(self.gateway.is_alive())
def test_mqtt_load_persistence(self):
"""Test load persistence file for MQTTGateway."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
with tempfile.TemporaryDirectory() as temp_dir:
self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
# pylint: disable=protected-access
self.gateway._save_sensors()
del self.gateway.sensors[1]
self.assertNotIn(1, self.gateway.sensors)
self.gateway._safe_load_sensors()
self.assertEqual(
self.gateway.sensors[1].children[1].id,
sensor.children[1].id)
self.assertEqual(
self.gateway.sensors[1].children[1].type,
sensor.children[1].type)
self.assertEqual(
self.gateway.sensors[1].children[1].values,
sensor.children[1].values)
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
class TestMQTTGatewayCustomPrefix(TestCase):
"""Test the MQTT Gateway with custom topic prefix."""
def setUp(self):
"""Set up test."""
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = None
def _setup(self, in_prefix, out_prefix):
"""Set up gateway."""
self.gateway = MQTTGateway(
self.mock_pub, self.mock_sub, in_prefix=in_prefix,
out_prefix=out_prefix)
def _add_sensor(self, sensorid):
"""Add sensor node. Return sensor node instance."""
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_nested_prefix(self):
"""Test recv method with nested topic prefix."""
self._setup('test/test-in', 'test/test-out')
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
if __name__ == '__main__':
main()
| 39.16 | 79 | 0.598698 | [
"MIT"
] | jslove/pymysensors | tests/test_gateway_mqtt.py | 7,832 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'name': 'str'
}
attribute_map = {
'key': 'key',
'name': 'name'
}
def __init__(self, key=None, name=None, local_vars_configuration=None): # noqa: E501
"""IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._name = None
self.discriminator = None
if key is not None:
self.key = key
self.name = name
@property
def key(self):
"""Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
"""
self._key = key
@property
def name(self):
"""Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return True
return self.to_dict() != other.to_dict()
| 34.960526 | 169 | 0.641325 | [
"Apache-2.0"
] | mariusgheorghies/python | kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py | 5,314 | Python |
#
# These are settings for Heroku Production Environment
#
from .common import *
import dj_database_url
# We don't want any debug warnings giving
# away unnecessary information to attackers
DEBUG = False
# We grab the secret key from the environment because it is
# our production key and no can know it
SECRET_KEY = os.environ.get('SECRET_KEY')
# We redirect any http requests to their https equivalents
SECURE_SSL_REDIRECT = True
ALLOWED_HOSTS = ["yefbackend.herokuapp.com", "localhost"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# In a real production environment, we would likely want to
# handle static files on a different machine.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# We let the dj_database_url package pull the database info from heroku
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config(conn_max_age=600, ssl_require=True)
}
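# For reference, dj_database_url.config() reads the DATABASE_URL environment
# variable, e.g. postgres://USER:PASSWORD@HOST:5432/NAME (example value only,
# not this project's actual configuration).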
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': DEFAULT_RENDERER_CLASSES
}
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'yefclient.herokuapp.com'
)
| 23.204082 | 73 | 0.761653 | [
"MIT"
] | JumboCode/YEF | backend/src/settings/prod.py | 1,137 | Python |
def metade(x=0):
    # half of x
    res = x / 2
    return res
def dobro(x=0):
    # twice x
    res = 2 * x
    return res
def aumentar(x=0, y=0):
    # increase x by y percent
    res = x * (1 + y / 100)
    return res
def reduzir(x=0, y=0):
    # reduce x by y percent
    res = x * (1 - y / 100)
    return res
def moeda(x=0, m='R$'):
    # format x as currency, e.g. 'R$3,50'
    res = f'{m}{x:.2f}'.replace('.', ',')
    return res
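# Illustrative usage sketch; the values below are arbitrary and not part of
# the exercise. Guarded so importing this module is unaffected.
if __name__ == '__main__':
    p = 100.0
    print(moeda(metade(p)))        # R$50,00
    print(moeda(dobro(p)))         # R$200,00
    print(moeda(aumentar(p, 10)))  # R$110,00
    print(moeda(reduzir(p, 13)))   # R$87,00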
| 13.208333 | 41 | 0.473186 | [
"MIT"
] | bernardombraga/Solucoes-exercicios-cursos-gratuitos | Curso-em-video-Python3-mundo3/ex108/moeda.py | 317 | Python |
"""Highlevel API for managing PRs on Github"""
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: State for Github Issues
IssueState = Enum("IssueState", "open closed all") # pylint: disable=invalid-name
class GitHubHandler:
"""Handles interaction with GitHub
Arguments:
token: OAUTH token granting permissions to GH
dry_run: Don't actually modify things if set
to_user: Target User/Org for PRs
to_repo: Target repository within **to_user**
"""
PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
ISSUES = "/repos/{user}/{repo}/issues{/number}"
ORG_MEMBERS = "/orgs/{user}/members{/username}"
STATE = IssueState
def __init__(self, token: str,
dry_run: bool = False,
to_user: str = "bioconda",
to_repo: str = "bioconnda-recipes") -> None:
self.token = token
self.dry_run = dry_run
self.var_default = {'user': to_user,
'repo': to_repo}
# filled in by login():
self.api: gidgethub.abc.GitHubAPI = None
self.username: str = None
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
"""Create API object"""
def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
"""Format domain relative url for **path** on **branch_name**"""
return "/{user}/{repo}/tree/{branch_name}/{path}".format(
branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
"""Log into API (fills `self.username`)"""
self.create_api_object(*args, **kwargs)
if not self.token:
self.username = "UNKNOWN [no token]"
else:
user = await self.api.getitem("/user")
self.username = user["login"]
async def is_member(self, username) -> bool:
"""Check if **username** is member of current org"""
if not username:
return False
var_data = copy(self.var_default)
var_data['username'] = username
try:
await self.api.getitem(self.ORG_MEMBERS, var_data)
except gidgethub.BadRequest:
logger.debug("User %s is not a member of %s", username, var_data['user'])
return False
logger.debug("User %s IS a member of %s", username, var_data['user'])
return True
# pylint: disable=too-many-arguments
async def get_prs(self,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = None,
number: Optional[int] = None,
state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
"""Retrieve list of PRs matching parameters
Arguments:
from_branch: Name of branch from which PR asks to pull
            from_user: Name of user/org from which to pull
(default: from auth)
to_branch: Name of branch into which to pull (default: master)
number: PR number
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
if from_branch:
if from_user:
var_data['head'] = f"{from_user}:{from_branch}"
else:
var_data['head'] = from_branch
if to_branch:
var_data['base'] = to_branch
if number:
var_data['number'] = str(number)
if state:
var_data['state'] = state.name.lower()
return await self.api.getitem(self.PULLS, var_data)
# pylint: disable=too-many-arguments
async def create_pr(self, title: str,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = "master",
body: Optional[str] = None,
maintainer_can_modify: bool = True) -> Dict[Any, Any]:
"""Create new PR
Arguments:
title: Title of new PR
from_branch: Name of branch from which PR asks to pull
            from_user: Name of user/org from which to pull
to_branch: Name of branch into which to pull (default: master)
body: Body text of PR
maintainer_can_modify: Whether to allow maintainer to modify from_branch
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
data: Dict[str, Any] = {'title': title,
'body': '',
'maintainer_can_modify': maintainer_can_modify}
if body:
data['body'] += body
if from_branch:
if from_user and from_user != self.username:
data['head'] = f"{from_user}:{from_branch}"
else:
data['head'] = from_branch
if to_branch:
data['base'] = to_branch
logger.debug("PR data %s", data)
if self.dry_run:
logger.info("Would create PR '%s'", title)
return {'number': -1}
logger.info("Creating PR '%s'", title)
return await self.api.post(self.PULLS, var_data, data=data)
async def modify_issue(self, number: int,
labels: Optional[List[str]] = None,
title: Optional[str] = None,
body: Optional[str] = None) -> Dict[Any, Any]:
"""Modify existing issue (PRs are issues)
Arguments:
labels: list of labels to assign to issue
title: new title
body: new body
"""
var_data = copy(self.var_default)
var_data["number"] = str(number)
data: Dict[str, Any] = {}
if labels:
data['labels'] = labels
if title:
data['title'] = title
if body:
data['body'] = body
if self.dry_run:
logger.info("Would modify PR %s", number)
if title:
logger.info("New title: %s", title)
if labels:
logger.info("New labels: %s", labels)
if body:
logger.info("New Body:\n%s\n", body)
return {'number': number}
logger.info("Modifying PR %s", number)
return await self.api.patch(self.ISSUES, var_data, data=data)
class AiohttpGitHubHandler(GitHubHandler):
"""GitHubHandler using Aiohttp for HTTP requests
Arguments:
session: Aiohttp Client Session object
requester: Identify self (e.g. user agent)
"""
def create_api_object(self, session: aiohttp.ClientSession,
requester: str, *args, **kwargs) -> None:
self.api = gidgethub.aiohttp.GitHubAPI(
session, requester, oauth_token=self.token
)
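# Minimal usage sketch, not part of the original module: the requester string,
# repo names and token handling below are placeholders. The coroutine is only
# defined, never called, so importing this module is unaffected.
async def _example_usage(token: str) -> None:
    """Log in and list open PRs against master (illustrative only)."""
    async with aiohttp.ClientSession() as session:
        handler = AiohttpGitHubHandler(
            token, dry_run=True,
            to_user="bioconda", to_repo="bioconda-recipes")
        await handler.login(session, "githubhandler-example")
        open_prs = await handler.get_prs(to_branch="master",
                                         state=IssueState.open)
        logger.info("Found %s open PR(s)", len(open_prs))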
| 35.078431 | 85 | 0.55981 | [
"MIT"
] | erictleung/bioconda-utils | bioconda_utils/githubhandler.py | 7,156 | Python |
import re
from ._video import Video
from ._channel import Channel
from ._playlist import Playlist
from ._videobulk import _VideoBulk
from ._channelbulk import _ChannelBulk
from ._playlistbulk import _PlaylistBulk
from ._auxiliary import _parser, _filter, _src
class Search:
def __init__(self):
pass
@staticmethod
def video(keywords: str):
"""
:return: < video object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
video_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
return Video(video_ids[0]) if video_ids else None
@staticmethod
def channel(keywords: str):
"""
:return: < channel object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
channel_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
return Channel(channel_ids[0]) if channel_ids else None
@staticmethod
def videos(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
:param int limit: total number of videos to be searched
:return: list of < video object > of each video regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
raw_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _VideoBulk(pureList) if pureList else None
@staticmethod
def channels(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
:param int limit: total number of channels to be searched
        :return: list of < channel object > of each channel regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
raw_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _ChannelBulk(pureList) if pureList else None
@staticmethod
def playlist(keywords: str):
"""
:return: < playlist object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
return Playlist(found[0]) if found else None
@staticmethod
def playlists(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
        :param int limit: total number of playlists to be searched
:return: list of < playlist object > of each playlist regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
pure = _filter(limit=limit, iterable=found)
return _PlaylistBulk(pure) if pure else None
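# Illustrative usage sketch, not part of the original module; the query
# strings are arbitrary and the returned objects are only compared against
# None here, since their attributes are defined elsewhere in the package.
#
#   top_video = Search.video('python asyncio tutorial')
#   some_channels = Search.channels('python', limit=3)
#   if top_video is not None and some_channels is not None:
#       ...  # work with the Video object and the _ChannelBulk result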
| 40.435897 | 107 | 0.640457 | [
"MIT"
] | SlumberDemon/AioTube | src/_query.py | 3,154 | Python |
#-*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from climatespider.items import ClimatespiderItem
from scrapy.selector import Selector
from dateutil.parser import parse
import re
import datetime
from scrapy.exceptions import CloseSpider
def getyesterdaty():
today_date = datetime.date.today()
yesterday_date = today_date - datetime.timedelta(days=1)
return yesterday_date.strftime('%Y/%m/%d')
class wugSpider(CrawlSpider):
name = "WUGCrawlSpider_AO"
#today_date = datetime.now().strftime('%Y/%m/%d')
allowed_domains = ['www.wunderground.com']
start_urls = [
'https://www.wunderground.com/history/airport/ZBAA/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54618/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZBTJ/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZBYN/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSSS/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50888/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50136/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZYHB/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50854/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSOF/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZLXY/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54602/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/VMMC/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54401/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/58506/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGHA/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSHC/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZHHH/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/58606/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGGG/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGSZ/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/53798/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZYTL/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZUUU/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50774/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50949/{0}/DailyHistory.html'.format(getyesterdaty())
]
def parse(self, response):
sel = Selector(response)
indexlist = list(map(lambda x: x.replace(' ','').replace('.',''),sel.xpath('//table[@id="obsTable"]/thead/tr/th/text()').extract()))
date = re.match(r'.*(\d{4}\/\d{1,2}\/\d{1,2}).*', response.url).group(1)
datatable = sel.xpath('//tr[@class="no-metars"]')
# items = []
for each in datatable:
item = ClimatespiderItem()
item['area'] = re.match(r'.*history/(.*)/2\d{3}/.*', response.url).group(1)
# item['date'] = date
if len(indexlist) == 13:
item['the_date'] = date
item['the_time'] = parse(each.xpath('td[1]/text()').extract()[0]).strftime('%H:%M')
item['qx_Humidity'] = each.xpath('td[5]/text()').extract()[0]
item['qx_WindDir'] = each.xpath('td[8]/text()').extract()[0]
item['qx_Precip'] = each.xpath('td[11]/text()').extract()[0]
item['qx_Events'] = each.xpath('td[12]/text()').extract()[0].strip()
try:
item['qx_Condition'] = each.xpath('td[13]/text()').extract()[0]
except Exception as e:
item['qx_Condition'] = ''
try:
item['qx_Temp'] = each.xpath('td[2]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Temp'] = each.xpath('td[2]/text()').extract()[0].strip().replace('-','')
try:
item['qx_WindChill_HeatIndex'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_WindChill_HeatIndex'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-','')
try:
item['qx_DewPoint'] = each.xpath('td[4]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_DewPoint'] = each.xpath('td[4]/text()').extract()[0].strip().replace('-','')
try:
item['qx_Pressure'] = each.xpath('td[6]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Pressure'] = each.xpath('td[6]/text()').extract()[0].strip().replace('-','')
try:
item['qx_Visibility'] = each.xpath('td[7]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Visibility'] = each.xpath('td[7]/text()').extract()[0].strip().replace('-','')
try:
item['qx_WindSpeed'] = each.xpath('td[9]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_WindSpeed'] = each.xpath('td[9]/text()').extract()[0].strip().replace('-','')
try:
item['qx_GustSpeed'] = each.xpath('td[10]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_GustSpeed'] = each.xpath('td[10]/text()').extract()[0].strip().replace('-','')
yield item
else:
item['the_date'] = date
item['the_time'] = parse(each.xpath('td[1]/text()').extract()[0]).strftime('%H:%M')
item['qx_Humidity'] = each.xpath('td[4]/text()').extract()[0]
item['qx_WindDir'] = each.xpath('td[7]/text()').extract()[0]
item['qx_Precip'] = each.xpath('td[10]/text()').extract()[0]
item['qx_Events'] = each.xpath('td[11]/text()').extract()[0].strip()
try:
item['qx_Condition'] = each.xpath('td[12]/text()').extract()[0]
except Exception as e:
item['qx_Condition'] = ''
try:
item['qx_Temp'] = each.xpath('td[2]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Temp'] = each.xpath('td[2]/text()').extract()[0].strip().replace('-','')
# try:
# item['WindChill_HeatIndex'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
# except Exception as e:
# item['WindChill_HeatIndex'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-', '')
try:
item['qx_DewPoint'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_DewPoint'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-', '')
try:
item['qx_Pressure'] = each.xpath('td[5]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Pressure'] = each.xpath('td[5]/text()').extract()[0].strip().replace('-', '')
try:
item['qx_Visibility'] = each.xpath('td[6]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Visibility'] = each.xpath('td[6]/text()').extract()[0].strip().replace('-', '')
try:
item['qx_WindSpeed'] = each.xpath('td[8]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_WindSpeed'] = each.xpath('td[8]/text()').extract()[0].strip().replace('-', '')
try:
item['qx_GustSpeed'] = each.xpath('td[9]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_GustSpeed'] = each.xpath('td[9]/text()').extract()[0].strip().replace('-', '')
yield item
# for index in range(len(indexlist)):
| 66.274648 | 140 | 0.568165 | [
"Apache-2.0"
] | burnman108/climateSpider | climatespider/climatespider/spiders/AO_wugspider.py | 9,411 | Python |
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.api_client import ApiClient, Endpoint as _Endpoint
from vrchatapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from vrchatapi.model.create_world_request import CreateWorldRequest
from vrchatapi.model.error import Error
from vrchatapi.model.instance import Instance
from vrchatapi.model.limited_world import LimitedWorld
from vrchatapi.model.update_world_request import UpdateWorldRequest
from vrchatapi.model.world import World
from vrchatapi.model.world_metadata import WorldMetadata
from vrchatapi.model.world_publish_status import WorldPublishStatus
class WorldsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [],
'endpoint_path': '/worlds',
'operation_id': 'create_world',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_world_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_world_request':
(CreateWorldRequest,),
},
'attribute_map': {
},
'location_map': {
'create_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'delete_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_active_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/active',
'operation_id': 'get_active_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_favorited_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/favorites',
'operation_id': 'get_favorited_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_recent_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/recent',
'operation_id': 'get_recent_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'get_world',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_instance_endpoint = _Endpoint(
settings={
'response_type': (Instance,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/{instanceId}',
'operation_id': 'get_world_instance',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
'instance_id',
],
'required': [
'world_id',
'instance_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'instance_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
'instance_id': 'instanceId',
},
'location_map': {
'world_id': 'path',
'instance_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_metadata_endpoint = _Endpoint(
settings={
'response_type': (WorldMetadata,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/metadata',
'operation_id': 'get_world_metadata',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_publish_status_endpoint = _Endpoint(
settings={
'response_type': (WorldPublishStatus,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'get_world_publish_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.publish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'publish_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.search_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds',
'operation_id': 'search_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'user',
'user_id',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'user',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('user',): {
"ME": "me"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'user':
(str,),
'user_id':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'user': 'user',
'user_id': 'userId',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'user': 'query',
'user_id': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.unpublish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'unpublish_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'update_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
'update_world_request',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'update_world_request':
(UpdateWorldRequest,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
'update_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_world(
self,
**kwargs
):
"""Create World # noqa: E501
Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_world(async_req=True)
>>> result = thread.get()
Keyword Args:
create_world_request (CreateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.create_world_endpoint.call_with_http_info(**kwargs)
def delete_world(
self,
world_id,
**kwargs
):
"""Delete World # noqa: E501
Delete a world. Notice a world is never fully \"deleted\", only its ReleaseStatus is set to \"hidden\" and the linked Files are deleted. The WorldID is permanently reserved. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.delete_world_endpoint.call_with_http_info(**kwargs)
def get_active_worlds(
self,
**kwargs
):
"""List Active Worlds # noqa: E501
Search and list currently Active worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
def get_favorited_worlds(
self,
**kwargs
):
"""List Favorited Worlds # noqa: E501
Search and list favorited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_favorited_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
def get_recent_worlds(
self,
**kwargs
):
"""List Recent Worlds # noqa: E501
Search and list recently visited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_recent_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
def get_world(
self,
world_id,
**kwargs
):
"""Get World by ID # noqa: E501
Get information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_endpoint.call_with_http_info(**kwargs)
def get_world_instance(
self,
world_id,
instance_id,
**kwargs
):
"""Get World Instance # noqa: E501
        Returns a world's instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_instance(world_id, instance_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
instance_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Instance
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
kwargs['instance_id'] = \
instance_id
return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
def get_world_metadata(
self,
world_id,
**kwargs
):
"""Get World Metadata # noqa: E501
        Return a world's custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_metadata(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldMetadata
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
def get_world_publish_status(
self,
world_id,
**kwargs
):
"""Get World Publish Status # noqa: E501
        Returns a world's publish status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_publish_status(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldPublishStatus
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
def publish_world(
self,
world_id,
**kwargs
):
"""Publish World # noqa: E501
Publish a world. You can only publish one world per week. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.publish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.publish_world_endpoint.call_with_http_info(**kwargs)
def search_worlds(
self,
**kwargs
):
"""Search All Worlds # noqa: E501
Search and list any worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
user (str): Set to `me` for searching own worlds.. [optional] if omitted the server will use the default value of "me"
user_id (str): Filter by UserID.. [optional]
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.search_worlds_endpoint.call_with_http_info(**kwargs)
def unpublish_world(
self,
world_id,
**kwargs
):
"""Unpublish World # noqa: E501
Unpublish a world. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unpublish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
def update_world(
self,
world_id,
**kwargs
):
"""Update World # noqa: E501
Update information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
update_world_request (UpdateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.update_world_endpoint.call_with_http_info(**kwargs)
| 36.510513 | 201 | 0.450378 | [
"MIT"
] | vrchatapi/vrchatapi-python | vrchatapi/api/worlds_api.py | 74,664 | Python |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/js_ts_linter.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import multiprocessing
import os
import shutil
import subprocess
import sys
from core.tests import test_utils
from . import js_ts_linter
from . import pre_commit_linter
from .. import common
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, os.pardir, 'oppia_tools')
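# esprima is vendored under oppia_tools rather than installed into
# site-packages, so its directory must be added to sys.path before the
# `import esprima` statement below can succeed.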
ESPRIMA_PATH = os.path.join(
OPPIA_TOOLS_DIR, 'esprima-%s' % common.ESPRIMA_VERSION)
sys.path.insert(1, ESPRIMA_PATH)
import esprima # isort:skip pylint: disable=wrong-import-order, wrong-import-position
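# The linter checks run in separate processes, so the shared file cache is
# kept on a multiprocessing Manager namespace to make it accessible from all
# of them.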
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_JS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.js')
VALID_TS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.ts')
VALID_APP_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ts')
VALID_APP_CONSTANTS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ajs.ts')
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ts')
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ajs.ts')
VALID_BACKEND_API_SERVICE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid-backend-api.service.ts')
EXTRA_JS_FILEPATH = os.path.join('core', 'templates', 'demo.js')
INVALID_COMPONENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_two_component.ts')
INVALID_SCOPE_TRUE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_scope_true.ts')
INVALID_SCOPE_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid_scope.ts')
INVALID_SORTED_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_sorted_dependencies.ts')
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_line_breaks_in_controller_dependencies.ts')
INVALID_CONSTANT_IN_TS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_constant_in_ts_file.ts')
INVALID_CONSTANT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ts')
INVALID_CONSTANT_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ajs.ts')
INVALID_AS_CONST_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_as_const.constants.ts')
INVALID_HTTP_CLIENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_http_client_used.ts')
INVALID_FORMATTED_COMMENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_comments.ts')
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK = os.path.join(
LINTER_TESTS_DIR, 'invalid_directive_without_return.ts')
INVALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_ignore.ts')
VALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_ignore.ts')
INVALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_expect_error.ts')
VALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_expect_error.spec.ts')
VALID_IGNORED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ignored.service.ts')
VALID_UNLISTED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_unlisted.service.ts')
# Note: Almost all test functions below define a mock for the compile function
# used in js_ts_linter, and that mock makes a subprocess call to the TypeScript
# compiler. The tests only need the relevant test fixture files to be compiled,
# instead of all files as done in js_ts_linter. Mocking the compile method
# therefore reduces the compile time and makes the tests run faster.
class JsTsLintTests(test_utils.LinterTestBase):
"""Tests for js_ts_linter file."""
def validate(self, lint_task_report, expected_messages, failed_count):
"""Assert linter output messages with expected messages."""
for stdout in lint_task_report:
if stdout.failed:
for message in expected_messages:
self.assert_same_list_elements(
[message], stdout.trimmed_messages)
self.assert_failed_messages_count(
stdout.get_report(), failed_count)
else:
continue
def test_validate_and_parse_js_and_ts_files_with_exception(self):
def mock_parse_script(unused_file_content, comment): # pylint: disable=unused-argument
raise Exception('Exception raised from parse_script()')
esprima_swap = self.swap(esprima, 'parseScript', mock_parse_script)
with esprima_swap, self.assertRaisesRegexp(
Exception, r'Exception raised from parse_script\(\)'):
js_ts_linter.JsTsLintChecksManager(
[], [VALID_JS_FILEPATH], FILE_CACHE).perform_all_lint_checks()
def test_check_extra_js_file_found(self):
def mock_readlines(unused_self, unused_filepath):
return ('var a = 10;\n',)
def mock_read(unused_self, unused_filepath):
return 'var a = 10;\n'
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
read_swap = self.swap(
pre_commit_linter.FileCache, 'read', mock_read)
with readlines_swap, read_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[EXTRA_JS_FILEPATH], [], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Found extra .js file']
expected_messages.extend([
'If you want the above files to be present as js files, add '
'them to the list JS_FILEPATHS_NOT_TO_BUILD in build.py. '
'Otherwise, rename them to .ts'])
self.validate(lint_task_report, expected_messages, 1)
def test_check_js_and_ts_component_name_and_count_with_two_component(self):
def mock_compile_all_ts_files():
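            # Compile only the single test fixture needed by this test into
            # the temporary compiled-TypeScript directory that js_ts_linter
            # reads from, instead of compiling the whole codebase.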
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_COMPONENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_COMPONENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that there is exactly one component '
'in the file.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_true_value(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[],
[INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file does not have scope set to true.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_no_scope(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SCOPE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file has a scope: {}.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_sorted_dependencies_with_unsorted_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SORTED_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SORTED_DEPENDENCIES_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in SuggestionModalForCreatorViewController'
' in file', 'the injected dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.']
expected_messages.extend([
'Please ensure that in SuggestionModalForCreatorViewController'
' in file ', 'the stringfied dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.'])
self.validate(lint_task_report, expected_messages, 1)
def test_match_line_breaks_in_controller_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in file',
'the line breaks pattern between the dependencies mentioned as'
' strings:\n[$rootScope,$window,BackgroundMaskService,\n'
'SidebarStatusService,UrlService]\nand the dependencies '
'mentioned as function parameters: \n($rootScope,$window,\n'
'BackgroundMaskService,\nSidebarStatusService,UrlService)\n'
'for the corresponding controller should exactly match.'
]
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
expected_messages.extend([
'Please ensure that the constant ADMIN_TABS is initialized '
'from the value from the corresponding Angular constants file '
'(the *.constants.ts file). Please create one in the Angular '
'constants file if it does not exist there.'
])
self.validate(lint_task_report, expected_messages, 1)
def test_check_duplicate_constant_declaration_in_separate_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH,
INVALID_CONSTANT_IN_TS_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'The constant \'ADMIN_ROLE_HANDLER_URL\' is already declared '
'in', 'Please import the file where the constant is declared '
'or rename the constant.']
self.validate(lint_task_report, expected_messages, 1)
def test_duplicate_constants_in_ajs_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
self.validate(lint_task_report, expected_messages, 1)
def test_as_const_in_constant_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_AS_CONST_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_AS_CONST_CONSTANTS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'This constants file doesn\'t have \'as const\' at the end.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_outside_class(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH,
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_app_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_APP_CONSTANTS_AJS_FILEPATH,
VALID_APP_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_APP_CONSTANTS_FILEPATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_in_non_constant_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Constant declaration found at line 19. Please declare the '
'constants in a separate constants file.']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
expected_messages = ['Unused injected value IMPORT_STATEMENT']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter_with_stderr(self):
with self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
INVALID_SORTED_DEPENDENCIES_FILEPATH
).perform_all_lint_checks()
def test_third_party_linter_with_invalid_eslint_path(self):
def mock_exists(unused_path):
return False
exists_swap = self.swap(os.path, 'exists', mock_exists)
with exists_swap, self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
def test_third_party_linter_with_success_message(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[VALID_TS_FILEPATH]).perform_all_lint_checks()
expected_messages = (
['SUCCESS ESLint check passed'])
self.validate(lint_task_report, expected_messages, 0)
def test_custom_linter_with_no_files(self):
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [], FILE_CACHE).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_third_party_linter_with_no_files(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[]).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_http_client_used_with_excluded_file(self):
excluded_file = (
'core/templates/services/request-interceptor.service.spec.ts')
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'core/templates/services/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
excluded_file)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [excluded_file], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_in_backend_api_service_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_BACKEND_API_SERVICE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_BACKEND_API_SERVICE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_with_error_message(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_HTTP_CLIENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_HTTP_CLIENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'An instance of HttpClient is found in this file. You are not '
'allowed to create http requests from files that are not '
'backend api services.']
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-ignore found at line 25.']
expected_messages.extend(['@ts-ignore found at line 31.'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 25. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 31. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {
VALID_TS_IGNORE_FILEPATH: ['let b: number = c;']
})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS ignore check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_ts_expect_error_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-expect-error found at line 24.']
expected_messages.extend(['@ts-expect-error found at line 30.'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 24. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 30. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_expect_error_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS expect error check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_missing_punctuation_at_end_of_comment(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_FORMATTED_COMMENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_FORMATTED_COMMENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Line 39: Invalid punctuation used at '
'the end of the comment.']
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_UNLISTED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_UNLISTED_SERVICE_PATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
angular_services_index_path = (
'./core/templates/services/angular-services.index.ts')
class_name = 'UnlistedService'
service_name_type_pair = (
'[\'%s\', %s]' % (class_name, class_name))
expected_messages = [
'Please import %s to Angular Services Index file in %s'
'from %s'
% (
class_name,
angular_services_index_path,
VALID_UNLISTED_SERVICE_PATH),
'Please add the pair %s to the angularServices in %s'
% (service_name_type_pair, angular_services_index_path)
]
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_IGNORED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_IGNORED_SERVICE_PATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'SUCCESS Angular Services Index file check passed'
]
self.validate(lint_task_report, expected_messages, 0)
def test_get_linters_with_success(self):
custom_linter, third_party = js_ts_linter.get_linters(
[VALID_JS_FILEPATH], [VALID_TS_FILEPATH], FILE_CACHE)
self.assertTrue(
isinstance(custom_linter, js_ts_linter.JsTsLintChecksManager))
self.assertTrue(
isinstance(
third_party,
js_ts_linter.ThirdPartyJsTsLintChecksManager))
| 48.482227 | 95 | 0.641951 | ["Apache-2.0"] | Aryan-Abhishek/oppia | scripts/linters/js_ts_linter_test.py | 40,919 | Python
# Load both the 2016 and 2017 sheets by name
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = ['2016', '2017'])
# View the data type of all_survey_data
print(type(all_survey_data))
'''
<script.py> output:
<class 'collections.OrderedDict'>
'''
# Load all sheets in the Excel file
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = [0, '2017'])
# View the sheet names in all_survey_data
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys([0, '2017'])
'''
# Load all sheets in the Excel file
all_survey_data = pd.read_excel("fcc_survey.xlsx",
sheet_name = None)
# View the sheet names in all_survey_data
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys(['2016', '2017'])
'''
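# Hypothetical extra step (not part of the original exercise): pull a single
# DataFrame out of the returned dict. The dict key is whatever was passed to
# sheet_name, so a sheet loaded by position is keyed by its index number, as
# the note below explains.
df_2017 = all_survey_data['2017']
print(df_2017.head())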
# Notice that if you load a sheet by its index position, the resulting data frame's name is also the index number, not the sheet name.
| 24.210526 | 134 | 0.691304 | ["MIT"] | Ali-Parandeh/Data_Science_Playground | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/11_select_multiple_sheets.py | 920 | Python
from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(input or index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(output or index)
# self.update_shape()
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
self.special_actions['add input'] = {'method': self.action_add_input}
self.enlargement_state = 0
self.initialized()
def action_add_input(self):
self.create_new_input('data', '', widget_type='std line edit', widget_pos='besides')
self.enlargement_state += 1
self.special_actions['remove input'] = {'method': self.action_remove_input}
def action_remove_input(self):
self.delete_input(self.inputs[-1])
self.enlargement_state -= 1
if self.enlargement_state == 0:
del self.special_actions['remove input']
def update_event(self, input_called=-1):
result = self.input(0) or self.input(1)
for i in range(self.enlargement_state):
result = result or self.input(2+i)
self.outputs[0].set_val(result)
def get_data(self):
data = {'enlargement state': self.enlargement_state}
return data
def set_data(self, data):
self.enlargement_state = data['enlargement state']
# optional - important for threading - stop everything here
def removing(self):
pass
| 34.482143 | 97 | 0.671673 | ["MIT"] | Shirazbello/Pyscriptining | packages/std/nodes/std___Or0/std___Or0___METACODE.py | 1,931 | Python
"""
Test that we keep references to failinfo as needed.
"""
import fiu
# Object we'll use for failinfo
finfo = [1, 2, 3]
fiu.enable('p1', failinfo = finfo)
assert fiu.fail('p1')
assert fiu.failinfo('p1') is finfo
finfo_id = id(finfo)
del finfo
assert fiu.failinfo('p1') == [1, 2, 3]
assert id(fiu.failinfo('p1')) == finfo_id
| 15.136364 | 51 | 0.666667 | ["MIT"] | lwllvyb/libfiu-hack | tests/test-failinfo_refcount.py | 333 | Python
"""
ASGI config for logkit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logkit.settings')
application = get_asgi_application()
| 22.882353 | 78 | 0.784062 | ["Apache-2.0"] | zhaheyan/logkit | logkit/logkit/asgi.py | 389 | Python
import sys
from os import listdir
from os.path import isfile, join, dirname, realpath
import struct
import gzip
def list_dir(d):
return [f for f in listdir(d) if isfile(join(d, f))]
def store(p, file):
try:
output_file = open(p, "w", encoding="utf-8", errors="xmlcharrefreplace")
output_file.write(file)
except:
print("Unable to store the file. Error:", sys.exc_info()[0])
raise
def store_bin(p, file):
with open(p, 'wb') as f:
if isinstance(file, int):
f.write(struct.pack('i', file)) # write an int
elif isinstance(file, str):
            f.write(file.encode('utf-8')) # write a string (encode to bytes for binary mode)
else:
raise TypeError('Can only write str or int')
def load(p, compression=None):
if compression == 'gz' or compression == 'gzip':
f = gzip.open(p, 'rb')
else:
f = open(p, mode="r", encoding="utf-8")
content = f.read()
f.close()
return content
def to_string(data: bytes, encoding=None):
if encoding is None:
return data.decode("utf-8")
return data.decode(encoding)
def store_list(p, files, file_names):
    for i in range(len(files)):
        store(p + file_names[i], files[i])
def path(file):
return dirname(realpath(file)).replace("\\", "/")
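# Illustrative usage sketch -- not part of the original module; the file names
# below are hypothetical. It round-trips text through store()/load() and shows
# the gzip branch of load(), which returns bytes and is decoded with to_string().
def _example_usage():
    store("example.txt", "hello storage")            # write plain text
    text = load("example.txt")                       # read it back as str
    raw = load("example.txt.gz", compression="gz")   # gzip branch returns bytes
    return text, to_string(raw)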
| 23.089286 | 80 | 0.595514 | ["Unlicense"] | ennioVisco/topocity | tools/batchrun/storage.py | 1,293 | Python
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Blog.urls')),
path('tinymce/', include('tinymce.urls')),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 28.928571 | 89 | 0.750617 | ["MIT"] | collins-hue/Django-Blog-Comment | BlogComment/BlogComment/urls.py | 405 | Python
from math import sqrt
from PrefrontalCortex import Impulse
from Decisions import Decisions
from Decision import Decision
import random as rand
# The job of the Neo-cortex is to evaluate, think, and consider.
# It is a slow brain part, but a highly important one: its job is to perform tasks for the prefrontal cortex (to make it happy),
# while finding the optimal ways to do those tasks.
class NeoCortex:
def __init__(self, settler, world_grid):
self.settler = settler
self.decision_tree = self.settler._get_decisions()
self.world_grid = world_grid
self.xz_grid = self.get_xz_of(world_grid[:])
def get_xz_of(self, grid):
l = []
for cell in grid:
c = []
for block in cell.get_chunk():
c.append((block[0], block[2]))
l.append(c)
return l
def handle_impulse(self, impulse, weights):
text = ""
if impulse.name == Impulse.WANT_FOOD.name:
food = self._go_hunt()
if food > 0:
text = "Went to hunt, and found "+ str(food) +" food!"
else:
text = "Went to hunt, and found nothing.."
elif impulse.name == Impulse.WANT_SHELTER.name:
text = self._go_build_shelter()
elif impulse.name == Impulse.WANT_SLEEP.name:
self._go_sleep()
text = "Went to sleep"
elif impulse.name == Impulse.WANT_CHILDREN.name:
if self.settler._get_has_mate():
self._go_mate()
text = "Went to mate"
else:
text = self._go_find_mate()
#print "SETTLER: ", text
decision = Decision(text, impulse, weights)
self.decision_tree.new_decision(decision)
    #Returns the amount of food found while hunting (0 if the hunt was unsuccessful)
def _go_hunt(self):
self.settler._move(self.find_free_grid_cell()) #Action
success_prob = 0.5
bounds = (0, 10)
found_food = rand.randrange(bounds[0], bounds[1], 1) >= bounds[1] * success_prob
food = int(found_food) * int(rand.randrange(0, 2))
self.settler.add_food(food)
return food
def _go_build_shelter(self):
self.move_to_suitable_plot()
self.settler.settlement.settler_claims_index(self.settler.origin)
self.settler._build() #Action
self.world_grid[self.settler.origin].use_segment() #Mental note
self.settler.set_has_shelter()
return "Successfully built a shelter"
def _go_sleep(self):
pass
def _go_mate(self):
self.settler._mate()
def _go_find_mate(self):
success, mates = self.get_suitable_mates()
if success:
mated, num_kids = self.settler._find_and_mate(mates)
text = ""
if mated:
text = "Had " + str(num_kids) + " children"
else:
text = "Got no consent from suitable mates"
return text
else:
return "Failed to find suitable mates"
def old_can_build(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
dist = 0
if self.settler.settlement.get_index_claimed(self.settler.origin):
return False
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= 5 and dist[1] <= 5:
return False
return True
def move_to_suitable_plot(self):
close_shelters = self.get_close_houses()
if len(close_shelters) > 0:
self_loc = self.world_grid[self.settler.origin].get_chunk()[0]
average_loc = (self_loc[0], self_loc[2])
for shelter_loc in close_shelters:
average_loc += (-(shelter_loc[0] - self_loc[0]), -(shelter_loc[2] - self_loc[2]))
self.settler._move(self.get_index_of(average_loc, self.xz_grid))
min_shelter_dist = 10
def get_close_houses(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
close_shelters_locs = []
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= self.min_shelter_dist and dist[1] <= self.min_shelter_dist:
close_shelters_locs.append(t)
if self.settler.settlement.get_index_claimed(self.settler.origin):
close_shelters_locs.append(s)
return close_shelters_locs
def find_free_grid_cell(self):
point = self.world_grid[self.settler.origin].get_chunk()[0] #Initial and fallback (no move)
attempts = 0
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
while not self.point_in_grid(new_point, self.xz_grid):
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
if self.settler.steps_left <= 0:
print "Settler died thinking"
return self.settler.origin
if attempts % 5 == 0: #Slowly die trying to move (prevents stalling)
self.settler.steps_left -= 1
attempts += 1
return self.get_index_of(new_point, self.xz_grid)
def get_step_size(self, loc):
d = 5 #One chunk per step
return int(rand.normalvariate(loc, d))
def point_in_grid(self, point, grid):
for cell in grid:
if point in cell:
return True
return False
def get_index_of(self, point, grid):
for cell in grid:
if point in cell:
return grid.index(cell)
return 0
def get_index_of_3d(self, point, grid):
for cell in grid:
if point in cell.get_chunk():
return grid.index(cell)
return self.find_free_grid_cell()
def get_suitable_mates(self):
suitable = []
for settler in self.settler.settlement.get_all_settlers():
if settler._get_has_shelter():
suitable.append(settler)
if len(suitable) <= 0:
return False, suitable
else:
            return True, suitable
| 37.378531 | 130 | 0.587666 | ["ISC"] | Sebastianchr22/GDMC-master | stock-filters/NeoCortex.py | 6,616 | Python
#!/usr/bin/env python
# coding: utf-8
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import pickle
import shutil
from sklearn.model_selection import train_test_split
def tokenize(tokenizer,text_array,max_seq_len=64,pad_to_max_length=True,add_special_tokens=True):
''' Returns tokenized IDs and attention mask
The transformers encode_plus method returns the following:
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}'''
all_tokens=[]
all_attention_mask=[]
for i,text in enumerate(tqdm(text_array)):
encoded = tokenizer.encode_plus(
text,
add_special_tokens=add_special_tokens,
max_length=max_seq_len,
pad_to_max_length=pad_to_max_length)
tokens = torch.tensor(encoded['input_ids'])
attention_mask = torch.tensor(encoded['attention_mask'])
all_tokens.append(tokens)
all_attention_mask.append(attention_mask)
return all_tokens,all_attention_mask
class CreateDataset(Dataset):
def __init__(self,data,atten_mask,labels,num_excl):
self._dataset = [[data[i],atten_mask[i],labels.values[i],num_excl.values[i]] for i in range(0,len(data))]
def __len__(self):
return len(self._dataset)
def __getitem__(self,idx):
return self._dataset[idx]
def createTestTrainSplit(all_train_df,test_size=0.2,seed=1234):
# Create train, validation dataset splits
    train_df, valid_df = train_test_split(all_train_df, test_size=test_size, random_state=seed)
train_data = train_df.text.fillna("DUMMY_VALUE")
train_labels = train_df.label
train_num_excl = train_df.num_exclamation_marks
valid_data = valid_df.text.fillna("DUMMY_VALUE")
valid_labels = valid_df.label
    valid_num_excl = valid_df.num_exclamation_marks
return train_data,train_labels,train_num_excl,valid_data,valid_labels,valid_num_excl
def saveTokensToFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
# save to files for later use
with open(TOKEN_DATA_PATH+'/train_data_tokenized.txt', 'wb') as fp:
pickle.dump(train_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/train_attention_mask.txt', 'wb') as fp:
pickle.dump(train_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/valid_data_tokenized.txt', 'wb') as fp:
pickle.dump(valid_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/valid_attention_mask.txt', 'wb') as fp:
pickle.dump(valid_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/test_data_tokenized.txt', 'wb') as fp:
pickle.dump(test_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/test_attention_mask.txt', 'wb') as fp:
pickle.dump(test_attention_mask, fp)
def loadTokensFromFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
# read back tokenized data
    # use the same '/'-separated paths as saveTokensToFiles
    with open(TOKEN_DATA_PATH+'/train_data_tokenized.txt', 'rb') as fp:
        train_data_tokenized=pickle.load(fp)
    with open(TOKEN_DATA_PATH+'/train_attention_mask.txt', 'rb') as fp:
        train_attention_mask=pickle.load(fp)
    with open(TOKEN_DATA_PATH+'/valid_data_tokenized.txt', 'rb') as fp:
        valid_data_tokenized=pickle.load(fp)
    with open(TOKEN_DATA_PATH+'/valid_attention_mask.txt', 'rb') as fp:
        valid_attention_mask=pickle.load(fp)
    with open(TOKEN_DATA_PATH+'/test_data_tokenized.txt', 'rb') as fp:
        test_data_tokenized=pickle.load(fp)
    with open(TOKEN_DATA_PATH+'/test_attention_mask.txt', 'rb') as fp:
        test_attention_mask=pickle.load(fp)
    # return the loaded objects so callers can actually use them
    return (train_data_tokenized, train_attention_mask,
            valid_data_tokenized, valid_attention_mask,
            test_data_tokenized, test_attention_mask)
def generateDataLoader(dataset,batch_size,shuffle=False,num_workers=16,pin_memory=False,drop_last=True):
# print("Expected number of batches:", int(len(train_data_tokenized)/params['batch_size']))
sampler = RandomSampler(dataset)
    dataLoader = torch.utils.data.DataLoader(dataset=dataset,
                                             sampler=sampler,
                                             batch_size=batch_size,
                                             shuffle=shuffle,  # must stay False when a sampler is supplied
                                             num_workers=num_workers,
                                             pin_memory=pin_memory,
                                             drop_last=drop_last)
return dataLoader
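# Illustrative end-to-end sketch -- not part of the original module. It assumes
# a HuggingFace tokenizer and a dataframe with 'text', 'label' and
# 'num_exclamation_marks' columns; the checkpoint name, dataframe and batch size
# here are hypothetical placeholders.
def _example_pipeline(all_train_df, batch_size=32):
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    (train_data, train_labels, train_num_excl,
     valid_data, valid_labels, valid_num_excl) = createTestTrainSplit(all_train_df)
    train_tokens, train_mask = tokenize(tokenizer, train_data, max_seq_len=64)
    train_dataset = CreateDataset(train_tokens, train_mask, train_labels, train_num_excl)
    train_loader = generateDataLoader(train_dataset, batch_size=batch_size)
    for tokens, attention_mask, labels, num_excl in train_loader:
        # one training step would go here; tokens/attention_mask are
        # (batch_size, max_seq_len), labels and num_excl are (batch_size,)
        break
    return train_loader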
| 45.982609 | 118 | 0.703101 | ["Apache-2.0"] | suhasgupta791/mids-w251-final-project | utils/utils.py | 5,288 | Python
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import os
import re
import tarfile
import shutil
import tempfile
import hashlib
import glob
import platform
from contextlib import closing
import ruamel.yaml as yaml
import json
from six.moves.urllib.error import URLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.cmd
import spack.config as config
import spack.fetch_strategy as fs
import spack.util.gpg
import spack.relocate as relocate
import spack.util.spack_yaml as syaml
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.spec import Spec
from spack.stage import Stage
from spack.util.gpg import Gpg
import spack.architecture as architecture
_build_cache_relative_path = 'build_cache'
BUILD_CACHE_INDEX_TEMPLATE = '''
<html>
<head>
<title>{title}</title>
</head>
<body>
<ul>
{path_list}
</ul>
</body>
</html>
'''
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>'
class NoOverwriteException(spack.error.SpackError):
"""
Raised when a file exists and must be overwritten.
"""
def __init__(self, file_path):
err_msg = "\n%s\nexists\n" % file_path
err_msg += "Use -f option to overwrite."
super(NoOverwriteException, self).__init__(err_msg)
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
pass
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super(NewLayoutException, self).__init__(msg)
def build_cache_relative_path():
return _build_cache_relative_path
def build_cache_prefix(prefix):
return os.path.join(prefix, build_cache_relative_path())
def buildinfo_file_name(prefix):
"""
Filename of the binary package meta-data file
"""
name = os.path.join(prefix, ".spack/binary_distribution")
return name
def read_buildinfo_file(prefix):
"""
Read buildinfo file
"""
filename = buildinfo_file_name(prefix)
with open(filename, 'r') as inputfile:
content = inputfile.read()
buildinfo = yaml.load(content)
return buildinfo
def write_buildinfo_file(spec, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
prefix = spec.prefix
text_to_relocate = []
binary_to_relocate = []
link_to_relocate = []
blacklist = (".spack", "man")
prefix_to_hash = dict()
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
deps = spack.build_environment.get_rpath_deps(spec.package)
for d in deps:
prefix_to_hash[str(d.prefix)] = d.dag_hash()
    # Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(prefix, topdown=True):
dirs[:] = [d for d in dirs if d not in blacklist]
for filename in files:
path_name = os.path.join(root, filename)
m_type, m_subtype = relocate.mime_type(path_name)
if os.path.islink(path_name):
link = os.readlink(path_name)
if os.path.isabs(link):
# Relocate absolute links into the spack tree
if link.startswith(spack.store.layout.root):
rel_path_name = os.path.relpath(path_name, prefix)
link_to_relocate.append(rel_path_name)
else:
msg = 'Absolute link %s to %s ' % (path_name, link)
msg += 'outside of prefix %s ' % prefix
msg += 'should not be relocated.'
tty.warn(msg)
if relocate.needs_binary_relocation(m_type, m_subtype):
if not filename.endswith('.o'):
rel_path_name = os.path.relpath(path_name, prefix)
binary_to_relocate.append(rel_path_name)
if relocate.needs_text_relocation(m_type, m_subtype):
rel_path_name = os.path.relpath(path_name, prefix)
text_to_relocate.append(rel_path_name)
# Create buildinfo data and write it to disk
buildinfo = {}
buildinfo['relative_rpaths'] = rel
buildinfo['buildpath'] = spack.store.layout.root
buildinfo['spackprefix'] = spack.paths.prefix
buildinfo['relative_prefix'] = os.path.relpath(
prefix, spack.store.layout.root)
buildinfo['relocate_textfiles'] = text_to_relocate
buildinfo['relocate_binaries'] = binary_to_relocate
buildinfo['relocate_links'] = link_to_relocate
buildinfo['prefix_to_hash'] = prefix_to_hash
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
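# For orientation, the .spack/binary_distribution file written above is YAML with
# the keys assembled in this function (the values below are purely illustrative):
#   relative_rpaths: false
#   buildpath: /path/to/spack/opt/spack
#   spackprefix: /path/to/spack
#   relative_prefix: linux-ubuntu18.04-x86_64/gcc-9.3.0/zlib-1.2.11-<hash>
#   relocate_textfiles: [...]
#   relocate_binaries: [...]
#   relocate_links: [...]
#   prefix_to_hash: {<dependency prefix>: <dag hash>, ...}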
def tarball_directory_name(spec):
"""
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
"""
return "%s/%s/%s-%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name, spec.version)
def tarball_name(spec, ext):
"""
Return the name of the tarfile according to the convention
<os>-<architecture>-<package>-<dag_hash><ext>
"""
return "%s-%s-%s-%s-%s%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name,
spec.version,
spec.dag_hash(),
ext)
def tarball_path_name(spec, ext):
"""
Return the full path+name for a given spec according to the convention
<tarball_directory_name>/<tarball_name>
"""
return os.path.join(tarball_directory_name(spec),
tarball_name(spec, ext))
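# Example (illustrative only; actual values depend on the concrete spec): a
# zlib@1.2.11 spec built with gcc@9.3.0 on linux-ubuntu18.04-x86_64 would yield
#   tarball_directory_name(spec)  -> 'linux-ubuntu18.04-x86_64/gcc-9.3.0/zlib-1.2.11'
#   tarball_name(spec, '.spack')  -> 'linux-ubuntu18.04-x86_64-gcc-9.3.0-zlib-1.2.11-<dag_hash>.spack'
# and tarball_path_name(spec, ext) simply joins the two.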
def checksum_tarball(file):
# calculate sha256 hash of tar file
block_size = 65536
hasher = hashlib.sha256()
with open(file, 'rb') as tfile:
buf = tfile.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = tfile.read(block_size)
return hasher.hexdigest()
def sign_tarball(key, force, specfile_path):
# Sign the packages if keys available
if spack.util.gpg.Gpg.gpg() is None:
raise NoGpgException(
"gpg2 is not available in $PATH .\n"
"Use spack install gnupg and spack load gnupg.")
if key is None:
keys = Gpg.signing_keys()
if len(keys) == 1:
key = keys[0]
if len(keys) > 1:
raise PickKeyException(str(keys))
if len(keys) == 0:
msg = "No default key available for signing.\n"
msg += "Use spack gpg init and spack gpg create"
msg += " to create a default key."
raise NoKeyException(msg)
if os.path.exists('%s.asc' % specfile_path):
if force:
os.remove('%s.asc' % specfile_path)
else:
raise NoOverwriteException('%s.asc' % specfile_path)
Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.html" page at the location given in
cache_prefix. This page contains a link for each binary package (*.yaml)
and public key (*.key) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
try:
index_html_path = os.path.join(tmpdir, 'index.html')
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if (entry.endswith('.yaml')
or entry.endswith('.key')))
with open(index_html_path, 'w') as f:
f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
title='Spack Package Index',
path_list='\n'.join(
BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
for path in file_list)))
web_util.push_to_url(
index_html_path,
url_util.join(cache_prefix, 'index.html'),
keep_original=False,
extra_args={'ContentType': 'text/html'})
finally:
shutil.rmtree(tmpdir)
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
allow_root=False, key=None, regenerate_index=False):
"""
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
"""
if not spec.concrete:
raise ValueError('spec must be concrete to build tarball')
# set up some paths
tmpdir = tempfile.mkdtemp()
cache_prefix = build_cache_prefix(tmpdir)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
spackfile_path = os.path.join(
cache_prefix, tarball_path_name(spec, '.spack'))
remote_spackfile_path = url_util.join(
outdir, os.path.relpath(spackfile_path, tmpdir))
mkdirp(tarfile_dir)
if web_util.url_exists(remote_spackfile_path):
if force:
web_util.remove_url(remote_spackfile_path)
else:
raise NoOverwriteException(url_util.format(remote_spackfile_path))
# need to copy the spec file so the build cache can be downloaded
# without concretizing with the current spack packages
# and preferences
spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.realpath(
os.path.join(cache_prefix, specfile_name))
remote_specfile_path = url_util.join(
outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
if web_util.url_exists(remote_specfile_path):
if force:
web_util.remove_url(remote_specfile_path)
else:
raise NoOverwriteException(url_util.format(remote_specfile_path))
# make a copy of the install directory to work with
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % spec.prefix,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
# create info for later relocation and create tar
write_buildinfo_file(spec, workdir, rel)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if rel:
try:
make_package_relative(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
else:
try:
check_package_relocatable(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
# create gzip compressed tarball of the install prefix
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
tar.add(name='%s' % workdir,
arcname='%s' % os.path.basename(spec.prefix))
# remove copy of install directory
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# add sha256 checksum to spec.yaml
with open(spec_file, 'r') as inputfile:
content = inputfile.read()
spec_dict = yaml.load(content)
bchecksum = {}
bchecksum['hash_algorithm'] = 'sha256'
bchecksum['hash'] = checksum
spec_dict['binary_cache_checksum'] = bchecksum
# Add original install prefix relative to layout root to spec.yaml.
# This will be used to determine is the directory layout has changed.
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
buildinfo['relative_rpaths'] = rel
spec_dict['buildinfo'] = buildinfo
spec_dict['full_hash'] = spec.full_hash()
tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
spec_dict['full_hash'],
spec.name,
url_util.format(remote_specfile_path)))
tty.debug(spec.tree())
with open(specfile_path, 'w') as outfile:
outfile.write(syaml.dump(spec_dict))
# sign the tarball and spec file with gpg
if not unsigned:
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
tar.add(name=specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
# cleanup file moved to archive
os.remove(tarfile_path)
if not unsigned:
os.remove('%s.asc' % specfile_path)
web_util.push_to_url(
spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
specfile_path, remote_specfile_path, keep_original=False)
    tty.msg('Buildcache for "%s" written to \n %s' %
(spec, remote_spackfile_path))
try:
# create an index.html for the build_cache directory so specs can be
# found
if regenerate_index:
generate_package_index(url_util.join(
outdir, os.path.relpath(cache_prefix, tmpdir)))
finally:
shutil.rmtree(tmpdir)
return None
def download_tarball(spec):
"""
Download binary tarball for given package into stage area
    Return the path to the downloaded tarball, or None if the download fails
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of pre-compiled packages.")
tarball = tarball_path_name(spec, '.spack')
for mirror in spack.mirror.MirrorCollection().values():
url = url_util.join(
mirror.fetch_url, _build_cache_relative_path, tarball)
# stage the tarball into standard place
stage = Stage(url, name="build_cache", keep=True)
try:
stage.fetch()
return stage.save_filename
except fs.FetchError:
continue
return None
def make_package_relative(workdir, spec, allow_root):
"""
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
old_layout_root = buildinfo['buildpath']
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.make_macho_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.make_elf_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
relocate.check_files_relocatable(cur_path_names, allow_root)
orig_path_names = list()
cur_path_names = list()
for linkname in buildinfo.get('relocate_links', []):
orig_path_names.append(os.path.join(prefix, linkname))
cur_path_names.append(os.path.join(workdir, linkname))
relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root):
"""
Check if package binaries are relocatable.
"""
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
cur_path_names.append(os.path.join(workdir, filename))
relocate.check_files_relocatable(cur_path_names, allow_root)
def relocate_package(spec, allow_root):
"""
Relocate the given package
"""
workdir = str(spec.prefix)
buildinfo = read_buildinfo_file(workdir)
new_layout_root = str(spack.store.layout.root)
new_prefix = str(spec.prefix)
new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
new_spack_prefix = str(spack.paths.prefix)
old_layout_root = str(buildinfo['buildpath'])
old_spack_prefix = str(buildinfo.get('spackprefix'))
old_rel_prefix = buildinfo.get('relative_prefix')
old_prefix = os.path.join(old_layout_root, old_rel_prefix)
rel = buildinfo.get('relative_rpaths')
prefix_to_hash = buildinfo.get('prefix_to_hash', None)
if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout and an older "
msg += "buildcache create implementation. It cannot be relocated."
raise NewLayoutException(msg)
# older buildcaches do not have the prefix_to_hash dictionary
# need to set an empty dictionary and add one entry to
# prefix_to_prefix to reproduce the old behavior
if not prefix_to_hash:
prefix_to_hash = dict()
hash_to_prefix = dict()
hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
new_deps = spack.build_environment.get_rpath_deps(spec.package)
for d in new_deps:
hash_to_prefix[d.format('{hash}')] = str(d.prefix)
prefix_to_prefix = dict()
for orig_prefix, hash in prefix_to_hash.items():
prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
prefix_to_prefix[old_prefix] = new_prefix
prefix_to_prefix[old_layout_root] = new_layout_root
tty.debug("Relocating package from",
"%s to %s." % (old_layout_root, new_layout_root))
def is_backup_file(file):
return file.endswith('~')
# Text files containing the prefix text
text_names = list()
for filename in buildinfo['relocate_textfiles']:
text_name = os.path.join(workdir, filename)
# Don't add backup files generated by filter_file during install step.
if not is_backup_file(text_name):
text_names.append(text_name)
# If we are installing back to the same location don't replace anything
if old_layout_root != new_layout_root:
paths_to_relocate = [old_spack_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# If the buildcache was not created with relativized rpaths
# do the relocation of path in binaries
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.relocate_macho_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.relocate_elf_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
link_names = [linkname
for linkname in buildinfo.get('relocate_links', [])]
relocate.relocate_links(link_names,
old_layout_root,
new_layout_root,
old_prefix,
new_prefix,
prefix_to_prefix)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
"""
extract binary tarball for given package into install area
"""
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
else:
raise NoOverwriteException(str(spec.prefix))
tmpdir = tempfile.mkdtemp()
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(tmpdir, tarfile_name)
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.join(tmpdir, specfile_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(tmpdir, tarfile_name)
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
suppress = config.get('config:suppress_gpg_warnings', False)
Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
except Exception as e:
shutil.rmtree(tmpdir)
raise e
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
"Package spec file failed signature verification.\n"
"Use spack buildcache keys to download "
"and install a key for verification from the mirror.")
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# get the sha256 checksum recorded at creation
spec_dict = {}
with open(specfile_path, 'r') as inputfile:
content = inputfile.read()
spec_dict = syaml.load(content)
bchecksum = spec_dict['binary_cache_checksum']
# if the checksums don't match don't install
if bchecksum['hash'] != checksum:
shutil.rmtree(tmpdir)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
new_relative_prefix = str(os.path.relpath(spec.prefix,
spack.store.layout.root))
# if the original relative prefix is in the spec file use it
buildinfo = spec_dict.get('buildinfo', {})
old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
rel = buildinfo.get('relative_rpaths')
# if the original relative prefix and new relative prefix differ the
# directory layout has changed and the buildcache cannot be installed
# if it was created with relative rpaths
info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
tty.debug(info %
(old_relative_prefix, new_relative_prefix, rel))
# if (old_relative_prefix != new_relative_prefix and (rel)):
# shutil.rmtree(tmpdir)
# msg = "Package tarball was created from an install "
# msg += "prefix with a different directory layout. "
# msg += "It cannot be relocated because it "
# msg += "uses relative rpaths."
# raise NewLayoutException(msg)
# extract the tarball in a temp directory
with closing(tarfile.open(tarfile_path, 'r')) as tar:
tar.extractall(path=tmpdir)
# get the parent directory of the file .spack/binary_distribution
    # this should be the directory unpacked from the tarball whose
# name is unknown because the prefix naming is unknown
bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
tty.debug('workdir %s' % workdir)
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % workdir,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(spec.prefix)
os.remove(temp_tarfile_path)
# cleanup
os.remove(tarfile_path)
os.remove(specfile_path)
try:
relocate_package(spec, allow_root)
except Exception as e:
shutil.rmtree(spec.prefix)
raise e
else:
manifest_file = os.path.join(spec.prefix,
spack.store.layout.metadata_dir,
spack.store.layout.manifest_file_name)
if not os.path.exists(manifest_file):
spec_id = spec.format('{name}/{hash:7}')
tty.warn('No manifest file in tarball for spec %s' % spec_id)
finally:
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
# Internal cache for downloaded specs
_cached_specs = set()
def try_download_specs(urls=None, force=False):
'''
Try to download the urls and cache them
'''
global _cached_specs
if urls is None:
return {}
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.add(spec)
return _cached_specs
def get_spec(spec=None, force=False):
"""
Check if spec.yaml exists on mirrors and return it if it does
"""
global _cached_specs
urls = set()
if spec is None:
return {}
specfile_name = tarball_name(spec, '.spec.yaml')
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
if _cached_specs and spec in _cached_specs:
return _cached_specs
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_specs(force=False, allarch=False):
"""
Get spec.yaml's for build caches available on mirror
"""
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = ('([^-]*-[^-]*-[^-]*)')
if not allarch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
arch_re = re.compile(regex_pattern)
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = arch_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = arch_re.search(link)
if m:
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_keys(install=False, trust=False, force=False):
"""
Get pgp public keys available on mirror
with suffix .key or .pub
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of build caches.")
keys = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding public keys in %s" % mirror_dir)
files = os.listdir(str(mirror_dir))
for file in files:
if re.search(r'\.key', file) or re.search(r'\.pub', file):
link = url_util.join(fetch_url_build_cache, file)
keys.add(link)
else:
tty.msg("Finding public keys at %s" %
url_util.format(fetch_url_build_cache))
            # For an s3 mirror we need to request index.html directly
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'), depth=1)
for link in links:
if re.search(r'\.key', link) or re.search(r'\.pub', link):
keys.add(link)
for link in keys:
with Stage(link, name="build_cache", keep=True) as stage:
if os.path.exists(stage.save_filename) and force:
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
tty.msg('Found key %s' % link)
if install:
if trust:
Gpg.trust(stage.save_filename)
tty.msg('Added this key to trusted keys.')
else:
                    tty.msg('Will not add this key to trusted keys. '
                            'Use -t to install all downloaded keys')
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
if not spec.concrete:
raise ValueError('spec must be concrete to check against mirror')
pkg_name = spec.name
pkg_version = spec.version
pkg_hash = spec.dag_hash()
pkg_full_hash = spec.full_hash()
tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
pkg_name, pkg_version, pkg_hash, pkg_full_hash))
tty.debug(spec.tree())
# Try to retrieve the .spec.yaml directly, based on the known
# format of the name, in order to determine if the package
# needs to be rebuilt.
cache_prefix = build_cache_prefix(mirror_url)
spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
file_path = os.path.join(cache_prefix, spec_yaml_file_name)
result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
spec.short_spec, '' if rebuild_on_errors else 'not ')
try:
_, _, yaml_file = web_util.read_from_url(file_path)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
except (URLError, web_util.SpackWebError) as url_err:
err_msg = [
'Unable to determine whether {0} needs rebuilding,',
' caught exception attempting to read from {1}.',
]
tty.error(''.join(err_msg).format(spec.short_spec, file_path))
tty.debug(url_err)
tty.warn(result_of_error)
return rebuild_on_errors
if not yaml_contents:
tty.error('Reading {0} returned nothing'.format(file_path))
tty.warn(result_of_error)
return rebuild_on_errors
spec_yaml = syaml.load(yaml_contents)
# If either the full_hash didn't exist in the .spec.yaml file, or it
# did, but didn't match the one we computed locally, then we should
# just rebuild. This can be simplified once the dag_hash and the
# full_hash become the same thing.
if ('full_hash' not in spec_yaml or
spec_yaml['full_hash'] != pkg_full_hash):
if 'full_hash' in spec_yaml:
reason = 'hash mismatch, remote = {0}, local = {1}'.format(
spec_yaml['full_hash'], pkg_full_hash)
else:
reason = 'full_hash was missing from remote spec.yaml'
tty.msg('Rebuilding {0}, reason: {1}'.format(
spec.short_spec, reason))
tty.msg(spec.tree())
return True
return False
def check_specs_against_mirrors(mirrors, specs, output_file=None,
rebuild_on_errors=False):
"""Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt. Reasons for needing to
    rebuild include the binary cache for the spec not being present on a
    mirror, or it being present but with a full_hash that has changed since
    the spec was last built.
Arguments:
mirrors (dict): Mirrors to check against
specs (iterable): Specs to check against mirrors
output_file (string): Path to output file to be written. If provided,
mirrors with missing or out-of-date specs will be formatted as a
JSON object and written to this file.
rebuild_on_errors (boolean): Treat any errors encountered while
checking specs as a signal to rebuild package.
Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
"""
rebuilds = {}
for mirror in spack.mirror.MirrorCollection(mirrors).values():
tty.msg('Checking for built specs at %s' % mirror.fetch_url)
rebuild_list = []
for spec in specs:
if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors):
rebuild_list.append({
'short_spec': spec.short_spec,
'hash': spec.dag_hash()
})
if rebuild_list:
rebuilds[mirror.fetch_url] = {
'mirrorName': mirror.name,
'mirrorUrl': mirror.fetch_url,
'rebuildSpecs': rebuild_list
}
if output_file:
with open(output_file, 'w') as outf:
outf.write(json.dumps(rebuilds))
return 1 if rebuilds else 0
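# Illustrative shape of the JSON written to ``output_file`` when at least one
# spec is out of date (all values below are placeholders, not real output):
# {
#     "file:///path/to/mirror": {
#         "mirrorName": "my-mirror",
#         "mirrorUrl": "file:///path/to/mirror",
#         "rebuildSpecs": [
#             {"short_spec": "zlib@1.2.11 ...", "hash": "abc1234"}
#         ]
#     }
# }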
def _download_buildcache_entry(mirror_root, descriptions):
for description in descriptions:
description_url = os.path.join(mirror_root, description['url'])
path = description['path']
fail_if_missing = description['required']
mkdirp(path)
stage = Stage(
description_url, name="build_cache", path=path, keep=True)
try:
stage.fetch()
except fs.FetchError as e:
tty.debug(e)
if fail_if_missing:
tty.error('Failed to download required url {0}'.format(
description_url))
return False
return True
def download_buildcache_entry(file_descriptions, mirror_url=None):
if not mirror_url and not spack.mirror.MirrorCollection():
tty.die("Please provide or add a spack mirror to allow " +
"download of buildcache entries.")
if mirror_url:
mirror_root = os.path.join(
mirror_url, _build_cache_relative_path)
return _download_buildcache_entry(mirror_root, file_descriptions)
for mirror in spack.mirror.MirrorCollection().values():
mirror_root = os.path.join(
mirror.fetch_url,
_build_cache_relative_path)
if _download_buildcache_entry(mirror_root, file_descriptions):
return True
else:
continue
return False
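# Illustrative shape of the ``file_descriptions`` argument consumed above
# (hypothetical values); each entry names a mirror-relative url, a local
# destination directory, and whether a failed fetch should be treated as fatal:
# example_descriptions = [
#     {'url': 'pkg.spec.yaml', 'path': '/tmp/downloads', 'required': True},
#     {'url': 'pkg.tar.gz', 'path': '/tmp/downloads', 'required': False},
# ]
# download_buildcache_entry(example_descriptions, mirror_url='file:///path/to/mirror')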
| 36.542435 | 79 | 0.624735 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | AndrewGaspar/spack | lib/spack/spack/binary_distribution.py | 39,612 | Python |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
"""
from engine import ServiceEngineModule
from template import ServiceParamTemplate
__all__ = ['ServiceEngineModule', 'ServiceParamTemplate'] | 25.444444 | 57 | 0.759825 | [
"Apache-2.0"
] | sherry0429/SOAForPython | toBusUsege/service_module/service_core/__init__.py | 229 | Python |
try:
from rgbmatrix import graphics
except ImportError:
from RGBMatrixEmulator import graphics
class Color:
def __init__(self, color_json):
self.json = color_json
def color(self, keypath):
return self.__find_at_keypath(keypath)
def graphics_color(self, keypath):
color = self.color(keypath)
if not color:
color = self.color("default.text")
return graphics.Color(color["r"], color["g"], color["b"])
def __find_at_keypath(self, keypath):
keys = keypath.split(".")
rv = self.json
for key in keys:
rv = rv[key]
return rv
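# Minimal usage sketch (not part of the original module; the JSON shape below
# is an assumption, the real palette is loaded from the project's config file):
if __name__ == "__main__":
    palette = Color({"default": {"text": {"r": 255, "g": 255, "b": 255}},
                     "away": {"text": {"r": 0, "g": 120, "b": 255}}})
    print(palette.color("away.text"))  # {'r': 0, 'g': 120, 'b': 255}
    away_text = palette.graphics_color("away.text")  # graphics.Color(0, 120, 255)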
| 24.692308 | 65 | 0.61215 | [
"MIT"
] | ajbowler/mlb-led-scoreboard | data/config/color.py | 642 | Python |
from typing import List
from lxml import etree
from cssselect import GenericTranslator
from kloppy.domain import Event, EventType
class CSSPatternMatcher:
def __init__(self, pattern: str):
        self.expression = GenericTranslator().css_to_xpath(pattern)
    def match(self, events: List[Event]) -> List[Event]:
elm = etree.Element("start")
root = elm
for i, event in enumerate(events):
if event.event_type != EventType.GENERIC:
elm = etree.SubElement(
elm,
event.event_name.lower()
.replace(" ", "_")
.replace("*", ""),
                    index=str(i),
result=str(event.result).lower(),
team=str(event.team.ground).lower(),
attrib={
"class": str(event.result).lower()
+ " "
+ str(event.team.ground).lower()
},
)
matching_events = []
for elm in root.xpath(self.expression):
            matching_events.append(events[int(elm.attrib["index"])])
return matching_events
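# Illustrative use (hypothetical selector; events come from a loaded kloppy
# event dataset). Because each non-generic event is nested inside the previous
# one, the child combinator reads as "directly followed by":
# matcher = CSSPatternMatcher("pass > shot")
# shots_following_a_pass = matcher.match(dataset.events)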
| 32.459459 | 69 | 0.507077 | [
"BSD-3-Clause"
] | JanVanHaaren/kloppy | kloppy/domain/services/matchers/css.py | 1,201 | Python |
'''
Created on Nov 16, 2021
@author: mballance
'''
from mkdv.tools.hdl.hdl_tool_config import HdlToolConfig
import os
class HdlTool(object):
def config(self, cfg : HdlToolConfig):
raise NotImplementedError("config not implemented for %s" % str(type(self)))
def setup(self, cfg : HdlToolConfig):
raise NotImplementedError("setup not implemented for %s" % str(type(self)))
def run(self, cfg : HdlToolConfig):
raise NotImplementedError("setup not implemented for %s" % str(type(self)))
| 28.421053 | 84 | 0.67963 | [
"Apache-2.0"
] | fvutils/pymkdv | src/mkdv/tools/hdl/hdl_tool.py | 540 | Python |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert h.startswith('getblockchaininfo\n')
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| 34.514286 | 101 | 0.679636 | [
"MIT"
] | Gulden/gulden-official | test/functional/rpc_named_arguments.py | 1,208 | Python |
# Copyright 2020 Maruan Al-Shedivat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Entropy-based activity regularizers."""
import tensorflow as tf
from tensorflow.python.keras.regularizers import Regularizer
class ContextConditionalNegativeEntropy(Regularizer):
"""Encourages models with higher context-conditional entropy."""
def __init__(self, coeff=0., num_samples=256, stddev=2e-1, epsilon=1e-6):
self.coeff = coeff
self.stddev = stddev
self.epsilon = epsilon
self.num_samples = num_samples
def __call__(self, x):
if self.coeff == 0.:
return tf.constant(0.)
# Unpack inputs.
# contextual_weights:
# kernels: <float32> [batch_size, feature_dim, num_classes].
# biases: <float32> [batch_size, num_classes].
# features: <float32> [batch_size, feature_dim].
# outputs: <float32> [batch_size, num_classes].
contextual_weights, features, outputs = x
# Generate features from P(x | c).
# <float32> [batch_size, num_samples, feature_dim].
features_shape = tf.shape(features)
features_noise = tf.random.normal(
shape=(features_shape[0], self.num_samples, features_shape[1]),
stddev=self.stddev
)
# <float32> [batch_size, num_samples, feature_dim].
features_prime = tf.expand_dims(features, axis=1) + features_noise
# Compute log mean_j P(Y | x_j, c_i).
# <float32> [batch_size, num_samples, num_classes].
logits = tf.einsum(
"ipk,ijp->ijk", contextual_weights["kernels"], features_prime
)
if "biases" in contextual_weights:
# <float32> [batch_size, num_samples, units].
biases = tf.expand_dims(contextual_weights["biases"], axis=1)
logits = tf.add(logits, biases)
# <float32> [batch_size, num_classes].
probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1) + self.epsilon
probs_sum = tf.reduce_sum(probs, axis=-1, keepdims=True)
log_probs = tf.math.log(probs / probs_sum)
# Compute loss.
loss = -tf.nn.softmax_cross_entropy_with_logits(
labels=tf.nn.softmax(outputs), logits=log_probs
)
return self.coeff * tf.reduce_mean(loss)
def __str__(self):
config = self.get_config()
return "{name:s}({coeff:f})".format(**config)
def get_config(self):
return {"name": self.__class__.__name__, "coeff": float(self.coeff)}
# Aliases.
def ctx_cond_neg_ent(coeff=0., num_samples=32, stddev=.1, epsilon=1e-6):
return ContextConditionalNegativeEntropy(
coeff=coeff, num_samples=num_samples, stddev=stddev, epsilon=epsilon
)
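# Illustrative smoke test (not part of the original module); tensor shapes
# follow the comments in __call__: kernels [B, D, K], biases [B, K],
# features [B, D], outputs [B, K].
if __name__ == "__main__":
    B, D, K = 4, 8, 3
    reg = ctx_cond_neg_ent(coeff=1e-2, num_samples=16)
    contextual_weights = {"kernels": tf.random.normal([B, D, K]),
                          "biases": tf.random.normal([B, K])}
    penalty = reg((contextual_weights,
                   tf.random.normal([B, D]),
                   tf.random.normal([B, K])))
    print(penalty)  # scalar regularization term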
| 38.068182 | 80 | 0.643881 | [
"Apache-2.0"
] | alshedivat/cen | cen/regularizers/entropy.py | 3,350 | Python |
from django.contrib import admin
# from .models import related models
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
model = CarModel.car_makes.through
extra = 3
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
list_display = ['name']
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
inlines = [CarModelInline]
list_display = ['name']
# Register models here
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin) | 26 | 44 | 0.77592 | [
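# Assumed shape of the models registered above (illustrative only; the real
# definitions live in .models). ``CarModel.car_makes.through`` implies a
# many-to-many relation between CarModel and CarMake:
# class CarMake(models.Model):
#     name = models.CharField(max_length=100)
# class CarModel(models.Model):
#     name = models.CharField(max_length=100)
#     car_makes = models.ManyToManyField(CarMake)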
"Apache-2.0"
] | RafaelJon/agfzb-CloudAppDevelopment_Capstone | server/djangoapp/admin.py | 598 | Python |
class Student:
def __init__(self,name):
self.name = name
self.exp = 0
self.lesson = 0
self.AddEXP(10)
def Hello(self):
print('Hello World! My name is {}!'.format(self.name))
def Coding(self):
print('{}: Currently coding...'.format(self.name))
self.exp += 5
self.lesson += 1
def ShowEXP(self):
print('- {} has {} EXP'.format(self.name,self.exp))
print('- Learned {} times'.format(self.lesson))
def AddEXP(self, score):
self.exp += score
class SpecialStudent(Student):
def __init__(self,name,father):
super().__init__(name)
self.father = father
mafia = ['Bill Gates', 'Thomas Edison']
if father in mafia:
self.exp += 100
def AddEXP(self,score):
self.exp += (score * 3)
        self.lesson += 1
def AskEXP(self,score=10):
print('*Holding Gun* Gimme some EXP!')
self.AddEXP(score)
print(__name__)
if __name__ == '__main__':
print('===== 1 Jan =====')
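    # Illustrative usage (not in the original lesson file); the numbers follow
    # the EXP rules defined in the classes above.
    student = Student('Somchai')        # starts with 10 EXP from AddEXP(10)
    student.Coding()                    # +5 EXP, +1 lesson
    student.ShowEXP()
    special = SpecialStudent('Gemmy', 'Bill Gates')  # mafia father: +100 EXP
    special.AskEXP()                    # +30 EXP (10 * 3)
    special.ShowEXP()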
| 20.804348 | 58 | 0.61233 | [
"MIT"
] | GemmyTheGeek/GemmyTheNerd | GemmyTheNerd/studentclass.py | 957 | Python |
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the path to the compiler executables, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
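# Illustrative toolchain selection (shell commands shown as comments; the
# paths are placeholders for a local installation):
# export RTT_CC=gcc
# export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin
# scons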
| 27.435115 | 152 | 0.571508 | [
"Apache-2.0"
] | LoveCeline/rt-thread | bsp/stm32/libraries/templates/stm32f10x/rtconfig.py | 3,594 | Python |
# pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc' : get_fcn_voc_resnet50,
'fcn_resnet101_voc' : get_fcn_voc_resnet101,
'fcn_resnet50_ade' : get_fcn_ade_resnet50,
'psp_resnet50_ade' : get_psp_ade_resnet50,
'resnet18_v1b' : resnet18_v1b,
'resnet34_v1b' : resnet34_v1b,
'resnet50_v1b' : resnet50_v1b,
'resnet101_v1b' : resnet101_v1b,
'resnet152_v1b' : resnet152_v1b,
'resnet50_v2a': resnet50_v2a,
'resnext50_32x4d' : resnext50_32x4d,
'resnext101_32x4d' : resnext101_32x4d,
'resnext101_64x4d' : resnext101_64x4d,
'se_resnext50_32x4d' : se_resnext50_32x4d,
'se_resnext101_32x4d' : se_resnext101_32x4d,
'se_resnext101_64x4d' : se_resnext101_64x4d,
'senet_52' : senet_52,
'senet_103' : senet_103,
'senet_154' : senet_154,
'se_resnet18_v1' : se_resnet18_v1,
'se_resnet34_v1' : se_resnet34_v1,
'se_resnet50_v1' : se_resnet50_v1,
'se_resnet101_v1' : se_resnet101_v1,
'se_resnet152_v1' : se_resnet152_v1,
'se_resnet18_v2' : se_resnet18_v2,
'se_resnet34_v2' : se_resnet34_v2,
'se_resnet50_v2' : se_resnet50_v2,
'se_resnet101_v2' : se_resnet101_v2,
'se_resnet152_v2' : se_resnet152_v2,
'darknet53': darknet53,
'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
}
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
        # avoid raising inside the except block, which causes a messy error message
name = name.lower()
if name not in models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
net = models[name](**kwargs)
return net
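# Minimal usage sketch (not part of the original module); assumes the chosen
# constructor accepts ``pretrained``, and pretrained=False skips the weight
# download.
if __name__ == '__main__':
    net = get_model('cifar_resnet20_v1', pretrained=False)
    print(net)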
| 38.918919 | 95 | 0.693287 | [
"Apache-2.0"
] | Ellinier/gluon-cv | gluoncv/model_zoo/model_zoo.py | 4,320 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import AddressType
from test_framework.util import (
connect_nodes,
assert_equal,
set_node_times,
)
import collections
from decimal import Decimal
import enum
import itertools
import random
Call = enum.Enum("Call", "single multiaddress multiscript")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data address_type rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
assert_equal(self.address["solvable"], True)
assert_equal(self.address["isscript"], self.address_type == AddressType.p2sh_segwit)
assert_equal(self.address["iswitness"], self.address_type == AddressType.bech32)
if self.address["isscript"]:
assert_equal(self.address["embedded"]["isscript"], False)
assert_equal(self.address["embedded"]["iswitness"], True)
if self.call == Call.single:
if self.data == Data.address:
response = self.node.importaddress(address=self.address["address"], label=self.label, rescan=rescan)
elif self.data == Data.pub:
response = self.node.importpubkey(pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
elif self.data == Data.priv:
response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan)
assert_equal(response, None)
elif self.call in (Call.multiaddress, Call.multiscript):
request = {
"scriptPubKey": {
"address": self.address["address"]
} if self.call == Call.multiaddress else self.address["scriptPubKey"],
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}
if self.address_type == AddressType.p2sh_segwit and self.data != Data.address:
# We need solving data when providing a pubkey or privkey as data
request.update({"redeemscript": self.address['embedded']['scriptPubKey']})
response = self.node.importmulti(
requests=[request],
options={"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)},
)
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmation_height=None):
"""Verify that listtransactions/listreceivedbyaddress return expected values."""
txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)
current_height = self.node.getblockcount()
assert_equal(len(txs), self.expected_txs)
addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["label"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], 1 + current_height - confirmation_height)
assert_equal("trusted" not in tx, True)
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], 1 + current_height - confirmation_height)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, AddressType, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
AMOUNT_DUST = 0.00000546
def get_rand_amount():
r = random.uniform(AMOUNT_DUST, 1)
return Decimal(str(round(r, 8)))
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
self.extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
# Import keys with pruning disabled
self.start_nodes(extra_args=[[]] * self.num_nodes)
for n in self.nodes:
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
self.stop_nodes()
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(
label=variant.label,
address_type=variant.address_type.value,
))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = get_rand_amount()
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
# Generate a block further in the future (past the rescan window).
assert_equal(self.nodes[0].getrawmempool(), [])
set_node_times(
self.nodes,
self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW + 1,
)
self.nodes[0].generate(1)
self.sync_all()
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run import for variant {}'.format(variant))
expect_rescan = variant.rescan == Rescan.yes
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(variant.timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, variant.confirmation_height)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = get_rand_amount()
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_all()
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run check for variant {}'.format(variant))
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height)
if __name__ == "__main__":
ImportRescanTest().main()
| 46.913043 | 126 | 0.66469 | [
"MIT"
] | 124327288/bitcoin | test/functional/wallet_import_rescan.py | 10,790 | Python |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
        # results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical`or :class:`categoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
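        Examples
        --------
        A minimal illustration (codes index into `categories`; -1 marks a
        missing value):
        >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
        [a, b, a, NaN]
        Categories (2, object): [a, b]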
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
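# Illustrative usage (editor's addition, not part of the original source;
# assumes `import pandas as pd` and the pandas behaviour of this vintage):
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.set_categories(['a', 'b'])            # 'c' is no longer a category -> NaN
# [a, b, NaN]
# Categories (2, object): [a, b]
# >>> c.set_categories(['x', 'y', 'z'], rename=True)   # positional rename
# [x, y, z]
# Categories (3, object): [x, y, z]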
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
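# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.add_categories(['c'])                 # 'c' is appended but stays unused
# [a, b, a]
# Categories (3, object): [a, b, c]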
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
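# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.remove_categories(['b'])              # values that were 'b' become NaN
# [a, NaN, a]
# Categories (1, object): [a]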
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
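# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()            # 'c' never appears, so it is dropped
# [a, b, a]
# Categories (2, object): [a, b]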
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
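# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.shift(1)                              # vacated positions become NaN (code -1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]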
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
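# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
# >>> c.value_counts()                        # unused 'c' still gets a row with count 0
# a    2
# b    1
# c    0
# dtype: int64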
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
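# Illustrative usage (editor's addition, not part of the original source):
#
# >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
# >>> c.fillna('a')                           # the fill value must already be a category
# [a, a, b]
# Categories (2, object): [a, b]
# >>> c.fillna('z')                           # raises ValueError: fill value must be in categories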
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " simply to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in the categories, or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
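# Illustrative usage (editor's addition, not part of the original source):
#
# >>> pd.Categorical(['a', 'a', 'b']).mode()
# [a]
# Categories (2, object): [a, b]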
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
| 34.122711 | 79 | 0.576176 | ["BSD-3-Clause"] | Adirio/pandas | pandas/core/arrays/categorical.py | 87,593 | Python
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
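# Rough usage sketch (editor's addition, not part of the original app). The view
# names, the gateway lookup and the 'token' GET parameter are assumptions made
# for illustration only:
#
#     def submit_view(request):
#         gateway = Gateway.objects.first()
#         return gateway.create_and_submit(request, request.user, amount=10000,
#                                          callback='verify')
#
#     def verify_view(request):
#         gateway = Gateway.objects.first()
#         transaction, verified_now = gateway.find_and_verify(request.GET['token'])
#         # `verified_now` is False when the payer retries an already-verified token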
| 55.913669 | 190 | 0.702651 | ["MIT"] | farahmand-m/django-payir | payir/models.py | 7,772 | Python
class Car:
def needFuel(self):
pass
def getEngineTemperature(self):
pass
def driveTo(self, destination):
pass
| 15.7 | 36 | 0.55414 | ["MIT"] | TestowanieAutomatyczneUG/laboratorium-9-Sienkowski99 | src/car.py | 157 | Python
from setuptools import setup
import mp_sync
setup(
name='mp_sync',
version=mp_sync.__version__,
description='Moon Package for Sync repository(google drive, notion, mongodb(local/web), local file)',
url='https://github.com/hopelife/mp_sync',
author='Moon Jung Sam',
author_email='[email protected]',
license='MIT',
packages=['mp_sync'],
# entry_points={'console_scripts': ['mp_sync = mp_sync.__main__:main']},
keywords='scraper',
# python_requires='>=3.8', # Python 3.8.6-32 bit
# install_requires=[ # additional packages required to use this package
# 'selenium',
# ],
# zip_safe=False
)
| 30 | 105 | 0.655556 | ["MIT"] | hopelife/mp_sync | setup.py | 666 | Python
import string
import sbxor
"""
Detect single-character XOR
One of the 60-character strings in this file (4.txt) has been encrypted by single-character XOR.
Find it.
"""
if __name__ == "__main__":
with open("data/4.txt", "r") as data_file:
data = data_file.read().split("\n")
candidates = []
for line in data[:]:
line_byte = bytearray.fromhex(line)
sb = sbxor.solve(line_byte)
if len(sb) != 0:
candidates.append([line_byte, sb])
print(f"{len(candidates)} candidate(s) found for single-byte xor\n")
for candidate in candidates:
print(f"Ciphertext: {candidate[0]}")
print("Possible solution(s):")
for b in candidate[1]:
print(f"Key: {b[0]}")
print(f"Plaintext: {repr(b[1])}")
| 26.5 | 96 | 0.6 | ["MIT"] | elevenchars/cryptopals | set1/detectsb.py | 795 | Python
#!/usr/bin/env python
import csv
import os
import argparse
import dateutil.parser
import json
from bson import ObjectId  # needed for the 'source' field below; ships with pymongo
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, required=True,
help="name of the data directory")
args = parser.parse_args()
return args.dir
def convert_ts(ts_str):
return dateutil.parser.parse(ts_str).timestamp()
def get_data(data, fname):
with open(fname, newline='') as csvfile:
content = csv.reader(csvfile, delimiter=',', quotechar='"')
for line in content:
if len(line) < 6:  # index 5 (recovered) is read below, so require 6 columns
continue
try:
ts = convert_ts(line[2])
adm = [line[1]]
if line[0] != '':
adm.append(line[0])
data.append(
{
'date': ts,
'adm': adm,
'infected': int(line[3]),
'deaths': int(line[4]),
'recovered': int(line[5]),
'sex': 'NaN', # Not sure why this is needed????
# 'source': 'JHU',
'source': ObjectId("5e75f8d7745bde4a48972b42")
})
except ValueError as ve:
# If there is a problem e.g. converting the ts
# just go on.
pass
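# Note (editor's addition): the indexing above assumes each CSV row is roughly
# [admin1, country, timestamp, infected, deaths, recovered, ...]; rows that fail
# the date or int conversions are silently skipped.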
def convert2json(dir_name):
data = []
for fname in os.listdir(dir_name):
get_data(data, os.path.join(dir_name, fname))
return data
def main():
dir_name = parse_args()
data = convert2json(dir_name)
print(json.dumps(data))
if __name__ == '__main__':
main()
| 27.619048 | 71 | 0.486782 | ["MIT"] | florath/jhu2db | jhu2json.py | 1,740 | Python
import asyncio
from aiotasks import build_manager
loop = asyncio.get_event_loop()
loop.set_debug(True)
manager = build_manager(loop=loop)
@manager.task()
async def task_01(num):
print("Task 01 starting: {}".format(num))
await asyncio.sleep(2, loop=loop)
print("Task 01 stopping")
return "a"
async def main_async():
manager.run()
async with task_01.wait(1) as f:
print(f)
await manager.wait(5)
manager.stop()
if __name__ == '__main__':
loop.run_until_complete(main_async())
| 15.472222 | 45 | 0.644524 | ["BSD-3-Clause"] | cr0hn/aiotasks | examples_old/memory_backend/basic_wait.py | 557 | Python
import random
import time
class unit :
def __init__(self, HP, Atk) :
self.HP = HP
self.Atk = Atk
def DoAtk(self, OtherUnit) :
self.DoDmg(self.Atk, OtherUnit)
def DoDmg(self, dmg, OtherUnit) :
OtherUnit.HP-=dmg
class hero(unit) :
def __init__(self, HP, Atk) :
super(hero, self).__init__(HP, Atk)
def Heal(self) :
self.HP+=30
class moster(unit) :
def __init__(self, HP, Atk) :
super(moster, self).__init__(HP, Atk)
def HardAtk(self, OtherUnit) :
self.DoDmg(self.Atk + 10, OtherUnit)
while True :
print("initializing hero...")
time.sleep(1)
Hero = hero(random.randint(30,50), random.randint(10,15))
print("A hero is here now")
print("HP:" + str(Hero.HP))
print("Atk:" + str(Hero.Atk) + "\n")
time.sleep(0.5)
print("initializing moster...")
time.sleep(1)
Moster = moster(random.randint(20,30), random.randint(5,10))
print("A moster is here now")
print("HP:" + str(Moster.HP))
print("Atk:" + str(Moster.Atk) + "\n")
###
time.sleep(1.5)
while Hero.HP > 0 and Moster.HP > 0 :
print("Hero turn")
time.sleep(1.5)
if Hero.HP < 10 :
Hero.Heal()
print("Hero use heal")
else :
Hero.DoAtk(Moster)
print("Hero atk")
print("Hero HP:" + str(Hero.HP))
print("Moster HP:" + str(Moster.HP) + "\n")
time.sleep(1.5)
if Moster.HP <= 0:
print("Hero Win")
break
###
print("Moster turn")
time.sleep(1.5)
if Moster.HP < 5 :
Moster.HardAtk(Hero)
print("Moster use Hard Atk")
else :
Moster.DoAtk(Hero)
print("Moster atk")
print("Hero HP:" + str(Hero.HP))
print("Moster HP:" + str(Moster.HP) + "\n")
if Moster.HP <= 0:
print("Hero Win")
break
if Hero.HP <= 0:
print("Moster Win")
break
time.sleep(1.5)
| 29.550725 | 64 | 0.518391 | ["MPL-2.0"] | ZoneTwelve/CE3058-Network-and-Database-Programming | week-3/HeroAndMonster.py | 2,039 | Python
"""The tests for hls streams."""
from datetime import timedelta
from unittest.mock import patch
from urllib.parse import urlparse
import av
from homeassistant.components.stream import request_stream
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
async def test_hls_stream(hass, hass_client, stream_worker_sync):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch init
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + "/" + playlist.splitlines()[-1]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
stream_worker_sync.resume()
# Stop stream, if it hasn't quit already
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_timeout(hass, hass_client, stream_worker_sync):
"""Test hls stream timeout."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
stream_worker_sync.resume()
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
# Ensure playlist not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_ended(hass, stream_worker_sync):
"""Test hls stream packets ended."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
# Request stream
request_stream(hass, source)
# Run it dead
while True:
segment = await track.recv()
if segment is None:
break
segments = segment.sequence
        # Allow worker to finalize once enough of the stream has been consumed
if segments > 1:
stream_worker_sync.resume()
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
async def test_stream_keepalive(hass):
"""Test hls stream retries the stream when keepalive=True."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = "test_stream_keepalive_source"
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
track.num_segments = 2
cur_time = 0
def time_side_effect():
nonlocal cur_time
if cur_time >= 80:
stream.keepalive = False # Thread should exit and be joinable.
cur_time += 40
return cur_time
with patch("av.open") as av_open, patch(
"homeassistant.components.stream.worker.time"
) as mock_time, patch(
"homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT", 0
):
av_open.side_effect = av.error.InvalidDataError(-2, "error")
mock_time.time.side_effect = time_side_effect
# Request stream
request_stream(hass, source, keepalive=True)
stream._thread.join()
stream._thread = None
assert av_open.call_count == 2
# Stop stream, if it hasn't quit already
stream.stop()
| 29.616279 | 78 | 0.698469 | [
"Apache-2.0"
] | BoresXP/core | tests/components/stream/test_hls.py | 5,094 | Python |
# -*- coding: utf-8 -*-
## @package pycv_tutorial.color_space
#
# Image processing: color space conversion
# @author tody
# @date 2016/06/27
import cv2
import matplotlib.pyplot as plt
# Display an RGB image
def showImageRGB(image_file):
image_bgr = cv2.imread(image_file)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.title('RGB')
plt.imshow(image_rgb)
plt.axis('off')
plt.show()
# Display a grayscale image
def showImageGray(image_file):
image_gray = cv2.imread(image_file, 0)
plt.title('Gray')
plt.gray()
plt.imshow(image_gray)
plt.axis('off')
plt.show()
# Display the HSV channels
def showImageHSV(image_file):
image_bgr = cv2.imread(image_file)
image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
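    # Note: for 8-bit images OpenCV stores Hue in [0, 179] and
    # Saturation/Value in [0, 255].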
H = image_hsv[:, :, 0]
S = image_hsv[:, :, 1]
V = image_hsv[:, :, 2]
plt.subplot(1, 3, 1)
plt.title('Hue')
plt.gray()
plt.imshow(H)
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('Saturation')
plt.gray()
plt.imshow(S)
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Value')
plt.gray()
plt.imshow(V)
plt.axis('off')
plt.show()
# Display the Lab channels
def showImageLab(image_file):
image_bgr = cv2.imread(image_file)
image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
L = image_Lab[:, :, 0]
a = image_Lab[:, :, 1]
b = image_Lab[:, :, 2]
plt.subplot(1, 3, 1)
plt.title('L')
plt.gray()
plt.imshow(L)
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('a')
plt.gray()
plt.imshow(a)
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('b')
plt.gray()
plt.imshow(b)
plt.axis('off')
plt.show()
if __name__ == '__main__':
image_file = "images/peppers.png"
showImageRGB(image_file)
showImageGray(image_file)
showImageHSV(image_file)
showImageLab(image_file) | 19.123711 | 58 | 0.60593 | [
"MIT"
] | OYukiya/PyIntroduction | opencv/pycv_tutorial/color_space.py | 1,941 | Python |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from stock import *
import partner
import product
import procurement
import report
import wizard
import res_config
import controllers
| 34.84375 | 78 | 0.627803 | [
"Apache-2.0"
] | diogocs1/comps | web/addons/stock/__init__.py | 1,115 | Python |
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
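    '''Abstract base class for networks that turn raw observations into feature vectors.'''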
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
assert value_net_type is not None, 'assert value_net_type is not None'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
assert self.representation_net is not None, 'self.representation_net is not None'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
        '''Overridden to include the policy network.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
| 33.975104 | 97 | 0.578163 | [
"Apache-2.0"
] | kiminh/RLs | rls/utils/build_networks.py | 16,408 | Python |
from decimal import Decimal
from django.db import models
from polymorphic.models import PolymorphicModel
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from auction.utils.loader import get_model_string
from django.conf import settings
class CurrencyField(models.DecimalField):
def to_python(self, value):
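        # Quantize to two decimal places; if the parent returns None there is
        # nothing to quantize, so the AttributeError path below returns None.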
try:
return super(CurrencyField, self).to_python(value=value).quantize(Decimal("0.01"))
except AttributeError:
return None
class BaseAuction(PolymorphicModel):
name = models.CharField(max_length=255, verbose_name=_('Auction name'))
slug = models.SlugField(unique=True, verbose_name=_('Slug'))
start_date = models.DateTimeField(verbose_name=_('Start date'))
end_date = models.DateTimeField(verbose_name=_('End date'))
active = models.BooleanField(default=False, verbose_name=_('Active'))
total_bids = models.IntegerField(default=0, verbose_name=_('Total bids'))
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Auction')
verbose_name_plural = _('Auctions')
def __unicode__(self):
return self.name
class BaseBidBasket(models.Model):
"""
    This model functions similarly to a shopping cart, except that it expects a logged-in user.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('User'))
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Bid basket')
verbose_name_plural = _('Bid baskets')
def add_bid(self, lot, amount):
from auction.models import BidItem
self.save()
if not lot.is_biddable:
return False
try:
amount = Decimal(amount)
except Exception as e:
amount = Decimal('0')
from auction.models.lot import Lot
item,created = BidItem.objects.get_or_create(bid_basket=self,
content_type=ContentType.objects.get_for_model(Lot),
lot_id=lot.pk)
if item:
item.amount=amount
item.save()
return item
def update_bid(self, bid_basket_item_id, amount):
"""
Update amount of bid. Delete bid if amount is 0.
"""
try:
amount = Decimal(amount)
except Exception as e:
amount = Decimal('0')
bid_basket_item = self.bids.get(pk=bid_basket_item_id)
if not bid_basket_item.is_locked():
if amount == 0:
bid_basket_item.delete()
else:
bid_basket_item.amount = amount
bid_basket_item.save()
self.save()
return bid_basket_item
def delete_bid(self, bid_basket_item_id):
"""
Delete a single item from bid basket.
"""
bid_basket_item = self.bids.get(pk=bid_basket_item_id)
if not bid_basket_item.is_locked():
bid_basket_item.delete()
return bid_basket_item
def empty(self):
"""
Remove all bids from bid basket.
"""
if self.pk:
bids = self.bids.all()
for bid in bids:
if not bid.is_locked():
bid.delete()
@property
def bids(self):
"""
Used as accessor for abstract related (BaseBidItem.bid_items).
If you override BaseBidItem and use a label other than "auction"
you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.
Example: foo_biditem_related
(where your label is "foo" and your model is "BidItem")
"""
bids = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME',
'auction_biditem_related')
return getattr(self, bids)
@property
def total_bids(self):
"""
Returns total bids in basket.
"""
return len(self.bids.all())
class BaseAuctionLot(PolymorphicModel):
name = models.CharField(max_length=255, verbose_name=_('Lot name'))
slug = models.SlugField(auto_created=True, verbose_name=_('Slug'))
active = models.BooleanField(default=False, verbose_name=_('Active'))
is_biddable = models.BooleanField(default=False, verbose_name=_('Is biddable?'))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_lots",
verbose_name=_('Content type'))
object_id = models.PositiveIntegerField(verbose_name=_('Object ID'))
content_object = GenericForeignKey('content_type', 'object_id')
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Auction lot')
verbose_name_plural = _('Auction lots')
def __unicode__(self):
return self.name
@property
def is_locked(self):
"""
        This property is meant to be overridden with your own logic. Bid baskets
        check it to find out whether a bid can still be manipulated.
"""
import auction.utils.generic
now = auction.utils.generic.get_current_time()
return self.content_object.end_date <= now
class BaseBidItem(models.Model):
"""
This is a holder for total number of bids and a pointer to
item being bid on.
"""
bid_basket = models.ForeignKey(get_model_string("BidBasket"), on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Bid basket'))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Content type'))
lot_id = models.PositiveIntegerField(verbose_name=_('Lot ID'))
lot_object = GenericForeignKey('content_type', 'lot_id')
amount = CurrencyField(max_digits=10, decimal_places=2, null=True, blank=True, verbose_name=_('Amount'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Bid item')
verbose_name_plural = _('Bid items')
def is_locked(self):
return self.lot.is_locked
@property
def lot(self):
return self.lot_object | 36.657895 | 169 | 0.649103 | [
"MIT"
] | JohnRomanski/django-auction | auction/models/bases.py | 6,965 | Python |
# __init.py
from .home import Home
from .alarm import Alarm
from .light import Light
from .lock import Lock | 18.166667 | 24 | 0.770642 | [
"MIT"
] | abkraynak/smart-home | home/__init__.py | 109 | Python |
import msgpack
import zlib
import numpy as np
import helper_functions as hf
import datetime_helper as dh
def strip_data_by_time(t_data, data, t_min, t_max):
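    """Keep only the samples whose timestamps fall within [t_min, t_max]."""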
data = np.array([s for s, t in zip(data, t_data) if t >= t_min and t <= t_max])
t_data = np.array([t for t in t_data if t >= t_min and t <= t_max])
return t_data, data
def load_example_data(filename_augmento_topics,
filename_augmento_data,
filename_bitmex_data,
datetime_start=None,
datetime_end=None):
# load the topics
with open(filename_augmento_topics, "rb") as f:
temp = msgpack.unpackb(zlib.decompress(f.read()), encoding='utf-8')
augmento_topics = {int(k) : v for k, v in temp.items()}
augmento_topics_inv = {v : int(k) for k, v in temp.items()}
# load the augmento data
with open(filename_augmento_data, "rb") as f:
temp = msgpack.unpackb(zlib.decompress(f.read()), encoding='utf-8')
t_aug_data = np.array([el["t_epoch"] for el in temp], dtype=np.float64)
aug_data = np.array([el["counts"] for el in temp], dtype=np.int32)
# load the price data
with open(filename_bitmex_data, "rb") as f:
temp = msgpack.unpackb(zlib.decompress(f.read()), encoding='utf-8')
t_price_data = np.array([el["t_epoch"] for el in temp], dtype=np.float64)
price_data = np.array([el["open"] for el in temp], dtype=np.float64)
# set the start and end times if they are specified
if datetime_start != None:
t_start = dh.datetime_to_epoch(datetime_start)
else:
t_start = max(np.min(t_aug_data), np.min(t_price_data))
if datetime_end != None:
t_end = dh.datetime_to_epoch(datetime_end)
else:
t_end = min(np.max(t_aug_data), np.max(t_price_data))
# strip the sentiments and prices outside the shared time range
t_aug_data, aug_data = strip_data_by_time(t_aug_data, aug_data, t_start, t_end)
t_price_data, price_data = strip_data_by_time(t_price_data, price_data, t_start, t_end)
return augmento_topics, augmento_topics_inv, t_aug_data, aug_data, t_price_data, price_data
| 39.461538 | 92 | 0.705653 | [
"MIT"
] | ArthurBernard/quant-reseach | src/example_helper.py | 2,052 | Python |
#!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print detailed information about a process.
Author: Giampaolo Rodola' <[email protected]>
"""
import os
import datetime
import socket
import sys
import psutil
def convert_bytes(n):
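    # Convert a raw byte count into a human-readable string, e.g. 2048 -> '2.0K'.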
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
def print_(a, b):
if sys.stdout.isatty() and os.name == 'posix':
fmt = '\x1b[1;32m%-17s\x1b[0m %s' %(a, b)
else:
fmt = '%-15s %s' %(a, b)
# python 2/3 compatibility layer
sys.stdout.write(fmt + '\n')
sys.stdout.flush()
def run(pid):
ACCESS_DENIED = ''
try:
p = psutil.Process(pid)
pinfo = p.as_dict(ad_value=ACCESS_DENIED)
except psutil.NoSuchProcess:
sys.exit(str(sys.exc_info()[1]))
try:
if p.parent:
parent = '(%s)' % p.parent.name
else:
parent = ''
except psutil.Error:
parent = ''
started = datetime.datetime.fromtimestamp(pinfo['create_time']
                                               ).strftime('%Y-%m-%d %H:%M')
io = pinfo.get('io_counters', None)
mem = '%s%% (resident=%s, virtual=%s) ' % (
round(pinfo['memory_percent'], 1),
convert_bytes(pinfo['memory_info'].rss),
convert_bytes(pinfo['memory_info'].vms))
children = p.get_children()
print_('pid', pinfo['pid'])
print_('name', pinfo['name'])
print_('exe', pinfo['exe'])
print_('parent', '%s %s' % (pinfo['ppid'], parent))
print_('cmdline', ' '.join(pinfo['cmdline']))
print_('started', started)
print_('user', pinfo['username'])
if os.name == 'posix':
print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
print_('terminal', pinfo['terminal'] or '')
if hasattr(p, 'getcwd'):
print_('cwd', pinfo['cwd'])
print_('memory', mem)
print_('cpu', '%s%% (user=%s, system=%s)' % (pinfo['cpu_percent'],
pinfo['cpu_times'].user,
pinfo['cpu_times'].system))
print_('status', pinfo['status'])
print_('niceness', pinfo['nice'])
print_('num threads', pinfo['num_threads'])
if io != ACCESS_DENIED:
print_('I/O', 'bytes-read=%s, bytes-written=%s' % \
(convert_bytes(io.read_bytes),
convert_bytes(io.write_bytes)))
if children:
print_('children', '')
for child in children:
print_('', 'pid=%s name=%s' % (child.pid, child.name))
if pinfo['open_files'] != ACCESS_DENIED:
print_('open files', '')
for file in pinfo['open_files']:
print_('', 'fd=%s %s ' % (file.fd, file.path))
if pinfo['threads']:
print_('running threads', '')
for thread in pinfo['threads']:
print_('', 'id=%s, user-time=%s, sys-time=%s' \
% (thread.id, thread.user_time, thread.system_time))
if pinfo['connections'] != ACCESS_DENIED:
print_('open connections', '')
for conn in pinfo['connections']:
if conn.type == socket.SOCK_STREAM:
type = 'TCP'
elif conn.type == socket.SOCK_DGRAM:
type = 'UDP'
else:
type = 'UNIX'
lip, lport = conn.local_address
if not conn.remote_address:
rip, rport = '*', '*'
else:
rip, rport = conn.remote_address
print_('', '%s:%s -> %s:%s type=%s status=%s' \
% (lip, lport, rip, rport, type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
if __name__ == '__main__':
sys.exit(main())
| 33.422222 | 79 | 0.505098 | [
"BSD-3-Clause"
] | hybridlogic/psutil | examples/process_detail.py | 4,512 | Python |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
| 36.509506 | 108 | 0.614559 | [
"MIT"
] | cisco-en-programmability/dnacentersdk | dnacentersdk/api/v1_3_1/clients.py | 9,602 | Python |
#!/usr/bin/python
"""
Sample program to add SSO options to a Manager/Pinbox.
:Copyright:
Copyright 2014 Lastline, Inc. All Rights Reserved.
Created on: Dec 8, 2014 by Lukyan Hritsko
"""
import requests
import argparse
import ConfigParser
import os.path
import logging
import re
from lxml import etree
from json import dumps
from urlparse import urlparse
from papi_client import papi_client
from papi_client import loader
class MissingValue(Exception):
pass
class InvalidXML(Exception):
pass
class InvalidFile(Exception):
pass
class InvalidURL(Exception):
pass
class MetadataExtractor(object):
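    """Pulls the SSO settings (entity ID, x509 certificate, SSO service URL,
    IdP binding and NameID format) out of a SAML 2.0 IdP metadata document."""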
XPATHS = {
'entity_descriptor': '/md:EntityDescriptor',
'idp_sso_descriptor': '/md:EntityDescriptor/md:IDPSSODescriptor'
}
NAMESPACES = {
'md': 'urn:oasis:names:tc:SAML:2.0:metadata',
'ds': 'http://www.w3.org/2000/09/xmldsig#'
}
def __init__(self, xml):
self.entity_id = None
self.x509_cert = None
self.sso_service_url = None
self.idp_binding = None
self.name_id_format = None
self.parse_values(xml)
def get_values_as_dict(self):
return {
'entity_id': self.entity_id,
'x509_cert': self.x509_cert,
'sso_service_url': self.sso_service_url,
'idp_binding': self.idp_binding,
'name_id_format': self.name_id_format,
}
def parse_entity_id(self, xml_root):
try:
entity_descriptor = xml_root.xpath(MetadataExtractor.XPATHS['entity_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
self.entity_id = entity_descriptor.attrib['entityID']
except (KeyError, IndexError):
raise MissingValue("Unable to parse entityID")
def parse_x509_cert(self, key_desc_node):
xpath_from_node = 'ds:KeyInfo/ds:X509Data/ds:X509Certificate'
try:
x509_node = key_desc_node.xpath(xpath_from_node,
namespaces=MetadataExtractor.NAMESPACES)[0]
self.x509_cert = x509_node.text
if not self.x509_cert:
raise MissingValue
except (IndexError, MissingValue):
raise MissingValue("Unable to parse x509 certificate")
def parse_idp_binding_and_location(self, sso_node):
try:
attributes = sso_node.attrib
self.sso_service_url = attributes['Location']
self.idp_binding = attributes['Binding']
except (KeyError) as e:
raise MissingValue("Unable to parse %s", e.message)
def parse_name_id_format(self, name_id_node):
self.name_id_format = name_id_node.text
if not self.name_id_format:
raise MissingValue("Unable to parse name id format")
def extract_tag(self, raw_tag):
return raw_tag[raw_tag.find('}') + 1:]
def get_parser_dispatcher(self):
return {
'KeyDescriptor': self.parse_x509_cert,
'NameIDFormat': self.parse_name_id_format,
'SingleSignOnService': self.parse_idp_binding_and_location
}
def parse_values(self, xml):
try:
root = etree.fromstring(xml)
except (Exception) as e:
raise InvalidXML("Unable to load XML: %s" % e.message)
parser_dispatcher = self.get_parser_dispatcher()
self.parse_entity_id(root)
try:
idp_sso_desc = root.xpath(MetadataExtractor.XPATHS['idp_sso_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
except (IndexError) as e:
raise InvalidXML("Unable to parse IdP SSO Descriptor Node")
for node in idp_sso_desc.getchildren():
tag = self.extract_tag(node.tag)
parser = parser_dispatcher[tag]
parser(node)
def xml_read_from_file(file_name):
xml_fn = os.path.expanduser(file_name)
if not os.path.isfile(xml_fn):
raise InvalidFile("Specified file: '%s' not found" % xml_fn)
with open(xml_fn, 'r') as fp:
return fp.read()
def xml_read_from_url(url, skip_validation=False):
try:
req = requests.get(url, verify=(not skip_validation))
req.raise_for_status()
if not req.content:
raise Exception
except Exception:
raise InvalidURL("Unable to extract metadata from URL")
return req.content
def get_config_parser(file_name):
config_fn = os.path.expanduser(file_name)
if not os.path.isfile(config_fn):
raise InvalidFile("Specified config file: '%s' not found" % config_fn)
config_parser = ConfigParser.ConfigParser()
config_parser.read(config_fn)
return config_parser
def get_logger():
# Python logger...
logger = logging.getLogger()
sh = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
return logger
def get_papi_client(config_parser, logger):
base_client = papi_client.PapiClientFactory.client_from_config(
config_parser,
'papi',
logger)
client = loader.PapiClientCollection(base_client=base_client,
conf=config_parser,
logger=logger)
client.load_view("appliance_mgmt")
return client
class SAMLApplianceConfiguration(object):
def __init__(
self, appliance_uuid, config_index, metadata=None, display_name=None):
self._appliance_uuid = appliance_uuid
self._config_index = config_index
self._metadata = metadata
self._display_name = display_name
def _get_config_settings(self, is_add=True):
sso_config_key = "sso_saml2_config%d" % self._config_index
sso_enabled_key = "sso_saml2_enabled%d" % self._config_index
if is_add:
sso_config_settings = self._metadata.get_values_as_dict()
sso_config_settings['display_name'] = self._display_name
else:
sso_config_settings = {}
return {
sso_enabled_key: is_add,
sso_config_key: dumps(sso_config_settings)
}
def add_sso(self, client):
settings = self._get_config_settings()
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def delete_sso(self, client):
settings = self._get_config_settings(is_add=False)
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def url_or_file(string):
if re.match(r'https?://', string, re.IGNORECASE):
return {'url': string}
else:
return {'file': string}
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="mode",
help="Add or delete a config")
# Parser for add mode
add_parser = subparsers.add_parser('add')
add_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
add_parser.add_argument("url_or_file",
type=url_or_file,
help="Specify file location of metadata or specify "
"a url to automatically parse information.")
add_parser.add_argument("display_name",
nargs="?",
default=None,
help="Specify a namne that will be displayed in "
"the UI.")
add_parser.add_argument("-n",
"--index",
type=int,
dest="config_index",
default=0,
choices=xrange(0, 4),
help="Specify configuration index for single "
"sign on. This is used when configuring "
"multiple SSO options, i.e., first config "
"is 0, second is 1, and so on...")
add_parser.add_argument("--skip-verify-ssl",
default=False,
action="store_true",
help="Skips validation of SSL when retrieving "
"metadata from a URL")
add_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
# Parser for delete mode
delete_parser = subparsers.add_parser("delete")
delete_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
delete_parser.add_argument("config_index",
type=int,
choices=xrange(0, 4),
help="Specify which configuration to remove.")
delete_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
args = parser.parse_args()
logger = get_logger()
try:
config_parser = get_config_parser(args.config)
client = get_papi_client(config_parser, logger)
if args.mode == "delete":
saml_configuration = SAMLApplianceConfiguration(
args.appliance_uuid, args.config_index)
saml_configuration.delete_sso(client)
return 0
if args.url_or_file.get('url', None):
xml_content = xml_read_from_url(args.url_or_file['url'],
args.skip_verify_ssl)
else:
xml_content = xml_read_from_file(args.url_or_file['file'])
metadata = MetadataExtractor(xml_content)
# If no display name exists, let's use the FQDN of the IdP
display_name = args.display_name
if not display_name:
display_name = urlparse(metadata.entity_id).netloc # pylint: disable=E1101
logger.info("Adding SSO configuration (index %d) for appliance %s" %
(args.config_index, args.appliance_uuid))
saml_configuration = SAMLApplianceConfiguration(args.appliance_uuid,
args.config_index,
metadata=metadata,
display_name=display_name)
saml_configuration.add_sso(client)
except (MissingValue, InvalidXML, InvalidFile, InvalidURL) as e:
logger.error(e.message)
return 1
return 0
if __name__ == "__main__":
main()
| 33.886154 | 93 | 0.579769 | [
"Apache-2.0"
] | YmonOy/lastline_api | examples/add_saml_sso_from_metadata.py | 11,013 | Python |
"""
Forgot Password Web Controller
"""
# Standard Library
import os
# Third Party Library
from django.views import View
from django.shortcuts import render
from django.utils.translation import gettext as _
# Local Library
from app.modules.core.context import Context
from app.modules.entity.option_entity import OptionEntity
from app.modules.core.decorators import redirect_if_authenticated
from app.modules.core.decorators import redirect_if_not_installed
class ForgotPassword(View):
template_name = 'templates/forgot_password.html'
__context = None
__option_entity = None
__correlation_id = None
@redirect_if_not_installed
@redirect_if_authenticated
def get(self, request):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__context = Context()
self.__option_entity = OptionEntity()
self.__context.autoload_options()
self.__context.push({
"page_title": _("Forgot Password · %s") % self.__context.get("app_name", os.getenv("APP_NAME", "Silverback"))
})
return render(request, self.template_name, self.__context.get())
| 28.878049 | 121 | 0.734797 | [
"BSD-3-Clause"
] | arxcdr/silverback | app/controllers/web/forgot_password.py | 1,185 | Python |
import os
import dj_database_url
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
MODE = config("MODE", default="dev")
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
UPLOADCARE = {
'pub_key': config('pub_key'),
'secret': config('secret'),
}
# Application definition
INSTALLED_APPS = [
'pyuploadcare.dj',
'gram.apps.GramConfig',
'tinymce',
'bootstrap4',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
LOGIN_REDIRECT_URL = '/home'
# AUTH_PROFILE_MODULE = 'accounts.UserProfile'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if MODE == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DBNAME'),
'USER': config('DBUSER'),
'PASSWORD': config('DBPASS')
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
db_from_env=dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR,'static')]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATICFILES_STORAGE='whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
# Email configurations
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD') | 27.821918 | 91 | 0.693747 | [
"MIT"
] | Brayonski/Instagram-1 | instagram/settings.py | 4,062 | Python |
from django import forms
from .models import Image, Comments
#......
class NewStoryForm(forms.ModelForm):
class Meta:
model = Image
fields = ('image', 'image_caption')
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ('comment',)
| 23 | 43 | 0.64214 | [
"MIT"
] | cossie14/Slygram | instagram/forms.py | 299 | Python |
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
:param y_position: the y position on the file of the stick_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
| 33.97561 | 119 | 0.603494 | [
"Apache-2.0"
] | shuichiro-makigaki/yandeley-python-sdk | yandeley/models/files.py | 4,179 | Python |
import unittest
from pyjsg.validate_json import JSGPython
class MemberExampleTestCase(unittest.TestCase):
def test1(self):
x = JSGPython('''doc {
last_name : @string, # exactly one last name of type string
           first_name : @string+ # array of one or more first names
age : @int?, # optional age of type int
weight : @number* # array of zero or more weights
}
''')
rslts = x.conforms('''
{ "last_name" : "snooter",
"first_name" : ["grunt", "peter"],
"weight" : []
}''')
self.assertTrue(rslts.success)
if __name__ == '__main__':
unittest.main()
| 28.4 | 77 | 0.533803 | [
"CC0-1.0"
] | hsolbrig/pyjsg | tests/test_issues/test_member_example.py | 710 | Python |
#!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import sys, operator, argparse, os
from collections import defaultdict
# This script reads 'ctm-edits' file format that is produced by get_ctm_edits.py
# and modified by modify_ctm_edits.py and taint_ctm_edits.py Its function is to
# produce a segmentation and text from the ctm-edits input.
# The ctm-edits file format that this script expects is as follows
# <file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit> ['tainted']
# [note: file-id is really utterance-id at this point].
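# For illustration only (a made-up line in that format), where the hypothesis
# word 'hello' matched the reference word 'hello' and the edit type is 'cor':
#   utt-001 1 5.32 0.25 hello 0.98 hello cor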
parser = argparse.ArgumentParser(
description = "This program produces segmentation and text information "
"based on reading ctm-edits input format which is produced by "
"steps/cleanup/internal/get_ctm_edits.py, steps/cleanup/internal/modify_ctm_edits.py and "
"steps/cleanup/internal/taint_ctm_edits.py.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--min-segment-length", type = float, default = 0.5,
help = "Minimum allowed segment length (in seconds) for any "
"segment; shorter segments than this will be discarded.")
parser.add_argument("--min-new-segment-length", type = float, default = 1.0,
help = "Minimum allowed segment length (in seconds) for newly "
"created segments (i.e. not identical to the input utterances). "
"Expected to be >= --min-segment-length.")
parser.add_argument("--frame-length", type = float, default = 0.01,
help = "This only affects rounding of the output times; they will "
"be constrained to multiples of this value.")
parser.add_argument("--max-tainted-length", type = float, default = 0.05,
help = "Maximum allowed length of any 'tainted' line. Note: "
"'tainted' lines may only appear at the boundary of a "
"segment")
parser.add_argument("--max-edge-silence-length", type = float, default = 0.5,
help = "Maximum allowed length of silence if it appears at the "
"edge of a segment (will be truncated). This rule is "
"relaxed if such truncation would take a segment below "
"the --min-segment-length or --min-new-segment-length.")
parser.add_argument("--max-edge-non-scored-length", type = float, default = 0.5,
help = "Maximum allowed length of a non-scored word (noise, cough, etc.) "
"if it appears at the edge of a segment (will be truncated). "
"This rule is relaxed if such truncation would take a "
"segment below the --min-segment-length.")
parser.add_argument("--max-internal-silence-length", type = float, default = 2.0,
help = "Maximum allowed length of silence if it appears inside a segment "
"(will cause the segment to be split).")
parser.add_argument("--max-internal-non-scored-length", type = float, default = 2.0,
help = "Maximum allowed length of a non-scored word (noise, etc.) if "
"it appears inside a segment (will cause the segment to be "
"split). Note: reference words which are real words but OOV "
"are not included in this category.")
parser.add_argument("--unk-padding", type = float, default = 0.05,
help = "Amount of padding with <unk> that we do if a segment boundary is "
"next to errors (ins, del, sub). That is, we add this amount of "
"time to the segment and add the <unk> word to cover the acoustics. "
"If nonzero, the --oov-symbol-file option must be supplied.")
parser.add_argument("--max-junk-proportion", type = float, default = 0.1,
help = "Maximum proportion of the time of the segment that may "
"consist of potentially bad data, in which we include 'tainted' lines of "
"the ctm-edits input and unk-padding.")
parser.add_argument("--max-deleted-words-kept-when-merging", type = str, default = 1,
help = "When merging segments that are found to be overlapping or "
"adjacent after all other processing, keep in the transcript the "
"reference words that were deleted between the segments [if any] "
"as long as there were no more than this many reference words. "
"Setting this to zero will mean that any reference words that "
"were deleted between the segments we're about to reattach will "
"not appear in the generated transcript (so we'll match the hyp).")
parser.add_argument("--oov-symbol-file", type = str, default = None,
help = "Filename of file such as data/lang/oov.txt which contains "
"the text form of the OOV word, normally '<unk>'. Supplied as "
"a file to avoid complications with escaping. Necessary if "
"the --unk-padding option has a nonzero value (which it does "
"by default.")
parser.add_argument("--ctm-edits-out", type = str,
help = "Filename to output an extended version of the ctm-edits format "
"with segment start and end points noted. This file is intended to be "
"read by humans; there are currently no scripts that will read it.")
parser.add_argument("--word-stats-out", type = str,
help = "Filename for output of word-level stats, of the form "
"'<word> <bad-proportion> <total-count-in-ref>', e.g. 'hello 0.12 12408', "
"where the <bad-proportion> is the proportion of the time that this "
"reference word does not make it into a segment. It can help reveal words "
"that have problematic pronunciations or are associated with "
"transcription errors.")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
help="Filename of file containing a list of non-scored words, "
"one per line. See steps/cleanup/internal/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
help = "Filename of input ctm-edits file. "
"Use /dev/stdin for standard input.")
parser.add_argument("text_out", metavar = "<text-out>",
help = "Filename of output text file (same format as data/train/text, i.e. "
"<new-utterance-id> <word1> <word2> ... <wordN>")
parser.add_argument("segments_out", metavar = "<segments-out>",
help = "Filename of output segments. This has the same format as data/train/segments, "
"but instead of <recording-id>, the second field is the old utterance-id, i.e "
"<new-utterance-id> <old-utterance-id> <start-time> <end-time>")
args = parser.parse_args()
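# Hypothetical invocation for illustration (file names are made up):
#   segment_ctm_edits.py --oov-symbol-file=data/lang/oov.txt \
#       non_scored_words.txt ctm_edits.tainted text segments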
def IsTainted(split_line_of_utt):
return len(split_line_of_utt) > 8 and split_line_of_utt[8] == 'tainted'
# This function returns a list of pairs (start-index, end-index) representing
# the cores of segments (so if a pair is (s, e), then the core of a segment
# would span (s, s+1, ... e-1).
#
# By the 'core of a segment', we mean a sequence of ctm-edits lines including at
# least one 'cor' line and a contiguous sequence of other lines of the type
# 'cor', 'fix' and 'sil' that must be not tainted. The segment core excludes
# any tainted lines at the edge of a segment, which will be added later.
#
# We only initiate segments when it contains something correct and not realized
# as unk (i.e. ref==hyp); and we extend it with anything that is 'sil' or 'fix'
# or 'cor' that is not tainted. Contiguous regions of 'true' in the resulting
# boolean array will then become the cores of prototype segments, and we'll add
# any adjacent tainted words (or parts of them).
def ComputeSegmentCores(split_lines_of_utt):
num_lines = len(split_lines_of_utt)
line_is_in_segment_core = [ False] * num_lines
for i in range(num_lines):
if split_lines_of_utt[i][7] == 'cor' and \
split_lines_of_utt[i][4] == split_lines_of_utt[i][6]:
line_is_in_segment_core[i] = True
# extend each proto-segment forwards as far as we can:
for i in range(1, num_lines):
if line_is_in_segment_core[i-1] and not line_is_in_segment_core[i]:
edit_type = split_lines_of_utt[i][7]
if not IsTainted(split_lines_of_utt[i]) and \
(edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix'):
line_is_in_segment_core[i] = True
# extend each proto-segment backwards as far as we can:
for i in reversed(range(0, num_lines - 1)):
if line_is_in_segment_core[i+1] and not line_is_in_segment_core[i]:
edit_type = split_lines_of_utt[i][7]
if not IsTainted(split_lines_of_utt[i]) and \
(edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix'):
line_is_in_segment_core[i] = True
segment_ranges = []
cur_segment_start = None
for i in range(0, num_lines):
if line_is_in_segment_core[i]:
if cur_segment_start == None:
cur_segment_start = i
else:
if cur_segment_start != None:
segment_ranges.append( (cur_segment_start, i) )
cur_segment_start = None
if cur_segment_start != None:
segment_ranges.append( (cur_segment_start, num_lines) )
return segment_ranges
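# Worked example for ComputeSegmentCores (illustrative, not real data): for edit
# types ['ins', 'cor', 'sil', 'cor', 'del', 'cor'], where every 'cor' line has
# hyp == ref and no line is tainted, the initial cores are at indexes 1, 3 and 5;
# the extension passes absorb the 'sil' at index 2 but not the 'ins' or 'del',
# so the function returns [(1, 4), (5, 6)].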
class Segment:
def __init__(self, split_lines_of_utt, start_index, end_index, debug_str = None):
self.split_lines_of_utt = split_lines_of_utt
# start_index is the index of the first line that appears in this
# segment, and end_index is one past the last line. This does not
# include unk-padding.
self.start_index = start_index
self.end_index = end_index
# If the following values are nonzero, then when we create the segment
# we will add <unk> at the start and end of the segment [representing
# partial words], with this amount of additional audio.
self.start_unk_padding = 0.0
self.end_unk_padding = 0.0
# debug_str keeps track of the 'core' of the segment.
if debug_str == None:
debug_str = 'core-start={0},core-end={1}'.format(start_index,end_index)
self.debug_str = debug_str
# This gives the proportion of the time of the first line in the segment
# that we keep. Usually 1.0 but may be less if we've trimmed away some
# proportion of the time.
self.start_keep_proportion = 1.0
# This gives the proportion of the time of the last line in the segment
# that we keep. Usually 1.0 but may be less if we've trimmed away some
# proportion of the time.
self.end_keep_proportion = 1.0
# This is stage 1 of segment processing (after creating the boundaries of the
    # core of the segment, which is done outside of this class).
#
# This function may reduce start_index and/or increase end_index by
# including a single adjacent 'tainted' line from the ctm-edits file. This
# is only done if the lines at the boundaries of the segment are currently
# real non-silence words and not non-scored words. The idea is that we
# probably don't want to start or end the segment right at the boundary of a
# real word, we want to add some kind of padding.
def PossiblyAddTaintedLines(self):
global non_scored_words
split_lines_of_utt = self.split_lines_of_utt
# we're iterating over the segment (start, end)
for b in [False, True]:
if b:
boundary_index = self.end_index - 1
adjacent_index = self.end_index
else:
boundary_index = self.start_index
adjacent_index = self.start_index - 1
if adjacent_index >= 0 and adjacent_index < len(split_lines_of_utt):
# only consider merging the adjacent word into the segment if we're not
# at a segment boundary.
adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
# if the adjacent line wasn't tainted, then there must have been
# another stronger reason why we didn't include it in the core
# of the segment (probably that it was an ins, del or sub), so
# there is no point considering it.
if adjacent_line_is_tainted:
boundary_edit_type = split_lines_of_utt[boundary_index][7]
                    boundary_hyp_word = split_lines_of_utt[boundary_index][4]
# we only add the tainted line to the segment if the word at
# the boundary was a non-silence word that was correctly
# decoded and not fixed [see modify_ctm_edits.py.]
if boundary_edit_type == 'cor' and \
not boundary_hyp_word in non_scored_words:
# Add the adjacent tainted line to the segment.
if b:
self.end_index += 1
else:
self.start_index -= 1
# This is stage 2 of segment processing.
# This function will split a segment into multiple pieces if any of the
# internal [non-boundary] silences or non-scored words are longer
# than the allowed values --max-internal-silence-length and
# --max-internal-non-scored-length. This function returns a
# list of segments. In the normal case (where there is no splitting)
# it just returns an array with a single element 'self'.
def PossiblySplitSegment(self):
global non_scored_words, args
# make sure the segment hasn't been processed more than we expect.
assert self.start_unk_padding == 0.0 and self.end_unk_padding == 0.0 and \
self.start_keep_proportion == 1.0 and self.end_keep_proportion == 1.0
segments = [] # the answer
cur_start_index = self.start_index
cur_start_is_split = False
# only consider splitting at non-boundary lines. [we'd just truncate
# the boundary lines.]
for index_to_split_at in range(cur_start_index + 1, self.end_index - 1):
this_split_line = self.split_lines_of_utt[index_to_split_at]
this_duration = float(this_split_line[3])
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
if (this_edit_type == 'sil' and this_duration > args.max_internal_silence_length) or \
(this_ref_word in non_scored_words and this_duration > args.max_internal_non_scored_length):
# We split this segment at this index, dividing the word in two
# [later on, in PossiblyTruncateBoundaries, it may be further
# truncated.]
# Note: we use 'index_to_split_at + 1' because the Segment constructor
# takes an 'end-index' which is interpreted as one past the end.
new_segment = Segment(self.split_lines_of_utt, cur_start_index,
index_to_split_at + 1, self.debug_str)
if cur_start_is_split:
new_segment.start_keep_proportion = 0.5
new_segment.end_keep_proportion = 0.5
cur_start_is_split = True
cur_start_index = index_to_split_at
segments.append(new_segment)
if len(segments) == 0: # We did not split.
segments.append(self)
else:
# We did split. Add the very last segment.
new_segment = Segment(self.split_lines_of_utt, cur_start_index,
self.end_index, self.debug_str)
assert cur_start_is_split
new_segment.start_keep_proportion = 0.5
segments.append(new_segment)
return segments
# This is stage 3 of segment processing. It will truncate the silences and
# non-scored words at the segment boundaries if they are longer than the
# --max-edge-silence-length and --max-edge-non-scored-length respectively
# (and to the extent that this wouldn't take us below the
# --min-segment-length or --min-new-segment-length).
def PossiblyTruncateBoundaries(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
truncated_duration = None
this_duration = float(this_split_line[3])
this_edit = this_split_line[7]
this_ref_word = this_split_line[6]
if this_edit == 'sil' and \
this_duration > args.max_edge_silence_length:
truncated_duration = args.max_edge_silence_length
elif this_ref_word in non_scored_words and \
this_duration > args.max_edge_non_scored_length:
truncated_duration = args.max_edge_non_scored_length
if truncated_duration != None:
keep_proportion = truncated_duration / this_duration
if b:
self.start_keep_proportion = keep_proportion
else:
self.end_keep_proportion = keep_proportion
# This relaxes the segment-boundary truncation of
# PossiblyTruncateBoundaries(), if it would take us below
# min-new-segment-length or min-segment-length. Note: this does not relax
# the boundary truncation for a particular boundary (start or end) if that
# boundary corresponds to a 'tainted' line of the ctm (because it's
# dangerous to include too much 'tainted' audio).
def RelaxBoundaryTruncation(self):
# this should be called before adding unk padding.
assert self.start_unk_padding == self.end_unk_padding == 0.0
if self.start_keep_proportion == self.end_keep_proportion == 1.0:
return # nothing to do there was no truncation.
length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
length_with_truncation = self.Length()
if length_with_truncation >= length_cutoff:
return # Nothing to do.
orig_start_keep_proportion = self.start_keep_proportion
orig_end_keep_proportion = self.end_keep_proportion
if not IsTainted(self.split_lines_of_utt[self.start_index]):
self.start_keep_proportion = 1.0
if not IsTainted(self.split_lines_of_utt[self.end_index - 1]):
self.end_keep_proportion = 1.0
length_with_relaxed_boundaries = self.Length()
if length_with_relaxed_boundaries <= length_cutoff:
# Completely undo the truncation [to the extent allowed by the
# presence of tainted lines at the start/end] if, even without
# truncation, we'd be below the length cutoff. This segment may be
# removed later on (but it may not, if removing truncation makes us
# identical to the input utterance, and the length is between
# min_segment_length min_new_segment_length).
return
# Next, compute an interpolation constant a such that the
# {start,end}_keep_proportion values will equal a *
# [values-computed-by-PossiblyTruncateBoundaries()] + (1-a) * [completely-relaxed-values].
# we're solving the equation:
# length_cutoff = a * length_with_truncation + (1-a) * length_with_relaxed_boundaries
# -> length_cutoff - length_with_relaxed_boundaries =
# a * (length_with_truncation - length_with_relaxed_boundaries)
# -> a = (length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries)
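        # Worked example with made-up numbers: if length_cutoff = 1.0,
        # length_with_truncation = 0.6 and length_with_relaxed_boundaries = 1.4,
        # then a = (1.0 - 1.4) / (0.6 - 1.4) = 0.5, and indeed
        # 0.5 * 0.6 + 0.5 * 1.4 = 1.0 = length_cutoff.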
a = (length_cutoff - length_with_relaxed_boundaries) / \
(length_with_truncation - length_with_relaxed_boundaries)
if a < 0.0 or a > 1.0:
print("segment_ctm_edits.py: bad 'a' value = {0}".format(a), file = sys.stderr)
return
self.start_keep_proportion = \
a * orig_start_keep_proportion + (1-a) * self.start_keep_proportion
self.end_keep_proportion = \
a * orig_end_keep_proportion + (1-a) * self.end_keep_proportion
if not abs(self.Length() - length_cutoff) < 0.01:
print("segment_ctm_edits.py: possible problem relaxing boundary "
"truncation, length is {0} vs {1}".format(self.Length(), length_cutoff),
file = sys.stderr)
# This is stage 4 of segment processing.
# This function may set start_unk_padding and end_unk_padding to nonzero
# values. This is done if the current boundary words are real, scored
# words and we're not next to the beginning or end of the utterance.
def PossiblyAddUnkPadding(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
this_start_time = float(this_split_line[2])
this_ref_word = this_split_line[6]
this_edit = this_split_line[7]
if this_edit == 'cor' and not this_ref_word in non_scored_words:
# we can consider adding unk-padding.
if b: # start of utterance.
unk_padding = args.unk_padding
if unk_padding > this_start_time: # close to beginning of file
unk_padding = this_start_time
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.start_unk_padding = unk_padding
else: # end of utterance.
this_end_time = this_start_time + float(this_split_line[3])
last_line = self.split_lines_of_utt[-1]
utterance_end_time = float(last_line[2]) + float(last_line[3])
max_allowable_padding = utterance_end_time - this_end_time
assert max_allowable_padding > -0.01
unk_padding = args.unk_padding
if unk_padding > max_allowable_padding:
unk_padding = max_allowable_padding
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.end_unk_padding = unk_padding
# This function will merge the segment in 'other' with the segment
# in 'self'. It is only to be called when 'self' and 'other' are from
# the same utterance, 'other' is after 'self' in time order (based on
# the original segment cores), and self.EndTime() >= other.StartTime().
# Note: in this situation there will normally be deleted words
# between the two segments. What this program does with the deleted
# words depends on '--max-deleted-words-kept-when-merging'. If there
# were any inserted words in the transcript (less likely), this
# program will keep the reference.
def MergeWithSegment(self, other):
assert self.EndTime() >= other.StartTime() and \
self.StartTime() < other.EndTime() and \
self.split_lines_of_utt is other.split_lines_of_utt
orig_self_end_index = self.end_index
self.debug_str = "({0}/merged-with/{1})".format(self.debug_str, other.debug_str)
# everything that relates to the end of this segment gets copied
# from 'other'.
self.end_index = other.end_index
self.end_unk_padding = other.end_unk_padding
self.end_keep_proportion = other.end_keep_proportion
# The next thing we have to do is to go over any lines of the ctm that
# appear between 'self' and 'other', or are shared between both (this
# would only happen for tainted silence or non-scored-word segments),
# and decide what to do with them. We'll keep the reference for any
# substitutions or insertions (which anyway are unlikely to appear
# in these merged segments). Note: most of this happens in self.Text(),
# but at this point we need to decide whether to mark any deletions
# as 'discard-this-word'.
first_index_of_overlap = min(orig_self_end_index - 1, other.start_index)
last_index_of_overlap = max(orig_self_end_index - 1, other.start_index)
num_deleted_words = 0
for i in range(first_index_of_overlap, last_index_of_overlap + 1):
edit_type = self.split_lines_of_utt[i][7]
if edit_type == 'del':
num_deleted_words += 1
if num_deleted_words > args.max_deleted_words_kept_when_merging:
for i in range(first_index_of_overlap, last_index_of_overlap + 1):
if self.split_lines_of_utt[i][7] == 'del':
self.split_lines_of_utt[i].append('do-not-include-in-text')
    # Returns the start time of the segment (within the enclosing utterance).
# This is before any rounding.
def StartTime(self):
first_line = self.split_lines_of_utt[self.start_index]
first_line_start = float(first_line[2])
first_line_duration = float(first_line[3])
first_line_end = first_line_start + first_line_duration
return first_line_end - self.start_unk_padding \
- (first_line_duration * self.start_keep_proportion)
# Returns some string-valued information about 'this' that is useful for debugging.
def DebugInfo(self):
return 'start=%d,end=%d,unk-padding=%.2f,%.2f,keep-proportion=%.2f,%.2f,' % \
(self.start_index, self.end_index, self.start_unk_padding,
self.end_unk_padding, self.start_keep_proportion, self.end_keep_proportion) + \
self.debug_str
    # Returns the end time of the segment (within the enclosing utterance).
def EndTime(self):
last_line = self.split_lines_of_utt[self.end_index - 1]
last_line_start = float(last_line[2])
last_line_duration = float(last_line[3])
return last_line_start + (last_line_duration * self.end_keep_proportion) \
+ self.end_unk_padding
# Returns the segment length in seconds.
def Length(self):
return self.EndTime() - self.StartTime()
def IsWholeUtterance(self):
# returns true if this segment corresponds to the whole utterance that
        # it's a part of (i.e. its start time is zero and its end time is the
        # end time of the last line of the utterance).
last_line_of_utt = self.split_lines_of_utt[-1]
last_line_end_time = float(last_line_of_utt[2]) + float(last_line_of_utt[3])
return abs(self.StartTime() - 0.0) < 0.001 and \
abs(self.EndTime() - last_line_end_time) < 0.001
# Returns the proportion of the duration of this segment that consists of
# unk-padding and tainted lines of input (will be between 0.0 and 1.0).
def JunkProportion(self):
# Note: only the first and last lines could possibly be tainted as
# that's how we create the segments; and if either or both are tainted
# the utterance must contain other lines, so double-counting is not a
# problem.
junk_duration = self.start_unk_padding + self.end_unk_padding
first_split_line = self.split_lines_of_utt[self.start_index]
if IsTainted(first_split_line):
first_duration = float(first_split_line[3])
junk_duration += first_duration * self.start_keep_proportion
last_split_line = self.split_lines_of_utt[self.end_index - 1]
if IsTainted(last_split_line):
last_duration = float(last_split_line[3])
junk_duration += last_duration * self.end_keep_proportion
return junk_duration / self.Length()
# This function will remove something from the beginning of the
# segment if it's possible to cleanly lop off a bit that contains
# more junk, as a proportion of its length, than 'args.junk_proportion'.
# Junk is defined as unk-padding and/or tainted segments.
# It considers as a potential split point, the first silence
# segment or non-tainted non-scored-word segment in the
# utterance. See also TruncateEndForJunkProportion
def PossiblyTruncateStartForJunkProportion(self):
begin_junk_duration = self.start_unk_padding
first_split_line = self.split_lines_of_utt[self.start_index]
if IsTainted(first_split_line):
first_duration = float(first_split_line[3])
begin_junk_duration += first_duration * self.start_keep_proportion
if begin_junk_duration == 0.0:
# nothing to do.
return
candidate_start_index = None
# the following iterates over all lines internal to the utterance.
for i in range(self.start_index + 1, self.end_index - 1):
this_split_line = self.split_lines_of_utt[i]
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
# We'll consider splitting on silence and on non-scored words.
# (i.e. making the silence or non-scored word the left boundary of
# the new utterance and discarding the piece to the left of that).
if this_edit_type == 'sil' or \
(this_edit_type == 'cor' and this_ref_word in non_scored_words):
candidate_start_index = i
candidate_start_time = float(this_split_line[2])
break # Consider only the first potential truncation.
if candidate_start_index == None:
return # Nothing to do as there is no place to split.
candidate_removed_piece_duration = candidate_start_time - self.StartTime()
if begin_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
return # Nothing to do as the candidate piece to remove has too
# little junk.
# OK, remove the piece.
self.start_index = candidate_start_index
self.start_unk_padding = 0.0
self.start_keep_proportion = 1.0
self.debug_str += ',truncated-start-for-junk'
# This is like PossiblyTruncateStartForJunkProportion(), but
# acts on the end of the segment; see comments there.
def PossiblyTruncateEndForJunkProportion(self):
end_junk_duration = self.end_unk_padding
last_split_line = self.split_lines_of_utt[self.end_index - 1]
if IsTainted(last_split_line):
last_duration = float(last_split_line[3])
end_junk_duration += last_duration * self.end_keep_proportion
if end_junk_duration == 0.0:
# nothing to do.
return
candidate_end_index = None
# the following iterates over all lines internal to the utterance
# (starting from the end).
for i in reversed(range(self.start_index + 1, self.end_index - 1)):
this_split_line = self.split_lines_of_utt[i]
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
# We'll consider splitting on silence and on non-scored words.
# (i.e. making the silence or non-scored word the right boundary of
# the new utterance and discarding the piece to the right of that).
if this_edit_type == 'sil' or \
(this_edit_type == 'cor' and this_ref_word in non_scored_words):
candidate_end_index = i + 1 # note: end-indexes are one past the last.
candidate_end_time = float(this_split_line[2]) + float(this_split_line[3])
break # Consider only the latest potential truncation.
if candidate_end_index == None:
return # Nothing to do as there is no place to split.
candidate_removed_piece_duration = self.EndTime() - candidate_end_time
if end_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
return # Nothing to do as the candidate piece to remove has too
# little junk.
# OK, remove the piece.
self.end_index = candidate_end_index
self.end_unk_padding = 0.0
self.end_keep_proportion = 1.0
self.debug_str += ',truncated-end-for-junk'
# this will return true if there is at least one word in the utterance
# that's a scored word (not a non-scored word) and not an OOV word that's
# realized as unk. This becomes a filter on keeping segments.
def ContainsAtLeastOneScoredNonOovWord(self):
global non_scored_words
for i in range(self.start_index, self.end_index):
this_split_line = self.split_lines_of_utt[i]
this_hyp_word = this_split_line[4]
this_ref_word = this_split_line[6]
this_edit = this_split_line[7]
if this_edit == 'cor' and not this_ref_word in non_scored_words \
and this_ref_word == this_hyp_word:
return True
return False
# Returns the text corresponding to this utterance, as a string.
def Text(self):
global oov_symbol
text_array = []
if self.start_unk_padding != 0.0:
text_array.append(oov_symbol)
for i in range(self.start_index, self.end_index):
this_split_line = self.split_lines_of_utt[i]
this_edit = this_split_line[7]
this_ref_word = this_split_line[6]
if this_ref_word != '<eps>' and this_split_line[-1] != 'do-not-include-in-text':
text_array.append(this_ref_word)
if self.end_unk_padding != 0.0:
text_array.append(oov_symbol)
return ' '.join(text_array)
# Here, 'text' will be something that indicates the stage of processing,
# e.g. 'Stage 0: segment cores', 'Stage 1: add tainted lines',
# etc.
def AccumulateSegmentStats(segment_list, text):
global segment_total_length, num_segments
for segment in segment_list:
num_segments[text] += 1
segment_total_length[text] += segment.Length()
def PrintSegmentStats():
global segment_total_length, num_segments, \
num_utterances, num_utterances_without_segments, \
total_length_of_utterances
print('Number of utterances is %d, of which %.2f%% had no segments after '
'all processing; total length of data in original utterances (in seconds) '
'was %d' % (num_utterances,
num_utterances_without_segments * 100.0 / num_utterances,
total_length_of_utterances),
file = sys.stderr)
keys = sorted(segment_total_length.keys())
for i in range(len(keys)):
key = keys[i]
if i > 0:
delta_percentage = '[%+.2f%%]' % ((segment_total_length[key] - segment_total_length[keys[i-1]])
* 100.0 / total_length_of_utterances)
print('At %s, num-segments is %d, total length %.2f%% of original total %s' % (
key, num_segments[key],
segment_total_length[key] * 100.0 / total_length_of_utterances,
delta_percentage if i > 0 else ''),
file = sys.stderr)
# This function creates the segments for an utterance as a list
# of class Segment.
# It returns a 2-tuple (list-of-segments, list-of-deleted-segments)
# where the deleted segments are only useful for diagnostic printing.
# Note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
def GetSegmentsForUtterance(split_lines_of_utt):
global num_utterances, num_utterances_without_segments, total_length_of_utterances
num_utterances += 1
segment_ranges = ComputeSegmentCores(split_lines_of_utt)
utterance_end_time = float(split_lines_of_utt[-1][2]) + float(split_lines_of_utt[-1][3])
total_length_of_utterances += utterance_end_time
segments = [ Segment(split_lines_of_utt, x[0], x[1])
for x in segment_ranges ]
AccumulateSegmentStats(segments, 'stage 0 [segment cores]')
for segment in segments:
segment.PossiblyAddTaintedLines()
AccumulateSegmentStats(segments, 'stage 1 [add tainted lines]')
new_segments = []
for s in segments:
new_segments += s.PossiblySplitSegment()
segments = new_segments
AccumulateSegmentStats(segments, 'stage 2 [split segments]')
for s in segments:
s.PossiblyTruncateBoundaries()
AccumulateSegmentStats(segments, 'stage 3 [truncate boundaries]')
for s in segments:
s.RelaxBoundaryTruncation()
AccumulateSegmentStats(segments, 'stage 4 [relax boundary truncation]')
for s in segments:
s.PossiblyAddUnkPadding()
AccumulateSegmentStats(segments, 'stage 5 [unk-padding]')
deleted_segments = []
new_segments = []
for s in segments:
# the 0.999 allows for roundoff error.
if (not s.IsWholeUtterance() and s.Length() < 0.999 * args.min_new_segment_length):
s.debug_str += '[deleted-because-of--min-new-segment-length]'
deleted_segments.append(s)
else:
new_segments.append(s)
segments = new_segments
    AccumulateSegmentStats(segments, 'stage 6 [remove new segments under --min-new-segment-length]')
new_segments = []
for s in segments:
# the 0.999 allows for roundoff error.
if s.Length() < 0.999 * args.min_segment_length:
s.debug_str += '[deleted-because-of--min-segment-length]'
deleted_segments.append(s)
else:
new_segments.append(s)
segments = new_segments
    AccumulateSegmentStats(segments, 'stage 7 [remove segments under --min-segment-length]')
for s in segments:
s.PossiblyTruncateStartForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 8 [truncate segment-starts for --max-junk-proportion]')
for s in segments:
s.PossiblyTruncateEndForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 9 [truncate segment-ends for --max-junk-proportion]')
new_segments = []
for s in segments:
if s.ContainsAtLeastOneScoredNonOovWord():
new_segments.append(s)
else:
s.debug_str += '[deleted-because-no-scored-non-oov-words]'
deleted_segments.append(s)
segments = new_segments
AccumulateSegmentStats(segments, 'stage 10 [remove segments without scored,non-OOV words]')
new_segments = []
for s in segments:
j = s.JunkProportion()
if j <= args.max_junk_proportion:
new_segments.append(s)
else:
s.debug_str += '[deleted-because-junk-proportion={0}]'.format(j)
deleted_segments.append(s)
segments = new_segments
AccumulateSegmentStats(segments, 'stage 11 [remove segments with junk exceeding --max-junk-proportion]')
new_segments = []
if len(segments) > 0:
new_segments.append(segments[0])
for i in range(1, len(segments)):
if new_segments[-1].EndTime() >= segments[i].StartTime():
new_segments[-1].MergeWithSegment(segments[i])
else:
new_segments.append(segments[i])
segments = new_segments
AccumulateSegmentStats(segments, 'stage 12 [merge overlapping or touching segments]')
for i in range(len(segments) - 1):
if segments[i].EndTime() > segments[i+1].StartTime():
# this just adds something to --ctm-edits-out output
segments[i+1].debug_str += ",overlaps-previous-segment"
if len(segments) == 0:
num_utterances_without_segments += 1
return (segments, deleted_segments)
# this prints a number with a certain number of digits after
# the point, while removing trailing zeros.
def FloatToString(f):
    num_digits = 6 # we want to print about 6 digits after the decimal point
g = f
while abs(g) > 1.0:
g *= 0.1
num_digits += 1
format_str = '%.{0}g'.format(num_digits)
return format_str % f
# Gives time in string form as an exact multiple of the frame-length, e.g. 0.01
# (after rounding).
def TimeToString(time, frame_length):
n = round(time / frame_length)
assert n >= 0
# The next function call will remove trailing zeros while printing it, so
# that e.g. 0.01 will be printed as 0.01 and not 0.0099999999999999. It
# seems that doing this in a simple way is not really possible (at least,
# not without assuming that frame_length is of the form 10^-n, which we
# don't really want to do).
return FloatToString(n * frame_length)
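# Illustrative example (with the default frame_length of 0.01):
# TimeToString(0.00999, 0.01) rounds to one frame and returns '0.01' rather
# than something like '0.009999999999999999'.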
def WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
old_utterance_name, segments):
for n in range(len(segments)):
segment = segments[n]
# split utterances will be named foo-bar-1 foo-bar-2, etc.
new_utterance_name = old_utterance_name + "-" + str(n + 1)
# print a line to the text output of the form like
# <new-utterance-id> <text>
# like:
# foo-bar-1 hello this is dan
print(new_utterance_name, segment.Text(), file = text_output_handle)
# print a line to the segments output of the form
# <new-utterance-id> <old-utterance-id> <start-time> <end-time>
# like:
# foo-bar-1 foo-bar 5.1 7.2
print(new_utterance_name, old_utterance_name,
TimeToString(segment.StartTime(), args.frame_length),
TimeToString(segment.EndTime(), args.frame_length),
file = segments_output_handle)
# Note, this is destructive of 'segments_for_utterance', but it won't matter.
def PrintDebugInfoForUtterance(ctm_edits_out_handle,
split_lines_of_cur_utterance,
segments_for_utterance,
deleted_segments_for_utterance):
# info_to_print will be list of 2-tuples (time, 'start-segment-n'|'end-segment-n')
# representing the start or end times of segments.
info_to_print = []
for n in range(len(segments_for_utterance)):
segment = segments_for_utterance[n]
start_string = 'start-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
info_to_print.append( (segment.StartTime(), start_string) )
end_string = 'end-segment-' + str(n+1)
info_to_print.append( (segment.EndTime(), end_string) )
# for segments that were deleted we print info like start-deleted-segment-1, and
# otherwise similar info to segments that were retained.
for n in range(len(deleted_segments_for_utterance)):
segment = deleted_segments_for_utterance[n]
start_string = 'start-deleted-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
info_to_print.append( (segment.StartTime(), start_string) )
end_string = 'end-deleted-segment-' + str(n+1)
info_to_print.append( (segment.EndTime(), end_string) )
info_to_print = sorted(info_to_print)
for i in range(len(split_lines_of_cur_utterance)):
split_line=split_lines_of_cur_utterance[i]
split_line[0] += '[' + str(i) + ']' # add an index like [0], [1], to
# the utterance-id so we can easily
# look up segment indexes.
start_time = float(split_line[2])
end_time = start_time + float(split_line[3])
split_line_copy = list(split_line)
while len(info_to_print) > 0 and info_to_print[0][0] <= end_time:
(segment_start, string) = info_to_print[0]
# shift the first element off of info_to_print.
info_to_print = info_to_print[1:]
# add a field like 'start-segment1[...]=3.21' to what we're about to print.
split_line_copy.append(string + "=" + TimeToString(segment_start, args.frame_length))
print(' '.join(split_line_copy), file = ctm_edits_out_handle)
# This accumulates word-level stats about, for each reference word, with what
# probability it will end up in the core of a segment. Words with low
# probabilities of being in segments will generally be associated with some kind
# of error (there is a higher probability of having a wrong lexicon entry).
def AccWordStatsForUtterance(split_lines_of_utt,
segments_for_utterance):
# word_count_pair is a map from a string (the word) to
# a list [total-count, count-not-within-segments]
global word_count_pair
line_is_in_segment = [ False ] * len(split_lines_of_utt)
for segment in segments_for_utterance:
for i in range(segment.start_index, segment.end_index):
line_is_in_segment[i] = True
for i in range(len(split_lines_of_utt)):
this_ref_word = split_lines_of_utt[i][6]
if this_ref_word != '<eps>':
word_count_pair[this_ref_word][0] += 1
if not line_is_in_segment[i]:
word_count_pair[this_ref_word][1] += 1
def PrintWordStats(word_stats_out):
try:
f = open(word_stats_out, 'w')
except:
sys.exit("segment_ctm_edits.py: error opening word-stats file --word-stats-out={0} "
"for writing".format(word_stats_out))
global word_count_pair
# Sort from most to least problematic. We want to give more prominence to
# words that are most frequently not in segments, but also to high-count
# words. Define badness = pair[1] / pair[0], and total_count = pair[0],
# where 'pair' is a value of word_count_pair. We'll reverse sort on
# badness^3 * total_count = pair[1]^3 / pair[0]^2.
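    # Worked example with made-up counts: a word with pair == [100, 10] has
    # badness 10/100 = 0.1 and sort key 10**3 / 100**2 = 0.1, while a rarer but
    # always-excluded word with pair == [5, 5] has sort key 5**3 / 5**2 = 5.0
    # and is therefore printed nearer the top.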
for key, pair in sorted(word_count_pair.items(),
key = lambda item: (item[1][1] ** 3) * 1.0 / (item[1][0] ** 2),
reverse = True):
badness = pair[1] * 1.0 / pair[0]
total_count = pair[0]
print(key, badness, total_count, file = f)
try:
f.close()
except:
sys.exit("segment_ctm_edits.py: error closing file --word-stats-out={0} "
"(full disk?)".format(word_stats_out))
print("segment_ctm_edits.py: please see the file {0} for word-level statistics "
"saying how frequently each word was excluded for a segment; format is "
"<word> <proportion-of-time-excluded> <total-count>. Particularly "
"problematic words appear near the top of the file.".format(word_stats_out),
file = sys.stderr)
def ProcessData():
try:
f_in = open(args.ctm_edits_in)
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
"file {0}".format(args.ctm_edits_in))
try:
text_output_handle = open(args.text_out, 'w')
except:
sys.exit("modify_ctm_edits.py: error opening text output "
"file {0}".format(args.text_out))
try:
segments_output_handle = open(args.segments_out, 'w')
except:
sys.exit("modify_ctm_edits.py: error opening segments output "
"file {0}".format(args.text_out))
if args.ctm_edits_out != None:
try:
ctm_edits_output_handle = open(args.ctm_edits_out, 'w')
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
"file {0}".format(args.ctm_edits_out))
# Most of what we're doing in the lines below is splitting the input lines
# and grouping them per utterance, before giving them to ProcessUtterance()
# and then printing the modified lines.
first_line = f_in.readline()
if first_line == '':
sys.exit("modify_ctm_edits.py: empty input")
split_pending_line = first_line.split()
if len(split_pending_line) == 0:
sys.exit("modify_ctm_edits.py: bad input line " + first_line)
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance = []
while True:
if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
(segments_for_utterance,
deleted_segments_for_utterance) = GetSegmentsForUtterance(split_lines_of_cur_utterance)
AccWordStatsForUtterance(split_lines_of_cur_utterance, segments_for_utterance)
WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
cur_utterance, segments_for_utterance)
if args.ctm_edits_out != None:
PrintDebugInfoForUtterance(ctm_edits_output_handle,
split_lines_of_cur_utterance,
segments_for_utterance,
deleted_segments_for_utterance)
split_lines_of_cur_utterance = []
if len(split_pending_line) == 0:
break
else:
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance.append(split_pending_line)
next_line = f_in.readline()
split_pending_line = next_line.split()
if len(split_pending_line) == 0:
if next_line != '':
sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
try:
text_output_handle.close()
segments_output_handle.close()
if args.ctm_edits_out != None:
ctm_edits_output_handle.close()
except:
sys.exit("modify_ctm_edits.py: error closing one or more outputs "
"(broken pipe or full disk?)")
def ReadNonScoredWords(non_scored_words_file):
global non_scored_words
try:
f = open(non_scored_words_file)
except:
sys.exit("modify_ctm_edits.py: error opening file: "
"--non-scored-words=" + non_scored_words_file)
for line in f.readlines():
a = line.split()
        if len(a) != 1:
sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
"file {0}: {1}".format(non_scored_words_file, line))
non_scored_words.add(a[0])
f.close()
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
oov_symbol = None
if args.oov_symbol_file != None:
try:
with open(args.oov_symbol_file) as f:
line = f.readline()
assert len(line.split()) == 1
oov_symbol = line.split()[0]
assert f.readline() == ''
except Exception as e:
sys.exit("segment_ctm_edits.py: error reading file --oov-symbol-file=" +
args.oov_symbol_file + ", error is: " + str(e))
elif args.unk_padding != 0.0:
sys.exit("segment_ctm_edits.py: if the --unk-padding option is nonzero (which "
"it is by default, the --oov-symbol-file option must be supplied.")
# segment_total_length and num_segments are maps from
# 'stage' strings; see AccumulateSegmentStats for details.
segment_total_length = defaultdict(int)
num_segments = defaultdict(int)
# the lambda expression below is an anonymous function that takes no arguments
# and returns the new list [0, 0].
word_count_pair = defaultdict(lambda: [0, 0])
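# e.g. word_count_pair['hello'] starts out as [0, 0] on first access, so the
# counts can be incremented without explicit initialization.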
num_utterances = 0
num_utterances_without_segments = 0
total_length_of_utterances = 0
ProcessData()
PrintSegmentStats()
if args.word_stats_out != None:
PrintWordStats(args.word_stats_out)
if args.ctm_edits_out != None:
print("segment_ctm_edits.py: detailed utterance-level debug information "
"is in " + args.ctm_edits_out, file = sys.stderr)
| 51.010618 | 125 | 0.641058 | [
"Apache-2.0"
] | HunterJiang/kaldi | egs/wsj/s5/steps/cleanup/internal/segment_ctm_edits.py | 52,847 | Python |
from lightning_transformers.task.nlp.translation.datasets.wmt16 import WMT16TranslationDataModule
from lightning_transformers.task.nlp.translation.datasets.smiles import SMILESTranslationDataModule
| 40 | 99 | 0.9 | [
"Apache-2.0"
] | zhaisilong/lightning-transformers | lightning_transformers/task/nlp/translation/datasets/__init__.py | 200 | Python |
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from .enums import ChannelType
from .messageable import Messageable
if TYPE_CHECKING:
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import Group as GroupDMChannelPayload
from .types import SavedMessages as SavedMessagesPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = ("Channel",)
class Channel:
"""Base class for all channels
Attributes
-----------
id: :class:`str`
The id of the channel
channel_type: ChannelType
The type of the channel
server: Optional[:class:`Server`]
The server the channel is part of
"""
__slots__ = ("state", "id", "channel_type", "server")
def __init__(self, data: ChannelPayload, state: State):
self.state = state
self.id = data["_id"]
self.channel_type = ChannelType(data["channel_type"])
self.server = None
class SavedMessageChannel(Channel, Messageable):
"""The Saved Message Channel"""
def __init__(self, data: SavedMessagesPayload, state: State):
super().__init__(data, state)
class DMChannel(Channel, Messageable):
"""A DM channel"""
def __init__(self, data: DMChannelPayload, state: State):
super().__init__(data, state)
class GroupDMChannel(Channel, Messageable):
__slots__ = ("recipients", "name", "owner")
"""A group DM channel"""
def __init__(self, data: GroupDMChannelPayload, state: State):
super().__init__(data, state)
self.recipients = cast(list[User], list(filter(bool, [state.get_user(user_id) for user_id in data["recipients"]])))
self.name = data["name"]
self.owner = state.get_user(data["owner"])
class TextChannel(Channel, Messageable):
__slots__ = ("name", "description", "last_message", "last_message_id")
"""A text channel"""
def __init__(self, data: TextChannelPayload, state: State):
super().__init__(data, state)
self.server = state.get_server(data["server"])
self.name = data["name"]
self.description = data.get("description")
last_message_id = data.get("last_message")
self.last_message = state.get_message(last_message_id)
self.last_message_id = last_message_id
class VoiceChannel(Channel):
"""A voice channel"""
def __init__(self, data: ChannelPayload, state: State):
super().__init__(data, state)
def channel_factory(data: ChannelPayload, state: State) -> Channel:
if data["channel_type"] == "SavedMessage":
return SavedMessageChannel(data, state)
elif data["channel_type"] == "DirectMessage":
return DMChannel(data, state)
elif data["channel_type"] == "Group":
return GroupDMChannel(data, state)
elif data["channel_type"] == "TextChannel":
return TextChannel(data, state)
elif data["channel_type"] == "VoiceChannel":
return VoiceChannel(data, state)
else:
        raise Exception(f"Unknown channel type: {data['channel_type']}")
| 33.655914 | 123 | 0.66869 | [
"MIT"
] | XiehCanCode/revolt.py | revolt/channel.py | 3,130 | Python |
'''
Author: what-is-me
E-mail: [email protected]
Github: https://github.com/what-is-me
LeetCode: https://leetcode-cn.com/u/what-is-me/
Date: 2021-05-17 23:22:14
LastEditors: what-is-me
LastEditTime: 2021-05-19 12:33:23
Description: look up the meaning of a single word/phrase
'''
import re
import urllib.parse
import requests
class getimg:
def youdao(html):
html = html.split('</h2>')[-1]
html = html.split('<span>网络释义</span>')[0]
reg = r'<li>(.*?)</li>'
img = re.compile(reg)
img_list = re.findall(img, html)
result = ""
for s in img_list:
if (s != ""):
result = result + s + ";"
result = "".join(result.split())
result = re.sub(r'<(.*?)>', '', result)
if result == '' or result[0:1] == '<a':
return "未收录"
return result
def jinshan(html):
reg = r'<ul class="Mean_part__1RA2V"><li>(.*?)</ul>'
img = re.compile(reg)
img_list = re.findall(img, html)
result = "".join(img_list)
result = re.sub('<', '[', result)
result = re.sub('>', ']', result)
result = re.sub(r'<(.*?)>', '', result)
if result == "":
return "未收录"
return result
def bing(html):
reg = r'<meta name="description" content="(.*?)" />'
img = re.compile(reg)
result = re.search(img, html).group()
result = result.split('<meta name="description" content="')[-1]
result = result.split('" />')[0]
result = re.sub('必应词典为您提供', '', result)
result = re.sub('的释义', '', result)
result = re.sub('英', '', result)
result = re.sub('美', '', result)
result = re.sub(',', '', result)
result = result.split('网络释义:')[0]
result = re.sub(r'\[(.*?)\]', '', result)
if result == "" or result[0:3] == "必应词典":
return "未收录"
return result
def haici(html):
html = html.split('<div class="basic clearfix">')[-1]
html = html.split('<li style="padding-top: 25px;">')[0]
reg1 = r'<span>(.*?)</span>'
img1 = re.compile(reg1)
img_list1 = re.findall(img1, html)
reg2 = r'<strong>(.*?)</strong>'
img2 = re.compile(reg2)
img_list2 = re.findall(img2, html)
if len(img_list2) == 0:
result = "未收录"
return result
result = ''
if(len(img_list1) == 0):
for i in range(0, len(img_list2)):
result += img_list2[i]
else:
for i in range(0, len(img_list1)):
result += "["+img_list1[i]+"]"
result += img_list2[i]
return result
def youdao_jp(html):
html = html.split('<!--日汉词典结果 -->')[-1]
html = html.split('<!--网络翻译-->')[0]
result = "".join(html.split())
result = re.sub(r'<span class="keyword">(.*?)</span>', '', result)
result = re.sub(r'<h4>(.*?)</sup>', '', result)
result = re.sub(r'<sup>(.*?)</sup>', '', result)
result = re.sub('<span>网络释义</span>', '', result)
result = re.sub(r'例证:(.*?)li>', '', result)
result = re.sub(r'谚语或成语:(.*?)li>', '', result)
result = re.sub(r'<p class="exam-sen">(.*?)</p>', '', result)
result = re.sub(r'<(.*?)>', '', result)
if result[0] == "【":
return "未收录,日语暂不支持有道翻译函数"
result = result.split('【')[-1]
return '【'+result
def youdao_fr(html):
html = html.split('<!--Title -->')[-1]
html = html.split(
'<div id="webTrans" class="trans-wrapper trans-tab">')[0]
result = re.sub(r'<(.*?)>', '', html)
return "".join(result.split())
def de(html):
html = html.split('<div id="ExpFCChild" class="expDiv">')[-1]
n = 0
while(html[n] != '\n'):
n += 1
result = html[0:n-1]
result = re.sub(r'<i>(.*?)</i>', '', result)
result = re.sub(r'<span class=eg>(.*?)</span>', '', result)
result = re.sub(r'<span id="phrase">(.*?)</span>', '', result)
result = re.sub(r'<[a-zA-Z]{1,}(.*?)>', '', result)
result = re.sub(r'<\/.*?>', '', result)
result = re.sub(r'<\!.*?>', '', result)
result = "".join(result.split())
result = re.sub('赞踩改进更换举报initThumbnail', '', result)
result = re.sub('欧路软件版权所有', '', result)
result = re.sub('欧路软件', '', result)
result = re.sub('德语助手', '', result)
result = re.sub("()", '', result)
return result
def getImg(html, choice):
if(choice == 1):
return getimg.youdao(html)
if(choice == 2):
return getimg.jinshan(html)
if(choice == 3):
return getimg.bing(html)
if(choice == 4):
return getimg.haici(html)
if(choice == 5):
return getimg.youdao_jp(html)
if(choice == 6):
return getimg.youdao_fr(html)
if(choice == 7):
return getimg.de(html)
def url(choice): # choose the dictionary source site
if(choice == 1):
return "http://dict.youdao.com/w/eng/"
if(choice == 2):
return "https://www.iciba.com/word?w="
if(choice == 3):
return "https://cn.bing.com/dict/search?q="
if(choice == 4):
return "https://dict.cn/search?q="
if(choice == 5):
return "http://www.youdao.com/w/jap/"
if(choice == 6):
return "http://www.youdao.com/w/fr/"
if(choice == 7):
return "http://www.godic.net/dicts/de/"
def phrase(choice, word): # if the query is a phrase, escape the spaces for the URL
if(choice == 1):
return re.sub(' ', '%20', word)
if(choice == 2):
return re.sub(' ', '%20', word)
if(choice == 3):
return re.sub(' ', '+', word)
if(choice == 4):
return re.sub(' ', '+', word)
if(choice == 5):
return re.sub(' ', '%20', word)
if(choice == 6):
return re.sub(' ', '%20', word)
if(choice == 7):
ans = urllib.parse.quote(word)
return ans
def getHtml(url):
# 获得网址源代码
headers = {
"User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
page = requests.get(url, headers=headers)
page.encoding = 'utf-8'
html = page.text
return html
def help():
    help = '''
    ==================================================================================
    Help:
    choice:
        English >>
        1. Youdao (有道)
        2. iciba (金山)
        3. Bing
        4. Haici (海词)
        Japanese >>
        5. Youdao (有道)
        French >>
        6. Youdao (有道)
        German >>
        7. Godic (德语助手)
        The default lookup source is Youdao.
    functions:
        Look up a single word/phrase:
            search(word, choice=1)
        Look up a list of words/phrases and build a [dict]:
            wordlist_todict(wordlist, choice=1)
        Look up a list of words/phrases and build a list:
            wordlist_tolist(wordlist, choice=1, div = " : ", needword = True)
            div is the separator between each word and its meaning in the output list
            needword=False means the returned list contains only the explanations
    ==================================================================================
    '''
    print(help)
def search(word, choice=1):
_url = url(choice) + phrase(choice, word)
_html = getHtml(_url)
return getImg(_html, choice)
def wordlist_todict(wordlist, choice=1):
_dict = {}
for word in wordlist:
_dict[word] = search(word, choice)
return _dict
def wordlist_tolist(wordlist, choice=1, div=" : ", needword=True):
result_list = []
for word in wordlist:
result_list.append(
((word + div)if needword else "") + search(word, choice))
return result_list
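# Minimal usage sketch (requires network access; results depend on the sites above):
if __name__ == '__main__':
    print(search("apple"))  # single word, default Youdao source
    print(wordlist_tolist(["hello world", "python"], choice=3, div=" -> "))  # Bing source, custom separator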
| 31.410788 | 100 | 0.480713 | [
"Apache-2.0"
] | what-is-me/WordListEnquiry | Dict-search/__init__.py | 8,022 | Python |
#!/usr/bin/env python
import contextlib as __stickytape_contextlib
@__stickytape_contextlib.contextmanager
def __stickytape_temporary_dir():
import tempfile
import shutil
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
with __stickytape_temporary_dir() as __stickytape_working_dir:
def __stickytape_write_module(path, contents):
import os, os.path
def make_package(path):
parts = path.split("/")
partial_path = __stickytape_working_dir
for part in parts:
partial_path = os.path.join(partial_path, part)
if not os.path.exists(partial_path):
os.mkdir(partial_path)
with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
f.write(b"\n")
make_package(os.path.dirname(path))
full_path = os.path.join(__stickytape_working_dir, path)
with open(full_path, "wb") as module_file:
module_file.write(contents)
import sys as __stickytape_sys
__stickytape_sys.path.insert(0, __stickytape_working_dir)
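    # Note: because the temporary directory was just added to sys.path, the
    # modules written below are importable as if installed; the directory is
    # removed again when the surrounding context manager exits.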
__stickytape_write_module('dispatcher.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport select\nimport socket\nfrom typing import Any, Dict, Union, TextIO, TYPE_CHECKING, Optional, List\n\n\nif TYPE_CHECKING:\n from processor import Processor\n from pydev_server_monitor import PydevServerMonitor\n\n\nclass Dispatcher:\n """\n The dispatcher class implements the main loop of the program,\n waiting for new I/O inputs (either from socket or pipe),\n then calling the relevant processor to handle the input.\n\n It also regularly calls monitors which are used to perform health checks\n on Pydev debug servers. If auto_stop is enabled, the loop exits when the last\n monitor terminates (i.e. no Pydev debug servers are running).\n """\n def __init__(self, auto_stop: bool):\n self._port_to_processors: "Dict[Any, Processor]" = {}\n self._socket_to_processors: Dict[Union[socket.socket, TextIO], Processor] = {}\n self._server_monitors: Dict[Any, PydevServerMonitor] = {}\n self._auto_stop = auto_stop\n\n def add_processor(self, processor: "Processor"):\n self._port_to_processors[processor.key] = processor\n self._socket_to_processors[processor.socket] = processor\n\n def remove_processor(self, processor: "Processor"):\n try:\n del self._port_to_processors[processor.key]\n del self._socket_to_processors[processor.socket]\n except KeyError:\n pass\n processor.close()\n\n def add_server_monitor(self, monitor: "PydevServerMonitor"):\n self._server_monitors[monitor.key] = monitor\n\n def remove_server_monitor(self, monitor: "PydevServerMonitor"):\n try:\n del self._server_monitors[monitor.key]\n except KeyError:\n pass\n\n def find_processor(self, key: Any) -> "Optional[Processor]":\n return self._port_to_processors.get(key, None)\n\n def get_all_processors(self) -> "List[Processor]":\n return list(self._port_to_processors.values())\n\n def dispatch_loop(self):\n while True:\n inputs = list(self._socket_to_processors.keys())\n \n inputs_ready, _, _ = select.select(inputs, [], [], 1)\n\n for input_socket in inputs_ready:\n processor = self._socket_to_processors[input_socket]\n processor.on_input_ready()\n\n for monitor in list(self._server_monitors.values()):\n monitor.monitor()\n\n if self._auto_stop and len(self._server_monitors) == 0:\n return\n \n')
__stickytape_write_module('processor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport abc\nimport socket\nfrom typing import Any, Union, TextIO\n\n\nclass Processor(abc.ABC):\n @property\n @abc.abstractmethod\n def key(self) -> Any: raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def socket(self) -> Union[socket.socket, TextIO]: raise NotImplementedError\n\n @abc.abstractmethod\n def on_input_ready(self) -> None: raise NotImplementedError\n\n @abc.abstractmethod\n def close(self) -> None: raise NotImplementedError\n')
__stickytape_write_module('pydev_server_monitor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_server_monitor")\n\n\nclass PydevServerMonitor:\n """\n Monitor a local Pydev debug server.\n\n When initialised, this class sends a message to the remote to create a corresponding listening server.\n When the Pydev server stops, this class detects that the server is no longer running\n and also close the remote server.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start monitoring the port {local_port}")\n self._dispatcher = dispatcher\n self._local_port = local_port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._is_terminated = False\n\n if self.is_socket_alive():\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to start new server on port {local_port}")\n server.write(local_port, "", "start_server\\n")\n else:\n logger.debug(f"server is not running")\n self._is_terminated = True\n\n @property\n def key(self) -> Any:\n return self._local_port\n \n def is_socket_alive(self) -> bool:\n if self._is_terminated:\n return False\n\n try:\n self._socket.bind((\'\', int(self._local_port)))\n except Exception:\n return True\n\n try:\n self._socket.shutdown(2)\n except:\n pass\n\n return False\n\n def monitor(self):\n if not self.is_socket_alive() and not self._is_terminated:\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to stop server on port {self._local_port}")\n server.write(self._local_port, "", "stop_server\\n")\n self._dispatcher.remove_server_monitor(self)\n self._is_terminated = True\n')
__stickytape_write_module('pipe_client_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport fcntl\nimport logging\nimport os\nimport io\nfrom typing import Any, BinaryIO\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pipe_client_server")\n\n\nclass PipeClientServer(Processor):\n """\n This class handles the communication between the local and remote hosts using a pipe.\n """\n def __init__(self, dispatcher: Dispatcher, stdin: BinaryIO, stdout: BinaryIO):\n logger.debug("create new pipe client/server")\n self._dispatcher = dispatcher\n self._read_buffer = ""\n self._stdin = stdin\n self._stdout = stdout\n orig_fl = fcntl.fcntl(self._stdin, fcntl.F_GETFL)\n fcntl.fcntl(self._stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)\n\n @property\n def key(self) -> Any:\n return None\n\n @property\n def socket(self) -> BinaryIO:\n return self._stdin\n\n def on_input_ready(self):\n data = self._stdin.read(1024)\n if len(data) == 0:\n logger.debug("the end of the pipe has been closed. Exiting.")\n import sys\n sys.exit(0)\n\n self._read_buffer += (data if isinstance(data, str) else data.decode())\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n args = command.split("\\t", 2)\n\n local_port = args[0]\n remote_port = args[1]\n command = args[2]\n\n if command == "start_client":\n self.start_client(local_port, remote_port)\n elif command == "stop_client":\n self.close_client(local_port, remote_port)\n elif command == "start_server":\n self.start_server(local_port)\n elif command == "stop_server":\n self.stop_server(local_port)\n else:\n self.dispatch_command_to_client(local_port, remote_port, command+"\\n")\n\n def write(self, local_port: str, remote_port: str, command: str):\n data = local_port+"\\t"+remote_port+"\\t"+command\n if isinstance(self._stdout, (io.BufferedIOBase, io.RawIOBase)):\n data = data.encode()\n self._stdout.write(data)\n self._stdout.flush()\n\n def start_server(self, local_port: str):\n logger.debug(f"start the server on {local_port}")\n from pydev_server import PydevServer\n server = PydevServer(self._dispatcher, local_port)\n self._dispatcher.add_processor(server)\n\n def stop_server(self, local_port: str):\n logger.debug(f"stop the server on {local_port}")\n server = self._dispatcher.find_processor(local_port)\n self._dispatcher.remove_processor(server)\n\n def start_client(self, local_port: str, remote_port: str):\n from pydev_client import PydevClient\n logger.debug(f"create new client (local: {local_port}, remote: {remote_port}")\n client = PydevClient(self._dispatcher, local_port, remote_port)\n self._dispatcher.add_processor(client)\n\n def dispatch_command_to_client(self, local_port: str, remote_port: str, command: str):\n key = (local_port, remote_port)\n client = self._dispatcher.find_processor(key)\n client.write(command)\n\n def close_client(self, local_port: str, remote_port: str):\n logger.debug(f"close the client (local: {local_port}, remote: {remote_port})")\n key = (local_port, remote_port)\n\n client = self._dispatcher.find_processor(key)\n\n if client is not None:\n self._dispatcher.remove_processor(client)\n\n def close(self) -> None:\n pass\n')
__stickytape_write_module('pydev_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pydev_server")\n\n\nclass PydevServer(Processor):\n """\n Listen on the remote pod for new debugger connection and create a new client for each connection.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start new server on port {local_port}")\n self._dispatcher = dispatcher\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.bind((\'\', int(local_port)))\n self._socket.listen(100)\n self._socket.setblocking(False)\n self._local_port = str(local_port)\n\n @property\n def key(self) -> Any:\n return self._local_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n \n def on_input_ready(self):\n client_socket, address = self._socket.accept()\n remote_port = address[1]\n\n from pydev_client import PydevClient\n from pipe_client_server import PipeClientServer\n\n self._dispatcher.add_processor(\n PydevClient(self._dispatcher, self._local_port, str(remote_port), client_socket))\n \n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n server.write(self._local_port, str(remote_port), "start_client\\n")\n\n def close(self):\n self._socket.close()\n')
__stickytape_write_module('pydev_client.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_client")\n\n\nclass PydevClient(Processor):\n """\n Client which reads Pydev commands (either on the local or remote) and send them through the pipe\n to the other end.\n\n The client also detects when a Pydev debug server starts a new server.\n When this happens, a monitor is created to handle this new server.\n (this is part of the support for multiproc in PyCharm)\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str, remote_port: str, client_socket=None):\n logger.debug(f"start new client (local: {local_port}, remote: {remote_port})")\n self._read_buffer = ""\n self._dispatcher = dispatcher\n\n if client_socket is None:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(("127.0.0.1", int(local_port)))\n else:\n self._socket = client_socket\n\n self._socket.setblocking(False)\n self._local_port = local_port\n self._remote_port = remote_port\n\n @property\n def key(self) -> Any:\n return self._local_port, self._remote_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n\n def write(self, data: str):\n logger.debug("write: "+data)\n self._socket.sendall(data.encode())\n\n def on_input_ready(self):\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n recv_data = self._socket.recv(1024).decode()\n if len(recv_data) == 0:\n # The socket has been closed\n logger.debug(f"stop this client, and ask remote to stop (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n server.write(self._local_port, self._remote_port, "stop_client\\n")\n self._dispatcher.remove_processor(self)\n\n self._read_buffer += recv_data\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n # Detect when PyCharm tries to start a new server\n args = command.split("\\t", 2)\n if len(args) == 3 and args[0] == "99" and args[1] == "-1":\n new_local_port = args[2]\n logger.debug(f"start monitoring for {new_local_port} (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n from pydev_server_monitor import PydevServerMonitor\n self._dispatcher.add_server_monitor(PydevServerMonitor(self._dispatcher, new_local_port))\n \n logger.debug("read : "+command)\n server.write(self._local_port, self._remote_port, command+"\\n")\n\n def close(self):\n self._socket.close()\n')
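# Editorial note (not part of the stickytape bundle above): the modules written out above talk to
# each other over a single stdin/stdout pipe, and PipeClientServer frames every message on that
# pipe as "<local_port>\t<remote_port>\t<command>\n". The port values below are illustrative only.
#
#     >>> "5678" + "\t" + "" + "\t" + "start_server\n"        # ask the remote to listen on 5678
#     '5678\t\tstart_server\n'
#     >>> "5678" + "\t" + "43210" + "\t" + "stop_client\n"    # tear down one client pair
#     '5678\t43210\tstop_client\n'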
# Copyright 2021 Grégoire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
from dispatcher import Dispatcher
from pipe_client_server import PipeClientServer
from pydev_server_monitor import PydevServerMonitor
import sys
import subprocess
import os
import logging
is_local = len(sys.argv) > 1
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
format_header = "local" if is_local else "remote"
formatter = logging.Formatter('%(asctime)s - '+format_header+' %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if is_local:
    # Local connection worker.
    #
    # Start the child process (the remote end), establish the pipe between the parent and child process,
    # then add a monitor for the local Pydev server.
local_port = sys.argv[1]
worker_command = sys.argv[2:]
child = subprocess.Popen(worker_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
dispatcher = Dispatcher(auto_stop=True)
dispatcher.add_processor(PipeClientServer(dispatcher, child.stdout, child.stdin))
server_monitor = PydevServerMonitor(dispatcher, local_port)
if server_monitor.is_socket_alive():
dispatcher.add_server_monitor(server_monitor)
else:
# Remote connection worker.
#
# Establish the pipe between the parent and child process.
dispatcher = Dispatcher(auto_stop=False)
dispatcher.add_processor(PipeClientServer(dispatcher, sys.stdin, sys.stdout))
child = None
# Finally, start the main loop
dispatcher.dispatch_loop()
if child is not None:
child.terminate()
child.wait()
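# Illustrative invocation sketch (an editorial assumption inferred from the argument handling
# above, not taken from the original repository). The local side passes the Pydev debug-server
# port followed by the command that launches the remote copy of this script with piped
# stdin/stdout; the remote copy is started with no arguments and speaks only over the pipe:
#
#     python tunnel_single_script.py 5678 <command that runs this script on the remote host>
#     <command that runs this script on the remote host>      # what the remote end executes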
| 182.636364 | 3,959 | 0.673027 | ["Apache-2.0"] | gdlg/k8s-debugger-pycharm-pluggin | src/main/resources/pydev_tunnel/tunnel_single_script.py | 18,082 | Python |
"""
nuts_finder
-----------
You give it a point, it tells you every EU NUTS region that contains it
"""
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
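# Editorial doctest-style example (not in the original module): the "lower bound of the median"
# is the middle element for odd-length input and the lower of the two middle elements otherwise.
#
#     >>> _middle([5, 1, 3])
#     3
#     >>> _middle([3, 1, 4, 2])
#     2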
def _setattr(obj, value, value_name, regex, selector):
"""Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise
select a `value` from the available range of allowed values, selected by a custom `selector`
function.
Args:
obj: An object on which to run setattr
value: A value which if not None will be set as an attribute of object
value_name (str): The name of the new attribute
regex (str): regex string by which to find allowed values on the NUTS website.
selector (function): Function which takes an iterable and selects a value.
"""
allowed_values = _get_available(regex)
if value is None:
value = selector(allowed_values)
if value not in allowed_values:
raise ValueError(f"'{value_name}' must be one of {allowed_values}")
setattr(obj, value_name, value)
@lru_cache()
def _get_available(regex):
"""Use the provided regex to find allowed values on the NUTS website."""
r = requests.get(TOP_URL, verify=True)
values = set(int(yr) for yr in re.findall(regex, r.text))
return values
class NutsFinder:
"""
    Holds the NUTS shape data and exposes it to the user, providing a
    (lat, lon) to NUTS-region lookup.
"""
def __init__(self, year=None, scale=None):
"""
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
"""
self.years = list(_get_available(YEAR_REGEX))
self.year_selector = max
_setattr(self, year, "year", YEAR_REGEX, self.year_selector)
_setattr(self, scale, "scale", SCALE_REGEX, _middle) # Take the middle scale
self.shapes = self._get_shapes()
def _get_shapes(self):
"""Load the shape files for the given year and scale"""
scale = str(self.scale).zfill(2)
filename = NESTED_FILE.format(year=self.year, scale=scale)
url = ZIP_URL.format(year=self.year, scale=scale)
r = requests.get(url, verify=True)
r.raise_for_status()
try:
with ZipFile(BytesIO(r.content)) as zipfile:
with zipfile.open(filename) as f:
shapes = geojson.load(f)
# For some reason this year/scale isn't available
except KeyError:
logging.warning(
f"No match for this year ({self.year}) and scale ({self.scale})"
)
# Remove this year from the sample and try another year
self.years.remove(self.year)
self.year = self.year_selector(self.years)
logging.warning(f"Retrying with year ({self.year})")
return self._get_shapes()
return shapes
def find(self, lat, lon):
"""Find every NUTS region for this lat, lon"""
p = geometry.Point(lon, lat)
nuts = []
for region in self.shapes["features"]:
s = geometry.shape(region["geometry"])
if s.contains(p):
nuts.append(region["properties"])
return sorted(nuts, key=lambda row: row["LEVL_CODE"])
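# Minimal usage sketch (editorial addition, not part of the original module); the coordinates
# are illustrative, and the NUTS GeoJSON is downloaded from Eurostat when the finder is constructed.
if __name__ == "__main__":
    nf = NutsFinder()                       # defaults: latest available year, middle scale
    for properties in nf.find(lat=51.5, lon=-0.1):
        # find() returns the GeoJSON properties of every NUTS region containing the point,
        # sorted by NUTS level ("LEVL_CODE").
        print(properties["LEVL_CODE"], properties)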
| 36.055046 | 97 | 0.637913 | ["MIT"] | nestauk/nuts_finder | nuts_finder/nuts_finder.py | 3,930 | Python |