| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40 - 40 |
| size | int64 | 3 - 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 - 972 |
| max_stars_repo_name | stringlengths | 6 - 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 - 78 |
| max_stars_repo_licenses | listlengths | 1 - 10 |
| max_stars_count | int64 | 1 - 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 - 972 |
| max_issues_repo_name | stringlengths | 6 - 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 - 78 |
| max_issues_repo_licenses | listlengths | 1 - 10 |
| max_issues_count | int64 | 1 - 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 - 972 |
| max_forks_repo_name | stringlengths | 6 - 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 - 78 |
| max_forks_repo_licenses | listlengths | 1 - 10 |
| max_forks_count | int64 | 1 - 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| content | stringlengths | 3 - 1.03M |
| avg_line_length | float64 | 1.13 - 941k |
| max_line_length | int64 | 2 - 941k |
| alphanum_fraction | float64 | 0 - 1 |

⌀ marks columns that also contain null values. Each record below lists these 25 metadata fields on a single line, in the order above, followed by the file content and a closing line with its avg_line_length, max_line_length and alphanum_fraction.
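As a quick illustration of the schema, a record can be treated as a plain dict keyed by the column names above and filtered on its metadata. The sketch below is not part of the dataset tooling; the thresholds are arbitrary and the loading step is left out, but the field names and the sample values come from the first record in this section.

```python
def keep_record(row: dict) -> bool:
    """Keep permissively licensed Python files that look like ordinary code."""
    licenses = set(row["max_stars_repo_licenses"])
    return (
        row["ext"] == "py"
        and row["alphanum_fraction"] > 0.25          # drop binary-ish blobs
        and row["max_line_length"] < 1000            # drop minified one-liners
        and not licenses.isdisjoint({"MIT", "Apache-2.0", "BSD-3-Clause"})
    )

# Metadata values taken from the first record below (lconfig.py).
sample = {
    "ext": "py",
    "max_stars_repo_licenses": ["MIT"],
    "alphanum_fraction": 0.708738,
    "max_line_length": 55,
}
print(keep_record(sample))  # True
```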
4b922114fe8857917cc97991185cf8a10ca74b3e | 618 | py | Python | lconfig.py | litecord/rest-py | 1fcc08566dafd435e1df701a5928b0806ce9a294 | ["MIT"] | null | null | null | lconfig.py | litecord/rest-py | 1fcc08566dafd435e1df701a5928b0806ce9a294 | ["MIT"] | 6 | 2018-02-22T21:03:18.000Z | 2018-02-23T21:29:53.000Z | lconfig.py | litecord/rest-py | 1fcc08566dafd435e1df701a5928b0806ce9a294 | ["MIT"] | null | null | null |
# configuration for litecord rest
# Where to start the webserver
server_url = ('0.0.0.0', 8000)
ssl = False
ssl_certfile = ''
ssl_keyfile = ''
# Where the gateway is in the world
gateway_url = 'ws://localhost:8081/gw'
# Where the litebridge connection will happen
litebridge_server = 'ws://localhost:10101/'
litebridge_password = '123'
# Postgres arguments
pgargs = {
'user': 'litecord',
'password': '123',
'database': 'litecord',
'host': 'localhost',
}
# recommended amount is 1000 guilds for each shard
# changing this can lead to overall service degradation
# on high loads
GUILDS_SHARD = 1000
| 22.071429 | 55 | 0.708738 |
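lconfig.py above only declares settings; nothing in the record shows how litecord consumes them. Below is a minimal sketch of one way a caller might read the module; the DSN string and the printed summary are illustrative assumptions, not litecord's actual startup code.

```python
import lconfig  # the module shown above

host, port = lconfig.server_url
dsn = (
    f"postgres://{lconfig.pgargs['user']}:{lconfig.pgargs['password']}"
    f"@{lconfig.pgargs['host']}/{lconfig.pgargs['database']}"
)

print(f"REST server will bind on {host}:{port} (ssl={lconfig.ssl})")
print(f"gateway: {lconfig.gateway_url}")
print(f"litebridge: {lconfig.litebridge_server}")
print(f"postgres DSN: {dsn}")
print(f"guilds per shard: {lconfig.GUILDS_SHARD}")
```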
3991e3c3070b3c6658247242dfbc7361cced9cfe | 545 | py | Python | Ekler/Neural_Network/skNN.py | ardamavi/SesimVar | ed344b4a88e6186127d9488c1787d7232fcd7c38 | ["Apache-2.0"] | 18 | 2017-04-01T19:50:02.000Z | 2021-03-10T06:52:17.000Z | Ekler/Neural_Network/skNN.py | sygops/SesimVar | ed344b4a88e6186127d9488c1787d7232fcd7c38 | ["Apache-2.0"] | 1 | 2017-04-01T19:47:20.000Z | 2017-04-01T19:47:20.000Z | Ekler/Neural_Network/skNN.py | sygops/SesimVar | ed344b4a88e6186127d9488c1787d7232fcd7c38 | ["Apache-2.0"] | 6 | 2018-02-17T20:58:14.000Z | 2021-01-06T23:37:16.000Z |
# Arda Mavi
from sklearn.neural_network import MLPClassifier
def egitim(X,y):
    # X => sample input data for training.
    # y => expected outputs for the training inputs X.
# MLPClassifier -> multi-layer perceptron (MLP)
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(2), random_state=1)
    # Train on the training data:
clf = clf.fit(X, y)
return clf
def tahmin(clf, X):
    # clf => a previously trained classifier.
    # X => input values for the neural network.
return clf.predict(X)
| 27.25 | 90 | 0.689908 |
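A minimal usage sketch for egitim() and tahmin() above. It is not part of the original file: the toy logical-AND data, the import path and the expected prediction are illustrative assumptions, and scikit-learn must be installed.

```python
from skNN import egitim, tahmin  # assumes the file above is importable as skNN.py

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]  # sample training inputs
y = [0, 0, 0, 1]                              # expected outputs (logical AND)

clf = egitim(X, y)                        # fit the MLPClassifier
print(tahmin(clf, [[1., 1.], [0., 1.]]))  # typically array([1, 0])
```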
ba1e02988122dbee1766330fbf0929a094e611f2 | 3,656 | py | Python | GPCR/main_bert.py | nepp1d0/transformerCPI | a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a | ["Apache-2.0"] | null | null | null | GPCR/main_bert.py | nepp1d0/transformerCPI | a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a | ["Apache-2.0"] | null | null | null | GPCR/main_bert.py | nepp1d0/transformerCPI | a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time:Created on 2019/9/17 8:54
@author: LiFan Chen
@Filename: main.py
@Software: PyCharm
"""
import torch
import numpy as np
import random
import os
import time
import timeit
import warnings
from model_bert import BERT
from model_bert import Trainer
def load_tensor(file_name, dtype):
return [dtype(d).to(device) for d in np.load(file_name + '.npy', allow_pickle=True)]
def shuffle_dataset(dataset, seed):
np.random.seed(seed)
np.random.shuffle(dataset)
return dataset
def split_dataset(dataset, ratio):
n = int(ratio * len(dataset))
dataset_1, dataset_2 = dataset[:n], dataset[n:]
return dataset_1, dataset_2
if __name__ == "__main__":
SEED = 1
random.seed(SEED)
torch.manual_seed(SEED)
# torch.backends.cudnn.deterministic = True
DATASET = "GPCR_train"
"""CPU or GPU"""
if torch.cuda.is_available():
device = torch.device('cuda:0')
print('The code uses GPU...')
else:
device = torch.device('cpu')
print('The code uses CPU!!!')
"""Load preprocessed data."""
dir_input = ('tokenizer_directory/' )
smiles = load_tensor(dir_input + 'smiles', torch.FloatTensor)
targets = load_tensor(dir_input + 'targets', torch.FloatTensor)
labels = load_tensor(dir_input + 'labels', torch.FloatTensor)
print(f'Shape of smiles loaded tensors {smiles[0].shape}')
print(f'Shape of smiles masks loaded tensors {smiles[1].shape}')
"""Create a dataset and split it into train/dev/test."""
dataset = list(zip(smiles[0], smiles[1], targets[0], targets[1], labels))
dataset = shuffle_dataset(dataset, 1234)
dataset_train, dataset_dev = split_dataset(dataset, 0.8)
""" create model ,trainer and tester """
protein_dim = 100
atom_dim = 100
hid_dim = 64
n_layers = 3
n_heads = 8
pf_dim = 256
dropout = 0.1
batch = 64
lr = 1e-4
weight_decay = 1e-4
decay_interval = 5
lr_decay = 1.0
iteration = 300
kernel_size = 7
print("Building BERT model")
model = BERT(70 - 5, hidden=hid_dim, n_layers=n_layers, attn_heads=n_heads) # 70 (smiles_vocab_size + targets_vocab_size) - 5 (special_tokens not repeated)
# model.load_state_dict(torch.load("output/model/lr=0.001,dropout=0.1,lr_decay=0.5"))
model.to(device)
trainer = Trainer(model, lr, weight_decay, batch)
#tester = Tester(model)
"""Output files."""
file_AUCs = 'output_bert/result/bertv1' + '.txt'
file_model = 'output_bert/model/' + 'bertv1'
AUCs = ('Epoch\tTime(sec)\tLoss_train\tAUC_dev\tPRC_dev')
if not os.path.exists('output_bert'):
os.makedirs('output_bert')
if not os.path.exists('output_bert/result'):
os.makedirs('output_bert/result')
if not os.path.exists('output_bert/model'):
os.makedirs('output_bert/model')
with open(file_AUCs, 'w') as f:
f.write(AUCs + '\n')
"""Start training."""
print('Training...')
print(AUCs)
start = timeit.default_timer()
max_AUC_dev = 0
for epoch in range(1, iteration+1):
if epoch % decay_interval == 0:
trainer.optimizer.param_groups[0]['lr'] *= lr_decay
loss_train = trainer.train(dataset_train, device)
#AUC_dev, PRC_dev = tester.test(dataset_dev)
end = timeit.default_timer()
time = end - start
#tester.save_AUCs(AUCs, file_AUCs)
AUCs = [epoch, time, loss_train]
print('\t'.join(map(str, AUCs)))
'''if AUC_dev > max_AUC_dev:
tester.save_model(model, file_model)
max_AUC_dev = AUC_dev
print('\t'.join(map(str, AUCs)))'''
| 29.723577 | 160 | 0.646061 |
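shuffle_dataset() and split_dataset() are the only pieces of main_bert.py that run without the preprocessed tensors or the BERT model. A small sketch with those two helper bodies repeated from the file, applied to a toy list to show the 80/20 train/dev split:

```python
import numpy as np

def shuffle_dataset(dataset, seed):
    np.random.seed(seed)
    np.random.shuffle(dataset)
    return dataset

def split_dataset(dataset, ratio):
    n = int(ratio * len(dataset))
    dataset_1, dataset_2 = dataset[:n], dataset[n:]
    return dataset_1, dataset_2

dataset = list(range(10))                     # stand-in for the zipped tensors
dataset = shuffle_dataset(dataset, 1234)
dataset_train, dataset_dev = split_dataset(dataset, 0.8)
print(len(dataset_train), len(dataset_dev))   # 8 2
```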
ccbaa1d4bd9a9a155dc22845efdaeb1b42c9a366 | 18,841 | py | Python | Hangman example test thing 3.py | nicholashall14/Hangman-Game-Python | 94a75f95b873001f72b94cb770a9c3bf66cdb70e | ["MIT"] | null | null | null | Hangman example test thing 3.py | nicholashall14/Hangman-Game-Python | 94a75f95b873001f72b94cb770a9c3bf66cdb70e | ["MIT"] | null | null | null | Hangman example test thing 3.py | nicholashall14/Hangman-Game-Python | 94a75f95b873001f72b94cb770a9c3bf66cdb70e | ["MIT"] | null | null | null |
#################################################
# Author : Ciel, imwithye
# Matric No : U1220539K
# Group : FS4
#################################################
import time
import random
easy = ['PHONE','HAPPY','APPLE','EARTH','GONGYIWEI']
normal = ['PYTHON','PIONEER','SINGAPORE','FATHER','MOTHER','GONGYIWEI']
#################################################
#clear : This function will clear the whole screen with 30 blank lines
#
#pre-condition : NONE
#post-condition : print out 30 blank lines
#################################################
def clear():
for i in range(30):
print ('\n')
#################################################
#hangmanInterface : This function will print a basic interface of Hangman.
# The argument index means how many steps left.
#pre-condition : index is the number of steps left and should be GREATER than or EQUAL to 0!
#post-condition : print out a basic interface of Hangman
#################################################
def hangmanInterface(index):
if index==0:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' /|\ | ')
print(' / \ | ')
print(' | ')
print(' ________|_')
return
if index==1:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' /|\ | ')
print(' / | ')
print(' | ')
print(' ________|_')
return
if index==2:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' /|\ | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==3:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' /| | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==4:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' | | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==5:
print(' _____ ')
print(' | | ')
print(' O | ')
print(' | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==6:
print(' _____ ')
print(' | | ')
print(' | ')
print(' | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==7:
print(' _____ ')
print(' | ')
print(' | ')
print(' | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==8:
print(' ')
print(' | ')
print(' | ')
print(' | ')
print(' | ')
print(' | ')
print(' ________|_')
return
if index==9:
print(' ')
print(' ')
print(' ')
print(' ')
print(' ')
print(' ')
print(' ________|_')
return
#################################################
#startInterface : startInterface is the first user interface
#pre-condition : input should be an integer between 1 and 3
#post-condition : this function will give out the user's choice from the main menu
#################################################
def startInterface():
clear()
print('####################')
print('# #')
print('# Hangman #')
print('# #')
print('####################')
print(' 1.Start #')
print(' 2.Copyright #')
print(' 3.Exit #')
choice = input('Input Selection: ')
return choice
#################################################
#startInterface2 : startInterface2 is the second interface user will see
#pre-condition : input should be an integer between 1 and 3
#post-condition : this function will give out user's choice of degree of difficulty
#################################################
def startInterface2():
clear()
print('####################')
print('# #')
print('# Hangman #')
print('# #')
print('####################')
print(' 1.Easy #')
print(' 2.Normal #')
print(' 3.Expert #')
choice = input('Input Selection: ')
return choice
#################################################
#copyrightInterface : this function shows Copyright
#pre-condition : input can be anything
#post-condition : bring user back to first user interface
#################################################
def copyrightInterface():
clear()
print('####################')
print('# #')
print('# Hangman #')
print('# #')
print('####################')
print('# Author: Ciel, imwithye')
print('# School of Computer Engeineering')
print('# Nanyang Technological University')
print('key in any input to go back')
input('')
return
#################################################
#gameInterface : gameInterface is the main user interface in gaming.
#pre-condition : guess is the user's progress, miss_attempts should not be greater than 6, misses is the history of mistakes
#post-condition : print out the interface and return # if restart, return ! if call for
# help,return character else
#################################################
def gameInterface(guess,miss_attempts,misses,hintleft):
clear()
left = 9-miss_attempts
hangmanInterface(left)
print('###########################################')
print('Word: ',end='')
for i in guess:
print(i,' ',end='')
print()
print('# Misses: ',end='')
for i in misses:
print(i,' ',end='')
print()
if left != 0:
print('# You have ',left,' attempt(s) left')
print('# You have ',hintleft,' hint(s) left')
print('# input ? to get hint, # to restart and ! to see answer')
_in = input('# Guess: ')
if len(_in)>1:
return '<'
else:
if _in == '?' or _in == '#' or _in == '!':
return _in
elif _in.isalpha():
return _in.upper()
else:
return '<'
else:
print('# key in any input to Try Again!')
_in = input('')
return '#'
#################################################
#game : game is the process of gaming. return false if game ends
#pre-condition : the answer 'word' should be a string. hintMax should not be greater than 3
#post-condition : return false if game ends
#################################################
def game(word,hintMax):
length = len(word)
miss = 0
hintTimes = 0
misses = []
guess = ['_' for i in range(length)]
while True:
operation = gameInterface(guess,miss,misses,hintMax-hintTimes)
if operation == '#':
print('Restarting...')
time.sleep(3)
return True
elif operation == '?':
if hintTimes<hintMax:
operation = hint(word,guess)
for i in range(length):
if word[i]==operation:
guess[i] = operation
hintTimes = hintTimes+1
else:
print('# Can not get hint any more!Try your best!')
print('# Wait for 2 seconds')
time.sleep(2)
continue
elif operation == '<':
print('# Please input correctly!')
print('# Wait for 2 seconds')
time.sleep(2)
continue
elif operation == '!':
clear()
hangmanInterface(9-miss)
print('###########################################')
print('# The answer is',word)
print('# wait fewer seconds to back to main menu')
time.sleep(3)
return False
else:
flag=0
for i in range(length):
if word[i]==operation:
guess[i] = operation
flag = 1
if flag==0:
miss = miss+1
if not operation in misses:
misses.append(operation)
if not '_' in guess:
clear()
hangmanInterface(9-miss)
print('###########################################')
print('# ',end='')
for i in guess:
print(i,' ',end='')
print()
print('# Great!')
time.sleep(3)
return False
#################################################
#hint : this function will return a correct character
#pre-condition : word should be a string and guess should be a list
#post-condition : return back a correct character
#################################################
def hint(word,guess):
length = len(word)
for i in range(length):
if guess[i]=='_':
hintletter = word[i]
return hintletter
#################################################
#getWord : getWord function will get a word randomly
# Admin model can see the whole library and add words
#pre-condition : sel is the degree of difficulty and should be less than 3
# if sel is greater than 3, that means Admin model!
#post-condition : return a word whose type is string.
# Or Admin model to access library!
#################################################
def getWord(sel):
global easy,normal
if sel==3:
sel = 2
if sel == 1:
length = len(easy)
index = random.randrange(0,length)
return easy[index]
elif sel == 2:
length = len(normal)
index = random.randrange(0,length)
return normal[index]
else:
while True:
clear()
print('####################################')
print('# Edit Library, input "exit()" to go back to main menu:')
print('# 1.Show Library')
print('# 2.Add word; Program will automatically select degree of difficulty')
print('# 3.RESET to default.')
print('####################################')
selection = input('Selection: ')
if selection == 'exit()':
return 'DONE'
elif selection == '1':
print('_________________Library Files___________________')
print('easy:')
for i in easy:
print(' ',i)
print()
print('normal and expert:')
for i in normal:
print(' ',i)
print()
print('_________________Library Files___________________')
print('# key in any input to return')
temp = input()
elif selection == '2':
while True:
print('_________________Addition___________________')
print('Use end() to end')
add = input()
if add != 'end()':
if add.isalpha():
degree = check(add)
if degree == 1:
if not add.upper() in easy:
easy.append(add.upper())
print('# Add successfully!')
else:
print('# REPEAT! Words has ready added!')
else:
if not add.upper() in normal:
normal.append(add.upper())
print('# Add successfully!')
else:
print('# REPEAT! Words has ready added!')
else:
print('# Please input correctly!')
continue
else:
print('_________________Addition___________________')
print('Addition Done')
time.sleep(2)
break
elif selection == '3':
while True:
print('# Please be sure all data will be set to default!')
print('# key in "confirm()" to continue and key in anything else to cancel')
temp = input()
if temp=='confirm()':
easy = ['PHONE','HAPPY','APPLE','EARTH','GONGYIWEI']
normal = ['PYTHON','PIONEER','SINGAPORE','FATHER','MOTHER','GONGYIWEI']
print('# Successfully! wait 2 seconds.')
time.sleep(2)
break
else:
print('# Canceled! wait 2 seconds.')
time.sleep(2)
break
else:
continue
#################################################
#check : Check the whether is easy or normal
#pre-condition : addition should be a string containing only alphabetic characters
#post-condition : return 1 if it is easy return 2 if it is normal
#################################################
def check(addition):
sign = 0
result = ['CHECK']
for i in addition:
if not i in result:
result.append(i)
sign = len(result)
if sign>6:
return 2
else:
return 1
#################################################
# mainfunction: Hangman's main function
# goal : A command line Hangman game
# explain : There are three degrees of difficulty to choose from;
# easy is a short word with repeated letters
# normal degree, the word will not be too long
# expert will be very difficult.
# normal and expert use the same library but expert gets fewer hints
# Author : GONG YIWEI
# Matric No : U1220539K
# Group : FS4
#################################################
clear()
print(" __ __ ___ .__ __. _______ .___ ___. ___ .__ __.")
print("| | | | / \ | \ | | / _____|| \/ | / \ | \ | |")
print("| |__| | / ^ \ | \| | | | __ | \ / | / ^ \ | \| |")
print("| __ | / /_\ \ | . ` | | | |_ | | |\/| | / /_\ \ | . ` |")
print("| | | | / _____ \ | |\ | | |__| | | | | | / _____ \ | |\ |")
print("|__| |__| /__/ \__\ |__| \__| \______| |__| |__| /__/ \__\ |__| \__|")
print('#######################################################')
print('# key in admin to get in admin model! #')
print('# key in whosyourdaddy to get in special model! #')
print('# Warnning! admin mode is ONLY for developers! #')
print('# any bugs please report to #')
print('# http://github.com/imwithye #')
print('# email: [email protected] #')
print('#######################################################')
print()
print('# Wait for starting...',end='')
print('<====',end='')
print('====',end='')
print('====',end='')
print('====',end='')
print('====> 100% DONE!')
input('# Press Enter')
while True:
gameProcess = True
sel = startInterface()
if sel=='1':
while True:
sel2 = startInterface2()
if not sel2.isdigit():
continue
elif len(sel2)>1:
continue
elif int(sel2)>3 or int(sel2)<0:
continue
else:
word = getWord(int(sel2))
while gameProcess:
gameProcess = game(word,4-int(sel2))
break
elif sel=='2':
copyrightInterface()
continue
elif sel=='3':
break
elif sel=='admin':
getWord(4)
elif sel=='whosyourdaddy':
word = 'HANGMAN'
temp = game(word,1000)
else:
continue
| 40.605603 | 104 | 0.359854 |
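The check() routine near the end of the file picks the difficulty bucket by counting distinct letters: the 'CHECK' sentinel inflates the count by one, so a word with more than five distinct letters is filed under normal/expert. Below is a compact reproduction of that logic (not part of the original file) with two worked calls:

```python
def check(addition):
    # Mirrors check() above: count distinct characters plus the 'CHECK' sentinel.
    result = ['CHECK']
    for i in addition:
        if i not in result:
            result.append(i)
    sign = len(result)
    return 2 if sign > 6 else 1

print(check("HAPPY"))   # 1 -> easy   (4 distinct letters, sign = 5)
print(check("PYTHON"))  # 2 -> normal (6 distinct letters, sign = 7)
```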
89f11fefed8fd135e8461264054ff4357f0133c4 | 4,242 | py | Python | hypha/apply/api/v1/utils.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | ["BSD-3-Clause"] | 20 | 2021-04-08T16:38:49.000Z | 2022-02-09T20:05:57.000Z | hypha/apply/api/v1/utils.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | ["BSD-3-Clause"] | 1,098 | 2017-12-15T11:23:03.000Z | 2020-01-24T07:58:07.000Z | hypha/apply/api/v1/utils.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | ["BSD-3-Clause"] | 17 | 2020-02-07T14:55:54.000Z | 2021-04-04T19:32:38.000Z |
from collections import OrderedDict
from django import forms
from django.contrib.auth import get_user_model
from django.db.models import Q
from tinymce.widgets import TinyMCE
from wagtail.core.models import Page
from hypha.apply.categories.models import Option
from hypha.apply.funds.models import ApplicationSubmission, Round, ScreeningStatus
from hypha.apply.review.fields import ScoredAnswerField, ScoredAnswerWidget
from hypha.apply.stream_forms.forms import BlockFieldWrapper
from hypha.apply.users.groups import STAFF_GROUP_NAME
User = get_user_model()
def get_field_kwargs(form_field):
if isinstance(form_field, BlockFieldWrapper):
return {'text': form_field.block.value.source}
kwargs = OrderedDict()
kwargs = {
'initial': form_field.initial,
'required': form_field.required,
'label': form_field.label,
'label_suffix': form_field.label_suffix,
'help_text': form_field.help_text,
'help_link': form_field.help_link
}
if isinstance(form_field, forms.CharField):
if hasattr(form_field, 'word_limit'):
kwargs['word_limit'] = form_field.word_limit
kwargs['max_length'] = form_field.max_length
kwargs['min_length'] = form_field.min_length
kwargs['empty_value'] = form_field.empty_value
if isinstance(form_field, forms.ChoiceField):
kwargs['choices'] = form_field.choices
if isinstance(form_field, forms.TypedChoiceField):
kwargs['empty_value'] = form_field.empty_value
if isinstance(form_field, forms.IntegerField):
kwargs['max_value'] = form_field.max_value
kwargs['min_value'] = form_field.min_value
if isinstance(form_field, ScoredAnswerField):
fields = [
{
'type': form_field.fields[0].__class__.__name__,
'max_length': form_field.fields[0].max_length,
'min_length': form_field.fields[0].min_length,
'empty_value': form_field.fields[0].empty_value
},
{
'type': form_field.fields[1].__class__.__name__,
'choices': form_field.fields[1].choices,
},
]
kwargs['fields'] = fields
return kwargs
def get_field_widget(form_field):
if isinstance(form_field, BlockFieldWrapper):
return {'type': 'LoadHTML', 'attrs': {}}
widget = {
'type': form_field.widget.__class__.__name__,
'attrs': form_field.widget.attrs
}
if isinstance(form_field.widget, TinyMCE):
mce_attrs = form_field.widget.mce_attrs
plugins = mce_attrs.get('plugins')
if not isinstance(plugins, list):
mce_attrs['plugins'] = [plugins]
if 'toolbar1' in mce_attrs:
mce_attrs['toolbar'] = mce_attrs.pop('toolbar1')
widget['mce_attrs'] = mce_attrs
if isinstance(form_field.widget, ScoredAnswerWidget):
field_widgets = form_field.widget.widgets
widgets = [
{
'type': field_widgets[0].__class__.__name__,
'attrs': field_widgets[0].attrs,
'mce_attrs': field_widgets[0].mce_attrs
},
{
'type': field_widgets[1].__class__.__name__,
'attrs': field_widgets[1].attrs,
}
]
widget['widgets'] = widgets
return widget
def get_round_leads():
return User.objects.filter(submission_lead__isnull=False).distinct()
def get_reviewers():
""" All assigned reviewers, staff or admin """
return User.objects.filter(Q(submissions_reviewer__isnull=False) | Q(groups__name=STAFF_GROUP_NAME) | Q(is_superuser=True)).distinct()
def get_screening_statuses():
return ScreeningStatus.objects.filter(
id__in=ApplicationSubmission.objects.all().values('screening_statuses__id').distinct('screening_statuses__id'))
def get_used_rounds():
return Round.objects.filter(submissions__isnull=False).distinct()
def get_used_funds():
# Use page to pick up on both Labs and Funds
return Page.objects.filter(applicationsubmission__isnull=False).distinct()
def get_category_options():
return Option.objects.filter(
category__filter_on_dashboard=True
)
| 35.647059 | 138 | 0.67091 |
6d8c2c5848af5f1b26c7e919bf95d2aae504f0df | 16,775 | py | Python | setup.py | halvisg/dtaidistance | 80a23e9998bc3f3ca50213c2fd27d307ef6098e5 | ["Apache-2.0"] | null | null | null | setup.py | halvisg/dtaidistance | 80a23e9998bc3f3ca50213c2fd27d307ef6098e5 | ["Apache-2.0"] | null | null | null | setup.py | halvisg/dtaidistance | 80a23e9998bc3f3ca50213c2fd27d307ef6098e5 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
python3 setup.py build_ext --inplace
"""
from setuptools import setup, Command, find_packages
from setuptools.extension import Extension
from setuptools.command.test import test as TestCommand
from setuptools.command.sdist import sdist as SDistCommand
from setuptools.command.build_ext import build_ext as BuildExtCommand
from setuptools.command.install import install
from setuptools import Distribution
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import platform
import os
import sys
import re
import subprocess as sp
from pathlib import Path
try:
import numpy
except ImportError:
numpy = None
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
here = Path(__file__).parent
dtaidistancec_path = Path('dtaidistance') / 'lib' / 'DTAIDistanceC' / 'DTAIDistanceC'
c_args = {
# Xpreprocessor is required for the built-in CLANG on macos, but other
# installations of LLVM don't seem to be bothered by it (although it's
# not required.
# GCC should also not be bothered by it but appears to be on some systems.
'unix': ['-Xpreprocessor', '-fopenmp',
'-I'+str(dtaidistancec_path)],
'msvc': ['/openmp', '/Ox', '/fp:fast', '/favor:INTEL64', '/Og',
'/I'+str(dtaidistancec_path)],
'mingw32': ['-fopenmp', '-O3', '-ffast-math', '-march=native', '-DMS_WIN64',
'-I'+str(dtaidistancec_path)],
'llvm': ['-Xpreprocessor', '-fopenmp', # custom key for Homebrew llvm
'-I'+str(dtaidistancec_path)],
'gnugcc': ['-Xpreprocessor', '-fopenmp', # custom key for GNU GCC
'-I'+str(dtaidistancec_path)]
}
l_args = {
'unix': ['-Xpreprocessor', '-fopenmp'], # '-lgomp' / '-lomp'
'msvc': [],
'mingw32': ['-fopenmp'],
'llvm': ['-Xpreprocessor', '-fopenmp', '-lomp'], # custom key for Homebrew llvm
'gnugcc': ['-Xpreprocessor', '-fopenmp', '-lgomp'] # custom key for GNU GCC
}
class PyTest(TestCommand):
description = "Run tests"
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
pytest_args = []
test_args = []
def initialize_options(self):
self.pytest_args = ['--ignore=venv']
try:
import pytest_benchmark
self.pytest_args += ['--benchmark-skip']
except ImportError:
print("No benchmark library, ignore benchmarks")
self.pytest_args += ['--ignore', str(Path('tests') / 'test_benchmark.py')]
def finalize_options(self):
pass
def run_tests(self):
import pytest
sys.path.append('.')
errno = pytest.main(self.pytest_args)
sys.exit(errno)
class MyDistribution(Distribution):
global_options = Distribution.global_options + [
('noopenmp', None, 'No compiler/linker flags for OpenMP'),
('forceopenmp', None, 'Force compiler/linker flags with OpenMP'),
('noxpreprocessor', None, 'Assume OpenMP is built-in (remove -Xpreprocessor argument)'),
('forcellvm', None, 'Force compile/linker flags for LLVM'),
('forcegnugcc', None, 'Force compile/linker flags for GNU GCC'),
]
def __init__(self, attrs=None):
self.noopenmp = 0
self.forceopenmp = 0
self.noxpreprocessor = 0
self.forcellvm = 0
self.forcegnugcc = 0
super().__init__(attrs)
class MyInstallCommand(install):
pass
# def initialize_options(self):
# install.initialize_options(self)
# def finalize_options(self):
# install.finalize_options(self)
# def run(self):
# install.run(self)
def set_custom_envvars_for_homebrew():
"""Update environment variables automatically for Homebrew if CC is not set"""
# DEPRECATED. OpenMP is now supported through -Xpreprocessor
# if platform.system() == 'Darwin' and "CC" not in os.environ:
# print("Set custom environment variables for Homebrew Clang because CC is not set")
# cppflags = []
# if "CPPFLAGS" in os.environ:
# cppflags.append(os.environ["CPPFLAGS"])
# cflags = []
# if "CFLAGS" in os.environ:
# cflags.append(os.environ["CFLAGS"])
# ldflags = []
# if "LDFLAGS" in os.environ:
# ldflags.append(os.environ["LDFLAGS"])
# if os.path.exists("/usr/local/opt/llvm/bin/clang"):
# # We have a recent version of LLVM that probably supports openmp to compile parallel C code (installed using
# # `brew install llvm`).
# os.environ["CC"] = "/usr/local/opt/llvm/bin/clang"
# print("CC={}".format(os.environ["CC"]))
# ldflags += ["-L/usr/local/opt/llvm/lib"]
# cppflags += ["-I/usr/local/opt/llvm/include"]
# cflags += ["-I/usr/local/opt/llvm/include"]
# try:
# mac_ver = [int(nb) for nb in platform.mac_ver()[0].split(".")]
# if mac_ver[0] == 10 and mac_ver[1] >= 14:
# # From Mojave on, the header files are part of Xcode.app
# incpath = '-I/Applications/Xcode.app/Contents/Developer/Platforms/' + \
# 'MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include'
# cppflags += [incpath]
# cflags += [incpath]
# except Exception as exc:
# print("Failed to check version")
# print(exc)
# else:
# # The default clang in XCode is compatible with OpenMP when using -Xpreprocessor
# pass
#
# if len(cppflags) > 0:
# os.environ["CPPFLAGS"] = " ".join(cppflags)
# print("CPPFLAGS={}".format(os.environ["CPPFLAGS"]))
# if len(cflags) > 0:
# os.environ["CFLAGS"] = " ".join(cflags)
# print("CFLAGS={}".format(os.environ["CFLAGS"]))
# if len(ldflags) > 0:
# os.environ["LDFLAGS"] = " ".join(ldflags)
# print("LDFLAGS={}".format(os.environ["LDFLAGS"]))
# else:
# print("Using the following environment variables:")
# print("CC={}".format(os.environ.get("CC", "")))
# print("CPPFLAGS={}".format(os.environ.get("CPPFLAGS", "")))
# print("CFLAGS={}".format(os.environ.get("CPLAGS", "")))
# print("LDFLAGS={}".format(os.environ.get("LDFLAGS", "")))
class MyBuildExtCommand(BuildExtCommand):
def build_extensions(self):
c = self.compiler.compiler_type
# Custom for homebrew
print("Compiler type: {}".format(c))
print("--noopenmp: {}".format(self.distribution.noopenmp))
print("--forceopenmp: {}".format(self.distribution.forceopenmp))
print("--noxpreprocessor: {}".format(self.distribution.noxpreprocessor))
print("--forcellvm: {}".format(self.distribution.forcellvm))
print("--forcegnugcc: {}".format(self.distribution.forcegnugcc))
if self.distribution.forcellvm or (c == "unix" and "local/opt/llvm" in self.compiler.compiler[0]):
print('Using Homebrew LLVM settings')
c = 'llvm'
elif self.distribution.forcegnugcc or (c == "unix" and "gnu-gcc" in self.compiler.compiler[0]):
print('Using GNU GCC settings')
c = 'gnugcc'
if self.distribution.noopenmp == 0 and self.distribution.forceopenmp == 0:
try:
check_result = check_openmp(self.compiler.compiler[0], self.distribution.noxpreprocessor)
except Exception as exc:
print("WARNING: Cannot check for OpenMP, assuming to be available")
print(exc)
check_result = True # Assume to be present by default
if not check_result:
print("WARNING: OpenMP is not available, disabling OpenMP (no parallel computing in C)")
self.distribution.noopenmp = 1
# Not removing the dtw_cc_omp extension, this will be compiled but
# without any real functionality except is_openmp_supported()
if c in c_args:
if self.distribution.noopenmp == 1:
args = [arg for arg in c_args[c] if arg not in ['-Xpreprocessor', '-fopenmp', '-lomp']]
elif self.distribution.noxpreprocessor == 1:
args = [arg for arg in c_args[c] if arg not in ['-Xpreprocessor']]
else:
args = c_args[c]
for e in self.extensions:
e.extra_compile_args = args
else:
print("Unknown compiler type: {}".format(c))
if c in l_args:
if self.distribution.noopenmp == 1:
args = [arg for arg in l_args[c] if arg not in ['-Xpreprocessor', '-fopenmp', '-lomp']]
elif self.distribution.noxpreprocessor == 1:
args = [arg for arg in l_args[c] if arg not in ['-Xpreprocessor']]
else:
args = l_args[c]
for e in self.extensions:
e.extra_link_args = args
else:
print("Unknown linker type: {}".format(c))
if numpy is None:
self.extensions = [arg for arg in self.extensions if "numpy" not in str(arg)]
print(f'All extensions:')
print(self.extensions)
BuildExtCommand.build_extensions(self)
def initialize_options(self):
set_custom_envvars_for_homebrew()
super().initialize_options()
# def finalize_options(self):
# super().finalize_options()
# def run(self):
# super().run()
class MyBuildExtInPlaceCommand(MyBuildExtCommand):
def initialize_options(self):
super().initialize_options()
self.inplace = True
def check_openmp(cc_bin, noxpreprocessor):
"""Check if OpenMP is available"""
print("Checking for OpenMP availability for {}".format(cc_bin))
cc_binname = os.path.basename(cc_bin)
args = None
kwargs = None
if "clang" in cc_binname or "cc" in cc_binname:
if noxpreprocessor == 0:
args = [[str(cc_bin), "-dM", "-E", "-Xpreprocessor", "-fopenmp", "-"]]
else:
args = [[str(cc_bin), "-dM", "-E", "-fopenmp", "-"]]
kwargs = {"stdout": sp.PIPE, "stderr": sp.PIPE, "input": '', "encoding": 'ascii'}
print(" ".join(args[0]) + " # with " + ", ".join(str(k) + "=" + str(v) for k, v in kwargs.items()))
if args is not None:
try:
p = sp.run(*args, **kwargs)
print(p.stderr)
defs = p.stdout.splitlines()
for curdef in defs:
if "_OPENMP" in curdef:
print(curdef)
print("... found OpenMP")
return True
except Exception:
print("... no OpenMP")
return False
else:
print("... do not know how to check for OpenMP (unknown CC), assume to be available")
return True
return False
# Set up extension
extensions = []
if cythonize is not None:
# - Cython uses the glob package to find files, thus use unix-style paths
# - Multiple extensions are created to have a sub-package per type of distance
# and per functionality (e.g. with or without OpenMP).
# The disadvantage is that the same C-files are reused for multiple extensions
extensions.append(
Extension(
"dtaidistance.dtw_cc",
["dtaidistance/dtw_cc.pyx", "dtaidistance/dtw_cc.pxd",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_dtw.c",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_ed.c"
],
depends=["dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_globals.h",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_ed.h"],
include_dirs=[str(dtaidistancec_path), "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC"],
library_dirs=[str(dtaidistancec_path), "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC"],
extra_compile_args=[],
extra_link_args=[]))
extensions.append(
Extension(
"dtaidistance.ed_cc",
["dtaidistance/ed_cc.pyx",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_ed.c"],
depends=["dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_globals.h"],
include_dirs=[str(dtaidistancec_path), "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC"],
extra_compile_args=[],
extra_link_args=[]))
extensions.append(
Extension(
"dtaidistance.dtw_cc_omp",
["dtaidistance/dtw_cc_omp.pyx",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_dtw_openmp.c",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_dtw.c",
"dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_ed.c"],
            depends=["dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_globals.h",
                     "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_dtw.h",
                     "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_ed.h"],
include_dirs=[str(dtaidistancec_path), "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC"],
extra_compile_args=[],
extra_link_args=[]))
if numpy is not None:
extensions.append(
Extension(
"dtaidistance.dtw_cc_numpy", ["dtaidistance/util_numpy_cc.pyx"],
depends=["dtaidistance/lib/DTAIDistanceC/DTAIDistanceC/dd_globals.h"],
include_dirs=[numpy.get_include(), str(dtaidistancec_path), "dtaidistance/lib/DTAIDistanceC/DTAIDistanceC"],
extra_compile_args=[],
extra_link_args=[]))
else:
print("WARNING: Numpy was not found, preparing a version without Numpy support.")
ext_modules = cythonize(extensions)
# compiler_directives={'language_level': "3"})
else:
print("WARNING: Cython was not found, preparing a pure Python version.")
ext_modules = []
install_requires = ['cython>=0.29.6']
setup_requires = ['setuptools>=18.0', 'cython>=0.29.6']
tests_require = ['pytest', 'pytest-benchmark']
# Check version number
init_fn = here / 'dtaidistance' / '__init__.py'
with init_fn.open('r', encoding='utf-8') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
# Set up readme file
readme_path = here / 'README.md'
if os.path.exists(readme_path):
with readme_path.open('r', encoding='utf-8') as f:
long_description = f.read()
else:
long_description = ""
long_description_content_type = "text/markdown"
# Create setup
setup_kwargs = {}
def set_setup_kwargs(**kwargs):
global setup_kwargs
setup_kwargs = kwargs
set_setup_kwargs(
name='dtaidistance',
version=version,
description='Distance measures for time series',
long_description=long_description,
long_description_content_type=long_description_content_type,
author='Wannes Meert',
author_email='[email protected]',
url='https://dtai.cs.kuleuven.be',
project_urls={
'DTAIDistance documentation': 'http://dtaidistance.readthedocs.io/en/latest/',
'DTAIDistance source': 'https://github.com/wannesm/dtaidistance'
},
packages=['dtaidistance', 'dtaidistance.clustering'],
python_requires='>=3.5',
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require={
'vis': ['matplotlib'],
'numpy': ['numpy', 'scipy'],
'all': ['matplotlib', 'numpy', 'scipy']
},
include_package_data=True,
package_data={
'dtaidistance': ['*.pyx', '*.pxd', '*.c', '*.h'],
},
distclass=MyDistribution,
cmdclass={
'test': PyTest,
'buildinplace': MyBuildExtInPlaceCommand,
'build_ext': MyBuildExtCommand,
'install': MyInstallCommand
},
license='Apache 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3'
],
keywords='dtw',
zip_safe=False
)
try:
setup(ext_modules=ext_modules, **setup_kwargs)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, SystemExit) as exc:
print("ERROR: The C extension could not be compiled")
print(exc)
if 'build_ext' in setup_kwargs['cmdclass']:
del setup_kwargs['cmdclass']['build_ext']
setup(**setup_kwargs)
print("Installed the plain Python version of the package.")
print("If you need the C extension, try reinstalling.")
| 39.940476 | 124 | 0.611326 |
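MyDistribution above registers the extra global flags --noopenmp, --forceopenmp, --noxpreprocessor, --forcellvm and --forcegnugcc, which MyBuildExtCommand.build_extensions() reads back to adjust the compile and link arguments. An illustrative invocation follows, wrapped in subprocess only so the example stays in Python; the flag ordering (global options before the command name) is the usual distutils convention and should be verified against your setuptools version.

```python
import subprocess
import sys

# Roughly equivalent to: python3 setup.py --noopenmp build_ext --inplace
subprocess.run(
    [sys.executable, "setup.py", "--noopenmp", "build_ext", "--inplace"],
    check=True,
)
```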
f63d9bdf68321f45affad5b7f59d37803575cd91 | 794 | py | Python | class5/ex2b.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | ["Apache-2.0"] | null | null | null | class5/ex2b.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | ["Apache-2.0"] | null | null | null | class5/ex2b.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader([".", "./templates/"])
template_file = "ex2b.j2"
template = env.get_template(template_file)
nxos1 = {
"interface": "Ethernet2/1",
"ip_address": "10.1.100.1",
"netmask": "24",
"local_as": "22",
"peer_ip": "10.1.100.2",
"remote_as": "22",
}
nxos2 = {
"interface": "Ethernet2/1",
"ip_address": "10.1.100.2",
"netmask": "24",
"local_as": "22",
"peer_ip": "10.1.100.1",
"remote_as": "22",
}
devices = [nxos1, nxos2]
for device in devices:
output = template.render(**device)
print(output)
| 22.685714 | 55 | 0.653652 |
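The record above references a template file ex2b.j2 that is not included, so the sketch below renders an inline template with the same variable names to show how template.render(**device) maps each dictionary key onto a placeholder. The template text is a hypothetical stand-in, not the course's actual ex2b.j2.

```python
from jinja2 import Template

template_str = """interface {{ interface }}
 ip address {{ ip_address }}/{{ netmask }}
router bgp {{ local_as }}
 neighbor {{ peer_ip }} remote-as {{ remote_as }}
"""

nxos1 = {
    "interface": "Ethernet2/1",
    "ip_address": "10.1.100.1",
    "netmask": "24",
    "local_as": "22",
    "peer_ip": "10.1.100.2",
    "remote_as": "22",
}

print(Template(template_str).render(**nxos1))
```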
6b75449b66afa0bb0cb1820fb349703a2cc771c0 | 10,107 | py | Python | tests/bip/bip32/test_bip32_keys.py | MIPPLTeam/bip_utils | c66446e7ac3879d2cf6308c5b8eb7f7705292660 | ["MIT"] | 149 | 2020-05-15T08:11:43.000Z | 2022-03-29T16:34:42.000Z | tests/bip/bip32/test_bip32_keys.py | MIPPLTeam/bip_utils | c66446e7ac3879d2cf6308c5b8eb7f7705292660 | ["MIT"] | 41 | 2020-04-03T15:57:56.000Z | 2022-03-31T08:25:11.000Z | tests/bip/bip32/test_bip32_keys.py | MIPPLTeam/bip_utils | c66446e7ac3879d2cf6308c5b8eb7f7705292660 | ["MIT"] | 55 | 2020-04-03T17:05:15.000Z | 2022-03-24T12:43:42.000Z |
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
from bip_utils import (
Bip32KeyError,
Bip32ChainCode, Bip32Depth, Bip32KeyIndex, Bip32FingerPrint, Bip32KeyData,
Bip32PublicKey, Bip32PrivateKey
)
from bip_utils.bip.bip32.bip32_const import Bip32Const
from tests.ecc.test_ecc import *
# Public keys for testing
TEST_PUB_KEYS = [
{
"key": TEST_ED25519_PUB_KEY,
"fprint": b"6ff1e466",
"key_id": b"6ff1e46644e62d8d44ee2fffb45960d350202c4b",
"ext": "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6M92aGJUuyzo3iKu8Tb6Jq9HnFbbiyiU4QAK6jM2uTxAQH8D2z9",
},
{
"key": TEST_ED25519_BLAKE2B_PUB_KEY,
"fprint": b"23e1ef48",
"key_id": b"23e1ef48982188655152d7e651b754e562eb018e",
"ext": "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6MjSvvgvbUpTCdiDZv1NjVCyfZdj97RkKXsQRkzksL4xhRE3px7",
},
{
"key": TEST_ED25519_MONERO_PUB_KEY,
"fprint": b"41a4a2c0",
"key_id": b"41a4a2c0cfa0c22ee6b00a6033fff32bd7aa9959",
"ext": "Deb7pNXSbX7qSvc2e43XLxrU4Wbif71fzakq2ecQpZSkGDbATEXFMJkjpWRoUgATX3eHcbp5fSCXmS8BQ7Yk4P3L2xCtnnhj5rFET3oeLkqLHL",
},
{
"key": TEST_NIST256P1_PUB_KEY,
"fprint": b"5fa155ff",
"key_id": b"5fa155ff09510ec6ca9dd3f8e51b06e327bf4845",
"ext": "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6T6rsRpT4yF4fz2ss6DqTdiKczA3f5aWFpMq4QND6iPeQJENNmM",
},
{
"key": TEST_SECP256K1_PUB_KEY,
"fprint": b"e168bdf4",
"key_id": b"e168bdf4a501ed739b5a94731bd13d0044efd7c7",
"ext": "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6RZYJkUia7CG7jyT3sA25TNNT1zXNBYE2YmshUj7TZbuFCYsVZf",
},
{
"key": TEST_SR25519_PUB_KEY,
"fprint": b"7bde84a2",
"key_id": b"7bde84a21e328728228f4fc69a24f57d85f7d1a4",
"ext": "Deb7pNXSbX7qSvc2e43XLxrU4Wbif71fzakq2ecQpZSkGDbATEXFMJkjpWRoUpFrrfLeLKYGpWEcqZeSUxdBe1GVs4vezvdnpmQYUfu3JPRUhT",
},
]
# Private keys for testing
TEST_PRIV_KEYS = [
{
"key": TEST_ED25519_PRIV_KEY,
"ext": "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9oDwUq8hbmYVXq9jRSi64zDnhjwYo5AMM7tJamccfayBLd1QF4",
},
{
"key": TEST_ED25519_BLAKE2B_PRIV_KEY,
"ext": "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9oDwUq8hbmYVXq9jRSi64zDnhjwYo5AMM7tJamccfayBLd1QF4",
},
{
"key": TEST_ED25519_MONERO_PRIV_KEY,
"ext": "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9oDwUq8hbmYVXq9jRSi64zDnhjwYo5AMM7tJamccfayBLd1QF4",
},
{
"key": TEST_NIST256P1_PRIV_KEY,
"ext": "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFAn5ewnjpGej3M4sp2Ko6VjT9cdgsww3GdZwZVYfomqjj5ES3Nq",
},
{
"key": TEST_SECP256K1_PRIV_KEY,
"ext": "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFAkzWAg4o7snpEUubcfTkvFE7LgFLqVV97bWwCfT6M27dRW7hQW",
},
]
# Invalid public keys for testing
TEST_INVALID_PUB_KEYS = [
{"keys": TEST_VECT_ED25519_PUB_KEY_INVALID, "curve": EllipticCurveTypes.ED25519},
{"keys": TEST_VECT_ED25519_PUB_KEY_INVALID, "curve": EllipticCurveTypes.ED25519_BLAKE2B},
{"keys": TEST_VECT_ED25519_PUB_KEY_INVALID, "curve": EllipticCurveTypes.ED25519_MONERO},
{"keys": TEST_VECT_NIST256P1_PUB_KEY_INVALID, "curve": EllipticCurveTypes.NIST256P1},
{"keys": TEST_VECT_SECP256K1_PUB_KEY_INVALID, "curve": EllipticCurveTypes.SECP256K1},
{"keys": TEST_VECT_SR25519_PUB_KEY_INVALID, "curve": EllipticCurveTypes.SR25519},
]
# Invalid private keys for testing
TEST_INVALID_PRIV_KEYS = [
{"keys": TEST_VECT_ED25519_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.ED25519},
{"keys": TEST_VECT_ED25519_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.ED25519_BLAKE2B},
{"keys": TEST_VECT_ED25519_MONERO_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.ED25519_MONERO},
{"keys": TEST_VECT_NIST256P1_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.NIST256P1},
{"keys": TEST_VECT_SECP256K1_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.SECP256K1},
{"keys": TEST_VECT_SR25519_PRIV_KEY_INVALID, "curve": EllipticCurveTypes.SR25519},
]
# Key data for testing
TEST_KEY_DATA = Bip32KeyData(Bip32Const.MAIN_NET_KEY_NET_VERSIONS,
Bip32Depth(0),
Bip32KeyIndex(0),
Bip32ChainCode(),
Bip32FingerPrint())
#
# Tests
#
class Bip32KeyDataTests(unittest.TestCase):
# Test private key
def test_priv_key(self):
for i, test in enumerate(TEST_PRIV_KEYS):
test_pub = TEST_PUB_KEYS[i]
# FromBytesOrKeyObject (object)
self.__test_priv_key_obj(Bip32PrivateKey.FromBytesOrKeyObject(test["key"], TEST_KEY_DATA, test["key"].CurveType()), test, test_pub)
# FromBytesOrKeyObject (bytes)
self.__test_priv_key_obj(Bip32PrivateKey.FromBytesOrKeyObject(test["key"].Raw().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test, test_pub)
# FromBytes (bytes)
self.__test_priv_key_obj(Bip32PrivateKey.FromBytes(test["key"].Raw().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test, test_pub)
# Test public key
def test_pub_key(self):
for test in TEST_PUB_KEYS:
# FromBytesOrKeyObject (object)
self.__test_pub_key_obj(Bip32PublicKey.FromBytesOrKeyObject(test["key"], TEST_KEY_DATA, test["key"].CurveType()), test)
# FromBytesOrKeyObject (compressed)
self.__test_pub_key_obj(Bip32PublicKey.FromBytesOrKeyObject(test["key"].RawCompressed().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test)
# FromBytesOrKeyObject (uncompressed)
self.__test_pub_key_obj(Bip32PublicKey.FromBytesOrKeyObject(test["key"].RawUncompressed().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test)
# FromBytes (compressed)
self.__test_pub_key_obj(Bip32PublicKey.FromBytes(test["key"].RawCompressed().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test)
# FromBytes (uncompressed)
self.__test_pub_key_obj(Bip32PublicKey.FromBytes(test["key"].RawUncompressed().ToBytes(), TEST_KEY_DATA, test["key"].CurveType()), test)
# Test invalid keys
def test_invalid_keys(self):
# Invalid private keys
for test in TEST_INVALID_PRIV_KEYS:
for key in test["keys"]:
self.assertRaises(Bip32KeyError, Bip32PrivateKey.FromBytesOrKeyObject, key, TEST_KEY_DATA, test["curve"])
# Invalid public keys
for test in TEST_INVALID_PUB_KEYS:
for key in test["keys"]:
self.assertRaises(Bip32KeyError, Bip32PublicKey.FromBytesOrKeyObject, key, TEST_KEY_DATA, test["curve"])
# Test private key object
def __test_priv_key_obj(self, priv_key_obj, test_priv, test_pub):
self.assertEqual(test_priv["key"].CurveType(), priv_key_obj.CurveType())
self.assertTrue(isinstance(priv_key_obj.KeyObject(), type(test_priv["key"])))
self.assertTrue(priv_key_obj.Data() is TEST_KEY_DATA)
self.assertEqual(test_priv["key"].Raw().ToBytes(), priv_key_obj.Raw().ToBytes())
self.assertEqual(test_priv["key"].Raw().ToBytes(), bytes(priv_key_obj.Raw()))
self.assertEqual(test_priv["key"].Raw().ToHex(), priv_key_obj.Raw().ToHex())
self.assertEqual(test_priv["key"].Raw().ToHex(), str(priv_key_obj.Raw()))
self.assertEqual(test_priv["ext"],priv_key_obj.ToExtended())
# Public key associated to the private one
self.__test_pub_key_obj(priv_key_obj.PublicKey(), test_pub)
# Test public key object
def __test_pub_key_obj(self, pub_key_obj, test):
self.assertEqual(test["key"].CurveType(), pub_key_obj.CurveType())
self.assertTrue(isinstance(pub_key_obj.KeyObject(), type(test["key"])))
self.assertTrue(pub_key_obj.Data() is TEST_KEY_DATA)
self.assertEqual(test["key"].RawCompressed().ToBytes(), pub_key_obj.RawCompressed().ToBytes())
self.assertEqual(test["key"].RawCompressed().ToBytes(), bytes(pub_key_obj.RawCompressed()))
self.assertEqual(test["key"].RawCompressed().ToHex(), pub_key_obj.RawCompressed().ToHex())
self.assertEqual(test["key"].RawCompressed().ToHex(), str(pub_key_obj.RawCompressed()))
self.assertEqual(test["key"].RawUncompressed().ToBytes(), pub_key_obj.RawUncompressed().ToBytes())
self.assertEqual(test["key"].RawUncompressed().ToBytes(), bytes(pub_key_obj.RawUncompressed()))
self.assertEqual(test["key"].RawUncompressed().ToHex(), pub_key_obj.RawUncompressed().ToHex())
self.assertEqual(test["key"].RawUncompressed().ToHex(), str(pub_key_obj.RawUncompressed()))
self.assertEqual(binascii.unhexlify(test["fprint"]),pub_key_obj.FingerPrint().ToBytes())
self.assertEqual(binascii.unhexlify(test["key_id"]),pub_key_obj.KeyIdentifier())
self.assertEqual(test["ext"],pub_key_obj.ToExtended())
| 51.045455 | 159 | 0.722173 |
7ec096710584bedea932b6345cfbd84ff055e1c2 | 680 | py | Python | Mundo 2/ex070 Estatistica em produtos.py | AbelRapha/Python-Exercicios-CeV | 17e7055c982c8a1224992602ece50bae8eeee365 | ["MIT"] | null | null | null | Mundo 2/ex070 Estatistica em produtos.py | AbelRapha/Python-Exercicios-CeV | 17e7055c982c8a1224992602ece50bae8eeee365 | ["MIT"] | null | null | null | Mundo 2/ex070 Estatistica em produtos.py | AbelRapha/Python-Exercicios-CeV | 17e7055c982c8a1224992602ece50bae8eeee365 | ["MIT"] | null | null | null |
print("-="*15)
print(" MERCADAO ")
print("-="*15)
decisao =" "
dict_produtos = {}
while(decisao not in "N"):
produto = input("Digite o nome do produto: ").strip()
preco = float(input("Digite o preco do produto "))
dict_produtos[produto] = preco
decisao = input("Deseja continuar? ").strip().upper()
continue
print(f"""O valor total da compra foi de R$ {sum(dict_produtos.values())}.\n
E tiveram {len([produto for produto in dict_produtos if dict_produtos[produto]>1000])} produtos(s) acima de R$ 1.000,00. \n
Sendo que o produto mais barato foi {min(dict_produtos, key = dict_produtos.get)} e custou R$ {min(dict_produtos.values())}.""")
| 40 | 128 | 0.660294 |
971fa2b66f525b9d0f33bc530e21428d15fde61a | 4,784 | py | Python | yt/frontends/athena/io.py | FeiLi5/git-github.com-yt-project-yt | 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | 2 | 2021-03-02T18:59:49.000Z | 2021-03-02T18:59:50.000Z | yt/frontends/athena/io.py | FeiLi5/git-github.com-yt-project-yt | 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | 4 | 2018-04-13T23:03:42.000Z | 2018-05-08T17:50:43.000Z | yt/frontends/athena/io.py | FeiLi5/git-github.com-yt-project-yt | 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | 2 | 2020-05-16T15:29:37.000Z | 2020-06-22T10:17:08.000Z |
"""
The data-file handling functions
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.utilities.io_handler import \
BaseIOHandler
import numpy as np
from yt.funcs import mylog
from .data_structures import chk23
float_size = {"float":np.dtype(">f4").itemsize,
"double":np.dtype(">f8").itemsize}
axis_list = ["_x","_y","_z"]
class IOHandlerAthena(BaseIOHandler):
_dataset_type = "athena"
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
_read_table_offset = None
def _field_dict(self,fhandle):
keys = fhandle['field_types'].keys()
val = fhandle['field_types'].keys()
return dict(zip(keys,val))
def _read_field_names(self,grid):
pass
def _read_chunk_data(self,chunk,fields):
data = {}
if len(chunk.objs) == 0: return data
for grid in chunk.objs:
if grid.filename is None:
continue
f = open(grid.filename, "rb")
data[grid.id] = {}
grid_dims = grid.ActiveDimensions
read_dims = grid.read_dims.astype("int64")
grid_ncells = np.prod(read_dims)
grid0_ncells = np.prod(grid.index.grids[0].read_dims)
read_table_offset = get_read_table_offset(f)
for field in fields:
ftype, offsetr, dtype = grid.index._field_map[field]
if grid_ncells != grid0_ncells:
offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
if grid_ncells == grid0_ncells:
offset = offsetr
offset = int(offset) # Casting to be certain.
file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size[dtype]
xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
f.seek(read_table_offset+offset+file_offset)
if dtype == 'float':
dt = '>f4'
elif dtype == 'double':
dt = '>f8'
if ftype == 'scalar':
f.seek(read_table_offset+offset+file_offset)
v = np.fromfile(f, dtype=dt,
count=grid_ncells).reshape(read_dims,order='F')
if ftype == 'vector':
vec_offset = axis_list.index(field[-1][-2:])
f.seek(read_table_offset+offset+3*file_offset)
v = np.fromfile(f, dtype=dt, count=3*grid_ncells)
v = v[vec_offset::3].reshape(read_dims,order='F')
if grid.ds.field_ordering == 1:
data[grid.id][field] = v[xread,yread,:].T.astype("float64")
else:
data[grid.id][field] = v[xread,yread,:].astype("float64")
f.close()
return data
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
if grid.ds.field_ordering == 1:
sl.reverse()
return self._read_data_set(grid, field)[tuple(sl)]
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks)
if any((ftype != "athena" for ftype, fname in fields)):
raise NotImplementedError
rv = {}
for field in fields:
rv[field] = np.empty(size, dtype="float64")
ng = sum(len(c.objs) for c in chunks)
mylog.debug("Reading %s cells of %s fields in %s grids",
size, [f2 for f1, f2 in fields], ng)
ind = 0
for chunk in chunks:
data = self._read_chunk_data(chunk, fields)
for g in chunk.objs:
for field in fields:
ftype, fname = field
ds = data[g.id].pop(field)
nd = g.select(selector, ds, rv[field], ind) # caches
ind += nd
data.pop(g.id)
return rv
def get_read_table_offset(f):
line = f.readline()
while True:
splitup = line.strip().split()
chkc = chk23('CELL_DATA')
chkp = chk23('POINT_DATA')
if chkc in splitup or chkp in splitup:
f.readline()
read_table_offset = f.tell()
break
line = f.readline()
return read_table_offset
| 37.375 | 93 | 0.534072 |
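A worked sketch (not in the original file) of the byte-offset arithmetic _read_chunk_data() uses to skip whole z-planes before a grid's data: file_offset is the grid's z index within the file times the plane size times the size of one value. The grid numbers here are made up for illustration.

```python
import numpy as np

float_size = {"float": np.dtype(">f4").itemsize,    # 4 bytes per value
              "double": np.dtype(">f8").itemsize}   # 8 bytes per value

read_dims = np.array([16, 16, 16], dtype="int64")   # nx, ny, nz read from disk
grid_file_offset = (0, 0, 4)                        # grid starts 4 z-planes in
dtype = "double"

# file_offset = z-offset * nx * ny * bytes-per-value
file_offset = grid_file_offset[2] * read_dims[0] * read_dims[1] * float_size[dtype]
print(file_offset)  # 4 * 16 * 16 * 8 = 8192 bytes
```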
f51a45fe1a8a67667d2a40a89fde26a30975bde6 | 865 | py | Python | 2018/day01.py | harendra263/AdventOfCode | 86db33409961681dcb05be9bf9197f7464ee7697 | ["MIT"] | null | null | null | 2018/day01.py | harendra263/AdventOfCode | 86db33409961681dcb05be9bf9197f7464ee7697 | ["MIT"] | null | null | null | 2018/day01.py | harendra263/AdventOfCode | 86db33409961681dcb05be9bf9197f7464ee7697 | ["MIT"] | null | null | null |
from typing import List, Iterator, Set
with open('data/day01a.txt') as f:
numbers = [int(line.strip()) for line in f]
print(sum(numbers))
def all_frequencies(numbers: List[int], start: int = 0) -> Iterator[int]:
frequency = start
while True:
for number in numbers:
yield frequency
frequency += number
def first_repeat_frequency(numbers: List[int], start: int= 0) -> int:
seen = set()
for frequency in all_frequencies(numbers, start):
if frequency in seen:
return frequency
else:
seen.add(frequency)
# assert first_repeat_frequency([1,-1])== 0
# assert first_repeat_frequency([3,3,4,-2,-4]) == 10
# assert first_repeat_frequency([-6, +3, +8, +5, -6])== 5
# assert first_repeat_frequency([+7, +7, -2, -7, -4]) == 14
print(first_repeat_frequency(numbers))
| 20.116279 | 73 | 0.625434 |
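A self-contained sketch of the two helpers above, with the function bodies repeated so it runs without the puzzle input file data/day01a.txt:

```python
from itertools import islice
from typing import Iterator, List, Set

def all_frequencies(numbers: List[int], start: int = 0) -> Iterator[int]:
    frequency = start
    while True:
        for number in numbers:
            yield frequency
            frequency += number

def first_repeat_frequency(numbers: List[int], start: int = 0) -> int:
    seen: Set[int] = set()
    for frequency in all_frequencies(numbers, start):
        if frequency in seen:
            return frequency
        seen.add(frequency)

print(list(islice(all_frequencies([+1, -2, +3]), 6)))  # [0, 1, -1, 2, 3, 1]
print(first_repeat_frequency([+1, -2, +3]))            # 1
```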
74c4ba165f5264b43f651d253da1d1ddbad87714 | 2,270 | py | Python | basicdata/FileTest.py | xuwujing/python-study | a48a81595b28757a7ecd4dcf0416b8d897b46e41 | ["Apache-2.0"] | 1 | 2020-07-29T01:19:43.000Z | 2020-07-29T01:19:43.000Z | basicdata/FileTest.py | xuwujing/python-study | a48a81595b28757a7ecd4dcf0416b8d897b46e41 | ["Apache-2.0"] | null | null | null | basicdata/FileTest.py | xuwujing/python-study | a48a81595b28757a7ecd4dcf0416b8d897b46e41 | ["Apache-2.0"] | null | null | null |
# File reading and writing
# open() returns a file object. Basic syntax:
# open(filename, mode)
# filename: string containing the name of the file you want to access.
# mode: determines the mode the file is opened in: read-only, write, append, etc. All possible
#       values are listed below. This parameter is optional; the default access mode is read-only (r).
# Mode : Description
# r    : Open for reading only. The file pointer is placed at the beginning of the file. This is the default mode.
# rb   : Open in binary format for reading only. The file pointer is placed at the beginning of the file.
# r+   : Open for reading and writing. The file pointer is placed at the beginning of the file.
# rb+  : Open in binary format for reading and writing. The file pointer is placed at the beginning of the file.
# w    : Open for writing only. If the file exists, it is opened and truncated (existing content is deleted). If it does not exist, a new file is created.
# wb   : Open in binary format for writing only. If the file exists, it is opened and truncated. If it does not exist, a new file is created.
# w+   : Open for reading and writing. If the file exists, it is opened and truncated. If it does not exist, a new file is created.
# wb+  : Open in binary format for reading and writing. If the file exists, it is opened and truncated. If it does not exist, a new file is created.
# a    : Open for appending. If the file exists, the pointer is placed at the end, so new content is written after the existing content. If it does not exist, a new file is created for writing.
# ab   : Open in binary format for appending. If the file exists, the pointer is placed at the end. If it does not exist, a new file is created for writing.
# a+   : Open for reading and writing in append mode. If the file exists, the pointer is placed at the end. If it does not exist, a new file is created for reading and writing.
# ab+  : Open in binary format for appending. If the file exists, the pointer is placed at the end. If it does not exist, a new file is created for reading and writing.
# Open a file with the specified mode and encoding, then write data to it
f = open("C:\\test\\test1.txt", "w",encoding="utf-8")
num=f.write( "Python 是一个非常好的语言。\n是的,的确非常好!!\n" )
# Number of characters written
print(num)
# Close the opened file
f.close()
# Read the file content and print it
f2 = open("C:\\test\\test1.txt", "r",encoding="utf-8")
# Read the whole file
print(f2.read())
f2.close()
# Output:
# Python 是一个非常好的语言。
# 是的,的确非常好!!
# Read the file content and print it
f3 = open("C:\\test\\test1.txt", "r",encoding="utf-8")
# Read a single line
print(f3.readline())
f3.close()
# Python 是一个非常好的语言。
# Read the file content and print it
f3 = open("C:\\test\\test1.txt", "r",encoding="utf-8")
# Read all lines
print(f3.readlines())
f3.close()
# ['Python 是一个非常好的语言。\n', '是的,的确非常好!!\n']
# Iterating over a file
# Open a file
f4 = open("C:\\test\\test1.txt", "r",encoding="utf-8")
# Iterate over all lines
for line in f4:
    print(line, end='')
f4.close()
# Python 是一个非常好的语言。
# 是的,的确非常好!!
# Write a non-string value to a file
f5 = open("C:\\test\\test1.txt", "w",encoding="utf-8")
value = ('www.panchengming.com', 18)
s = str(value)
f5.write(s)
# Close the opened file
f5.close()
## f.seek()
# To change the file object's current position, use f.seek(offset, from_what).
# The from_what value: 0 means from the start of the file, 1 means from the current position,
# and 2 means from the end of the file. For example:
# seek(x,0) : move x characters from the start of the file (first character of the first line)
# seek(x,1) : move x characters forward from the current position
# seek(-x,2): move x characters back from the end of the file
f6 = open("C:\\test\\test1.txt", "rb+")
f6.write(b'0123456789abcdef')
# Move to the sixth byte of the file
print(f6.seek(5))
print(f6.read(1))
# Move to the third byte from the end of the file
print(f6.seek(-3, 2))
print(f6.read(1))
# 5
# b'5'
# 25
# b'1'
| 19.912281 | 83 | 0.701762 |
619af894f1d7a0532a43c088c2d6ae2aff7068b5
| 12,603 |
py
|
Python
|
main.py
|
addicx/plantsensor
|
3089fcb603266bd54d629637ef9ce0c10ffc9e74
|
[
"MIT"
] | 74 |
2021-08-04T20:15:58.000Z
|
2022-01-01T11:10:30.000Z
|
main.py
|
addicx/plantsensor
|
3089fcb603266bd54d629637ef9ce0c10ffc9e74
|
[
"MIT"
] | 1 |
2021-12-29T17:34:42.000Z
|
2021-12-29T17:34:42.000Z
|
main.py
|
addicx/plantsensor
|
3089fcb603266bd54d629637ef9ce0c10ffc9e74
|
[
"MIT"
] | 4 |
2021-08-06T18:35:42.000Z
|
2021-08-13T14:36:45.000Z
|
from machine import Pin,SPI,ADC #3v3 to humidity1, GP2 to humidity2 GND to humidity 4, (Link DHT22 pins 1&2 with a 4.7K - 10K resistor)
import machine, framebuf, onewire, ds18x20, rp2, time, random
from src.CSMS import CSMS
DC = 8
RST = 12
MOSI = 11
SCK = 10
CS = 9
class OLED_1inch3(framebuf.FrameBuffer):
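    """Framebuffer-backed driver for the 1.3-inch 128x64 OLED panel, driven over SPI."""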
def __init__(self):
self.width = 128
self.height = 64
self.cs = Pin(CS,Pin.OUT)
self.rst = Pin(RST,Pin.OUT)
self.cs(1)
self.spi = SPI(1)
self.spi = SPI(1,2000_000)
self.spi = SPI(1,20000_000,polarity=0, phase=0,sck=Pin(SCK),mosi=Pin(MOSI),miso=None)
self.dc = Pin(DC,Pin.OUT)
self.dc(1)
self.buffer = bytearray(self.height * self.width // 8)
super().__init__(self.buffer, self.width, self.height, framebuf.MONO_HMSB)
self.init_display()
self.white = 0xffff
self.black = 0x0000
def write_cmd(self, cmd):
self.cs(1)
self.dc(0)
self.cs(0)
self.spi.write(bytearray([cmd]))
self.cs(1)
def write_data(self, buf):
self.cs(1)
self.dc(1)
self.cs(0)
self.spi.write(bytearray([buf]))
self.cs(1)
def init_display(self):
"""Initialize dispaly"""
self.rst(1)
time.sleep(0.001)
self.rst(0)
time.sleep(0.01)
self.rst(1)
self.write_cmd(0xAE)#turn off OLED display
self.write_cmd(0x00) #set lower column address
self.write_cmd(0x10) #set higher column address
self.write_cmd(0xB0) #set page address
        self.write_cmd(0xdc) #set display start line
self.write_cmd(0x00)
self.write_cmd(0x81) #contract control
self.write_cmd(0x6f) #128
self.write_cmd(0x21) # Set Memory addressing mode (0x20/0x21) #
self.write_cmd(0xa0) #set segment remap
self.write_cmd(0xc0) #Com scan direction
self.write_cmd(0xa4) #Disable Entire Display On (0xA4/0xA5)
self.write_cmd(0xa6) #normal / reverse
self.write_cmd(0xa8) #multiplex ratio
self.write_cmd(0x3f) #duty = 1/64
self.write_cmd(0xd3) #set display offset
self.write_cmd(0x60)
self.write_cmd(0xd5) #set osc division
self.write_cmd(0x41)
self.write_cmd(0xd9) #set pre-charge period
self.write_cmd(0x22)
self.write_cmd(0xdb) #set vcomh
self.write_cmd(0x35)
self.write_cmd(0xad) #set charge pump enable
self.write_cmd(0x8a) #Set DC-DC enable (a=0:disable; a=1:enable)
self.write_cmd(0XAF)
def show(self):
self.write_cmd(0xb0)
for page in range(0,64):
self.column = 63 - page
self.write_cmd(0x00 + (self.column & 0x0f))
self.write_cmd(0x10 + (self.column >> 4))
for num in range(0,16):
self.write_data(self.buffer[page*16+num])
# Sensors
#Onboard Temp
sensor_temp = machine.ADC(4)
conversion_factor = 3.3 / (65535)
reading = sensor_temp.read_u16() * conversion_factor
temperature = 27 - (reading - 0.706)/0.001721
#External Temp
ds_pin = machine.Pin(16) #GP16 to central pin, 3v3_O to rightmost pin facing the flat side, GND to leftmost pin
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))
roms = ds_sensor.scan()
#Soil Moisture (CSMS)
adc0 = machine.ADC(machine.Pin(26)) #CA changed pin to 26 (ADC pins on a Pico are 26, 27 & 28)
adc1 = machine.ADC(machine.Pin(27)) #CA changed pin to 26 (ADC pins on a Pico are 26, 27 & 28)
adc2 = machine.ADC(machine.Pin(28)) #CA changed pin to 26 (ADC pins on a Pico are 26, 27 & 28)
csms0 = CSMS(adc0, min_value=60000, max_value=24854)
csms1 = CSMS(adc1, min_value=60000, max_value=24854)
csms2 = CSMS(adc2, min_value=60000, max_value=24854)
#Humidity
dht = ""
@rp2.asm_pio(set_init=(rp2.PIO.OUT_LOW,rp2.PIO.OUT_LOW),#Pico pin GP2 to DHT22 pin 2 facing the grid, Pico GND to DHT22 pin 4, Pico 3v3_O to DHT22 pin 1 (Link DHT22 pins 1&2 with a 4.7K - 10K resistor)
autopush=True, in_shiftdir=rp2.PIO.SHIFT_LEFT)
def dht22():
wrap_target()
label("again")
pull(block)
set(pins, 0)
mov(x, osr)
label("loop1")
jmp(x_dec, "loop1")
set(pindirs, 0)
wait(1, pin, 0)
wait(0, pin, 0)
wait(1, pin, 0)
wait(0, pin, 0)
set(y, 31)
label("bits")
wait(1, pin, 0) [25]
in_(pins, 1)
wait(0, pin, 0)
jmp(y_dec, "bits")
set(y, 7)
label("check")
wait(1, pin, 0)[25]
set(pins,2)
set(pins,0)
in_(pins, 1)
wait(0, pin, 0)
jmp(y_dec, "check")
push(block)
wrap()
class DHT22():
def __init__(self, gpio):
self.sm = rp2.StateMachine(0, dht22, freq=490196,
in_base=Pin(gpio), set_base=Pin(gpio),
jmp_pin=Pin(gpio))
self.sm.active(1)
def getReading(self):
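        """Trigger a measurement and decode humidity, temperature and checksum from the 40-bit response."""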
self.sm.put(500)
data=0
data = self.sm.get()
byte1 = (data >> 24 & 0xFF)
byte2 = (data >> 16 & 0xFF)
byte3 = (data >> 8 & 0xFF)
byte4 = (data & 0xFF)
checksum = self.sm.get() & 0xFF
self.checksum = (checksum == (byte1+byte2+byte3+byte4) & 0xFF)
self.humidity = ((byte1 << 8) | byte2) / 10.0
neg = byte3 & 0x80
byte3 = byte3 & 0x7F
self.temperature = (byte3 << 8 | byte4) / 10.0
if neg > 0:
self.temperature = -self.temperature
# Definitions
i = 0
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
while True:
filename = ("data" + str(random.getrandbits(16)) + ".csv") #log given a random name to minimise risk of overwriting data upon power loss
file = open(filename, "w")
file.close()
#It begins
while True:
#empty cache and assess memory strain
csvdata = []
with open(filename,'r') as file:
for line in file:
csvdata.append(line.rstrip('\n').rstrip('\r').split(','))
if len(csvdata) > 300:
break
#set time parameters
t_end = time.time() + 60 #time ~between reads - 3600 in final
#zfill date parameters
if len(str(abs(time.localtime()[4]))) == 1:
zmin = ("0" + str(abs(time.localtime()[4])))
else:
zmin = str(abs(time.localtime()[4]))
if len(str(abs(time.localtime()[3]))) == 1:
zhr = ("0" + str(abs(time.localtime()[3])))
else:
zhr = str(abs(time.localtime()[3]))
if len(str(abs(time.localtime()[2]))) == 1:
zday = ("0" + str(abs(time.localtime()[2])))
else:
zday = str(abs(time.localtime()[2]))
date = (zhr + ":" + zmin + " " + zday + months[abs(time.localtime()[1])] + "'" + str(abs(time.localtime()[0]) % 100))
#iterate lognum
i += 1
if len(str(i)) == 1:
lognum = ("Log0000" + str(i))
elif len(str(i)) == 2:
lognum = ("Log000" + str(i))
elif len(str(i)) == 3:
lognum = ("Log00" + str(i))
elif len(str(i)) == 4:
lognum = ("Log0" + str(i))
else:
lognum = ("Log" + str(i))
# MinMax values
sm1set = [x[5] for x in csvdata]
sm2set = [x[6] for x in csvdata]
sm3set = [x[7] for x in csvdata]
csvdata = []
if sm1set ==[]:
row3 = "High " + "N/A" + " " + "N/A" + " " + "N/A"
row5 = "Low " + "N/A" + " " + "N/A" + " " + "N/A"
else:
row3 = "High " + max(sm1set) + " " + max(sm2set) + " " + max(sm3set)
row5 = "Low " + min(sm1set) + " " + min(sm2set) + " " + min(sm3set)
sm1set = []
sm2set = []
sm3set = []
#update sensors
ds_sensor.convert_temp()
time.sleep_ms(750) #need to wait 750 ms between convert and read
for rom in roms:
stemp = ds_sensor.read_temp(rom)
if len(str(round(stemp))) ==1:
zstemp = "0" + str(round(stemp))
else:
zstemp = str(round(stemp))
dht = DHT22(2)
dht.getReading()
sensor_temp = machine.ADC(4)
conversion_factor = 3.3 / (65535)
reading = sensor_temp.read_u16() * conversion_factor
temperature = 27 - (reading - 0.706)/0.001721
soilz1 = csms0.read(25)
soilz2 = csms1.read(25)
soilz3 = csms2.read(25)
if len(str(soilz1)) == 1:#normalises the length of the values
soil1 = ("0" + str(soilz1) + "%")
elif len(str(soilz1)) == 2:
soil1 = (str(soilz1) + "%")
else:
soil1 = ("Max")
if len(str(soilz2)) == 1:#normalises the length of the values
soil2 = ("0" + str(soilz2) + "%")
elif len(str(soilz2)) == 2:
soil2 = (str(soilz2) + "%")
else:
soil2 = ("Max")
if len(str(soilz3)) == 1:#normalises the length of the values
soil3 = ("0" + str(soilz3) + "%")
    elif len(str(soilz3)) == 2:
soil3 = (str(soilz3) + "%")
else:
soil3 = ("Max")
if len(str(round(dht.temperature))) ==1:
ztemp = "0" + str(round(dht.temperature))
else:
ztemp = str(round(dht.temperature))
if len(str(round(dht.humidity))) ==1:
zhum = "0" + str(round(dht.humidity))
else:
zhum = str(round(dht.humidity))
row0 = (date)
row1 = "A " + ztemp + "C " + zhum + "%h S " + zstemp + "C"
row2 = " Soil Moisture "
row4 = "Now " + str(soil1) + " " + str(soil2) + " " + str(soil3)
#write to screen
OLED = OLED_1inch3() #Fills the screen with the defined row strings
OLED.fill(0x0000)
OLED.text(row0,6,1)
OLED.text(row1,1,11)
OLED.text(row2,1,23)
OLED.line(9,31,120,31,OLED.white)
OLED.text(row3,1,34)
OLED.text(row4,1,44)
OLED.text(row5,1,54)
OLED.show()
#write to log
file = open(filename, "a+")
file.write(str(lognum) + "," + str(time.time()) + "," + str(dht.temperature) + "," + str(dht.humidity) + "," + str(stemp) + "," + str(soil1) + "," + str(soil2) + "," + str(soil3) + "\n")
file.close()
#buttons
keyA = Pin(15,Pin.IN,Pin.PULL_UP)
keyB = Pin(17,Pin.IN,Pin.PULL_UP)
import micropython
micropython.mem_info()#memory check DEBUG
keyA = Pin(15,Pin.IN,Pin.PULL_UP)
keyB = Pin(17,Pin.IN,Pin.PULL_UP)
with open(filename,'r') as file:
for line in file:
csvdata.append(line.rstrip('\n').rstrip('\r').split(','))
t_end = time.time() + 60
activeline = len(csvdata)
numlines = len(csvdata)
while time.time() < t_end:
if keyA.value() == 0:
OLED.fill(0x0000)
OLED.text(str(csvdata[activeline-1][0]),1,1)
OLED.line(9,11,120,11,OLED.white)
OLED.text("Air Temp: " + str(csvdata[activeline-1][2]) + "C",1,14)
OLED.text("Humidity: " + str(csvdata[activeline-1][3]) + "%",1,24)
OLED.text("Soil Temp: " + str(csvdata[activeline-1][4]) + "C",1,34)
OLED.text("CSMS:" + str(csvdata[activeline-1][5]) + " " + str(csvdata[activeline-1][6]) + " " + str(csvdata[activeline-1][7]),1,44)
OLED.text(str(time.time() - t_end),1,54)
OLED.show()
activeline += 1
if activeline > numlines:
csvdata = []
break
if keyB.value() == 0:
OLED.fill(0x0000)
OLED.text(str(csvdata[activeline-1][0]),1,1)
OLED.line(9,11,120,11,OLED.white)
OLED.text("Air Temp: " + str(csvdata[activeline-1][2]) + "C",1,14)
OLED.text("Humidity: " + str(csvdata[activeline-1][3]) + "%",1,24)
OLED.text("Soil Temp: " + str(csvdata[activeline-1][4]) + "C",1,34)
OLED.text("CSMS:" + str(csvdata[activeline-1][5]) + " " + str(csvdata[activeline-1][6]) + " " + str(csvdata[activeline-1][7]),1,44)
OLED.text(str(time.time() - t_end),1,54)
OLED.show()
activeline -= 1
if activeline < 0:
activeline += 1
OLED.show()
| 35.905983 | 201 | 0.523288 |
643627c436a8b0e9d41420be4b72c3a010bd3b54
| 488 |
py
|
Python
|
Python/python_study_4/page17/menu_item.py
|
zharmedia386/Progate-Course-Repo
|
0dec6bd2d5594b1624251a74f6ebcf8266c449ba
|
[
"MIT"
] | null | null | null |
Python/python_study_4/page17/menu_item.py
|
zharmedia386/Progate-Course-Repo
|
0dec6bd2d5594b1624251a74f6ebcf8266c449ba
|
[
"MIT"
] | null | null | null |
Python/python_study_4/page17/menu_item.py
|
zharmedia386/Progate-Course-Repo
|
0dec6bd2d5594b1624251a74f6ebcf8266c449ba
|
[
"MIT"
] | null | null | null |
class MenuItem:
def __init__(self, name, price):
self.name = name
self.price = price
def info(self):
return self.name + ': $' + str(self.price)
def get_total_price(self, count):
total_price = self.price * count
# If count is 3 or higher, multiply it by 0.9
if count >= 3 :
total_price *= 0.9
# Round total_price to the nearest whole number and return it
return round(total_price)
| 27.111111 | 69 | 0.567623 |
a6ebeb4dee48c51529f655eecbe38689866b16e1
| 1,586 |
py
|
Python
|
coreference/data_loader.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | null | null | null |
coreference/data_loader.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | null | null | null |
coreference/data_loader.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | null | null | null |
import random
import os
import numpy as np
import torch
import const
import utils
class DataLoader:
def __init__(self, inp_data, word2idx, cuda=True):
self.cuda = cuda
self.inp_data = inp_data
self.word2idx = word2idx
self.train_docs = self.load_files("train")
self.test_docs = self.load_files("development")
self.documents2tensor()
def load_files(self, dtype):
documents = []
data_path = f"{self.inp_data}/data/{dtype}"
for _, _, files in os.walk(data_path):
for inf in files:
if inf not in const.FILTERFILES and inf.endswith("conll"):
documents += utils.load_file(f"{data_path}/{inf}")
return documents
def documents2tensor(self):
for doc in self.train_docs:
doc.tokens2tensor(self.cuda, self.word2idx)
doc.mentions(self.word2idx)
doc.span2tonsor(self.word2idx)
for doc in self.test_docs:
doc.tokens2tensor(self.cuda, self.word2idx)
doc.mentions(self.word2idx)
doc.span2tonsor(self.word2idx)
if __name__ == "__main__":
corpus = torch.load(os.path.join(const.DATAPATH, "corpus.pt"))
dl = DataLoader(const.DATAPATH, corpus["word2idx"], cuda=False)
# doc = dl.sample_data()[0]
# corefs_idxs, mention_idxs, mention_spans, labels, distances, corefs = doc.sample(False, 20)
# print(corefs_idxs, mention_idxs)
for doc in dl.test_docs:
if doc.mention_spans.shape[0] == 0:
print(doc.filename.split("/")[-1])
| 29.37037 | 97 | 0.627364 |
c460d502c1ff96f051be2d605e5ec5fface23ea3
| 401 |
py
|
Python
|
Chapter04/04_04_determine_file_extension_from_contenttype.py
|
susumuasaga/Python-Web-Scraping-Cookbook
|
dcb6241c5ead11070648b9c829b18f4e0d90f464
|
[
"MIT"
] | 1 |
2019-09-29T13:58:54.000Z
|
2019-09-29T13:58:54.000Z
|
Chapter04/04_04_determine_file_extension_from_contenttype.py
|
MindaugasVaitkus2/Python-Web-Scraping-Cookbook
|
dcb6241c5ead11070648b9c829b18f4e0d90f464
|
[
"MIT"
] | null | null | null |
Chapter04/04_04_determine_file_extension_from_contenttype.py
|
MindaugasVaitkus2/Python-Web-Scraping-Cookbook
|
dcb6241c5ead11070648b9c829b18f4e0d90f464
|
[
"MIT"
] | null | null | null |
""" Demonstrate determining extension from content type returned in response """
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import const
from util.urls import URLUtility
util = URLUtility(const.ApodEclipseImage())
print("Filename from content-type: " + util.extension_from_contenttype)
print("Filename from url: " + util.extension_from_url)
| 28.642857 | 80 | 0.790524 |
714ac4e1a87d76c223c640703f0b4bf5513a7f3a
| 8,968 |
py
|
Python
|
dccd/histo_dl/exchange.py
|
irvingprog/Download_Crypto_Currencies_Data
|
b59f3a3eed3df29a1a68915cdf18f65b9023a992
|
[
"MIT"
] | null | null | null |
dccd/histo_dl/exchange.py
|
irvingprog/Download_Crypto_Currencies_Data
|
b59f3a3eed3df29a1a68915cdf18f65b9023a992
|
[
"MIT"
] | null | null | null |
dccd/histo_dl/exchange.py
|
irvingprog/Download_Crypto_Currencies_Data
|
b59f3a3eed3df29a1a68915cdf18f65b9023a992
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: [email protected]
# @Date: 2019-08-30 09:25:01
# @Last modified by: ArthurBernard
# @Last modified time: 2019-09-03 21:56:51
""" Base object to download historical data from REST API.
Notes
-----
The following object is shapped to download data from crypto-currency exchanges
(currently only Binance, GDax, Kraken and Poloniex).
"""
# Import built-in packages
import os
import pathlib
import time
# Import extern packages
import pandas as pd
# Import local packages
from dccd.tools.date_time import date_to_TS, TS_to_date
from dccd.tools.date_time import str_to_span, span_to_str
__all__ = ['ImportDataCryptoCurrencies']
class ImportDataCryptoCurrencies:
""" Base class to import data about crypto-currencies from some exchanges.
Parameters
----------
path : str
The path where data will be save.
crypto : str
        The abbreviation of the crypto-currency.
span : {int, 'weekly', 'daily', 'hourly'}
- If str, periodicity of observation.
- If int, number of the seconds between each observation, minimal span\
is 60 seconds.
platform : str
The platform of your choice: 'Kraken', 'Poloniex'.
fiat : str
A fiat currency or a crypto-currency.
form : {'xlsx', 'csv'}
        Your favorite format. Only 'xlsx' and 'csv' at the moment.
Notes
-----
Don't use directly this class, use the respective class for each exchange.
See Also
--------
FromBinance, FromKraken, FromGDax, FromPoloniex
Attributes
----------
pair : str
Pair symbol, `crypto + fiat`.
start, end : int
Timestamp to starting and ending download data.
span : int
Number of seconds between observations.
full_path : str
Path to save data.
form : str
Format to save data.
Methods
-------
import_data
save
get_data
"""
def __init__(self, path, crypto, span, platform, fiat='EUR', form='xlsx'):
""" Initialize object. """
self.path = path
self.crypto = crypto
self.span, self.per = self._period(span)
self.fiat = fiat
self.pair = str(crypto + fiat)
self.full_path = self.path + '/' + platform + '/Data/Clean_Data/'
self.full_path += str(self.per) + '/' + self.pair
self.last_df = pd.DataFrame()
self.form = form
def _get_last_date(self):
""" Find the last observation imported.
TODO : to finish
"""
pathlib.Path(self.full_path).mkdir(parents=True, exist_ok=True)
if not os.listdir(self.full_path):
return 1325376000
else:
last_file = sorted(os.listdir(self.full_path), reverse=True)[0]
if last_file.split('.')[-1] == 'xlsx':
self.last_df = pd.read_excel(
self.full_path + '/' + str(last_file)
)
return self.last_df.TS.iloc[-1]
else:
                print('Last saved file is in a format that is not supported.',
                      'Starting from 1st January 2012.')
return 1325376000
def _set_time(self, start, end):
""" Set the end and start in timestamp if is not yet.
Parameters
----------
start : int
Timestamp of the first observation of you want.
end : int
Timestamp of the last observation of you want.
"""
        if start == 'last':
start = self._get_last_date()
elif isinstance(start, str):
start = date_to_TS(start)
else:
pass
        if end == 'now':
end = time.time()
elif isinstance(end, str):
end = date_to_TS(end)
else:
pass
return int((start // self.span) * self.span), \
int((end // self.span) * self.span)
def _set_by_period(self, TS):
return TS_to_date(TS, form='%' + self.by_period)
def _name_file(self, date):
return self.per + '_of_' + self.crypto + self.fiat + '_in_' + date
def save(self, form='xlsx', by_period='Y'):
""" Save data by period (default is year) in the corresponding format
and file.
TODO : to finish
Parameters
----------
form : {'xlsx', 'csv'}
Format to save data.
by_period : {'Y', 'M', 'D'}
- If 'Y' group data by year.
- If 'M' group data by month.
- If 'D' group data by day.
"""
df = (self.last_df.append(self.df, sort=True)
.drop_duplicates(subset='TS', keep='last')
.reset_index(drop=True)
.drop('Date', axis=1)
.reindex(columns=[
'TS', 'date', 'time', 'close', 'high', 'low', 'open',
'quoteVolume', 'volume', 'weightedAverage'
]))
pathlib.Path(self.full_path).mkdir(parents=True, exist_ok=True)
self.by_period = by_period
grouped = (df.set_index('TS', drop=False)
.groupby(self._set_by_period, axis=0)) # .reset_index()
for name, group in grouped:
            if form == 'xlsx':
self._excel_format(name, form, group)
            elif form == 'csv':
group.to_csv(
self.full_path + '/' + self._name_file(name) + '.' + form
)
else:
                print('Format not allowed')
return self
def _excel_format(self, name, form, group):
""" Save as excel format. """
writer = pd.ExcelWriter(
self.full_path + '/' + self._name_file(name) + '.' + form,
engine='xlsxwriter'
)
df_group = group.reset_index(drop=True)
df_group.to_excel(
writer, header=True, index=False, sheet_name='Sheet1'
)
work_book = writer.book
work_sheet = writer.sheets['Sheet1']
fmt = work_book.add_format(
{'align': 'center', 'num_format': '#,##0.00'}
)
fmt_time = work_book.add_format(
{'align': 'center', 'num_format': 'hh:mm:ss'}
)
fmt_date = work_book.add_format(
{'align': 'center', 'num_format': 'yyyy/mm/dd'}
)
fmt_TS = work_book.add_format({'align': 'center'})
work_sheet.set_column('A:A', 11, fmt_TS)
work_sheet.set_column('B:B', 10, fmt_date)
work_sheet.set_column('C:C', 10, fmt_time)
work_sheet.set_column('J:J', 17, fmt)
work_sheet.set_column('D:I', 13, fmt)
writer.save()
return self
def _sort_data(self, data):
""" Clean and sort the data.
TODO : to finish
"""
df = pd.DataFrame(
data,
#index=range((self.end - self.start) // self.span + 1),
# index=range(self.start, self.end, self.span)
).rename(columns={'date': 'TS'})
TS = pd.DataFrame(
list(range(self.start, self.end, self.span)),
columns=['TS']
)
df = (df.merge(TS, on='TS', how='outer', sort=False)
.sort_values('TS')
.reset_index(drop=True)
.fillna(method='pad'))
df = df.assign(Date=pd.to_datetime(df.TS, unit='s'))
df.set_index('Date')
self.df = df.assign(date=df.Date.dt.date, time=df.Date.dt.time)
return self
def import_data(self, start='last', end='now'):
""" Download data for specific time interval.
Parameters
----------
start : int or str
            Timestamp of the first observation you want, as int or date
format 'yyyy-mm-dd hh:mm:ss' as string.
end : int or str /! NOT ALLOWED TO KRAKEN EXCHANGE /!
            Timestamp of the last observation you want, as int or date
format 'yyyy-mm-dd hh:mm:ss' as string.
Returns
-------
data : pd.DataFrame
Data sorted and cleaned in a data frame.
"""
data = self._import_data(start=start, end=end)
return self._sort_data(data)
def get_data(self):
""" Print the dataframe.
Returns
-------
Data : pd.DataFrame
Current data.
"""
return self.df
def _period(self, span):
if type(span) is str:
return str_to_span(span), span
elif type(span) is int:
return span, span_to_str(span)
else:
print(
"Error, span don't have the appropiate format",
"as string or integer (seconds)"
)
def set_hierarchy(self, liste):
""" Set the specific hierarchy of the files where will save your data.
TODO : to finish
"""
self.full_path = self.path
for elt in liste:
self.full_path += '/' + elt
| 29.695364 | 79 | 0.547614 |
63866555de4a34269880685c3d02b4ff7c5909b0
| 6,308 |
py
|
Python
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/sub_customer_info_v2.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64 |
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/sub_customer_info_v2.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11 |
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/sub_customer_info_v2.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24 |
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SubCustomerInfoV2:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'display_name': 'str',
'status': 'int',
'org_id': 'str',
'org_name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'display_name': 'display_name',
'status': 'status',
'org_id': 'org_id',
'org_name': 'org_name'
}
def __init__(self, id=None, name=None, display_name=None, status=None, org_id=None, org_name=None):
"""SubCustomerInfoV2 - a model defined in huaweicloud sdk"""
self._id = None
self._name = None
self._display_name = None
self._status = None
self._org_id = None
self._org_name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if display_name is not None:
self.display_name = display_name
if status is not None:
self.status = status
if org_id is not None:
self.org_id = org_id
if org_name is not None:
self.org_name = org_name
@property
def id(self):
"""Gets the id of this SubCustomerInfoV2.
        Customer ID of the enterprise sub-account.
:return: The id of this SubCustomerInfoV2.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubCustomerInfoV2.
        Customer ID of the enterprise sub-account.
:param id: The id of this SubCustomerInfoV2.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this SubCustomerInfoV2.
        Username of the enterprise sub-account.
:return: The name of this SubCustomerInfoV2.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SubCustomerInfoV2.
        Username of the enterprise sub-account.
:param name: The name of this SubCustomerInfoV2.
:type: str
"""
self._name = name
@property
def display_name(self):
"""Gets the display_name of this SubCustomerInfoV2.
        Display name of the enterprise sub-account. Special characters are not restricted.
:return: The display_name of this SubCustomerInfoV2.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this SubCustomerInfoV2.
        Display name of the enterprise sub-account. Special characters are not restricted.
:param display_name: The display_name of this SubCustomerInfoV2.
:type: str
"""
self._display_name = display_name
@property
def status(self):
"""Gets the status of this SubCustomerInfoV2.
        Sub-account status: 1: normal, 2: creating, 3: closing, 4: closed, 101: sub-account registering, 102: sub-account pending activation
:return: The status of this SubCustomerInfoV2.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this SubCustomerInfoV2.
        Sub-account status: 1: normal, 2: creating, 3: closing, 4: closed, 101: sub-account registering, 102: sub-account pending activation
:param status: The status of this SubCustomerInfoV2.
:type: int
"""
self._status = status
@property
def org_id(self):
"""Gets the org_id of this SubCustomerInfoV2.
        ID of the organizational unit the sub-account belongs to.
:return: The org_id of this SubCustomerInfoV2.
:rtype: str
"""
return self._org_id
@org_id.setter
def org_id(self, org_id):
"""Sets the org_id of this SubCustomerInfoV2.
        ID of the organizational unit the sub-account belongs to.
:param org_id: The org_id of this SubCustomerInfoV2.
:type: str
"""
self._org_id = org_id
@property
def org_name(self):
"""Gets the org_name of this SubCustomerInfoV2.
        Name of the organizational unit the sub-account belongs to. Note: this attribute may be empty when the sub-account belongs to the root node of the enterprise organization.
:return: The org_name of this SubCustomerInfoV2.
:rtype: str
"""
return self._org_name
@org_name.setter
def org_name(self, org_name):
"""Sets the org_name of this SubCustomerInfoV2.
        Name of the organizational unit the sub-account belongs to. Note: this attribute may be empty when the sub-account belongs to the root node of the enterprise organization.
:param org_name: The org_name of this SubCustomerInfoV2.
:type: str
"""
self._org_name = org_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubCustomerInfoV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.232 | 103 | 0.556753 |
8b3928543d55f1c27ef1c4a2ab952fedf2d0d778
| 6,918 |
py
|
Python
|
train.py
|
Jiannan-Liu/nCoVSegNet
|
7543e68edff011a7f7b694c97cf0f185d441fd6b
|
[
"MIT"
] | 5 |
2021-08-31T14:28:52.000Z
|
2022-02-12T04:10:31.000Z
|
train.py
|
Jiannan-Liu/nCoVSegNet
|
7543e68edff011a7f7b694c97cf0f185d441fd6b
|
[
"MIT"
] | null | null | null |
train.py
|
Jiannan-Liu/nCoVSegNet
|
7543e68edff011a7f7b694c97cf0f185d441fd6b
|
[
"MIT"
] | 3 |
2021-08-09T02:53:17.000Z
|
2022-02-20T09:35:28.000Z
|
import torch
from torch.autograd import Variable
import os
import argparse
from datetime import datetime
from utils.dataset import get_loader
from utils.utils import clip_gradient, adjust_lr, AvgMeter
import torch.nn.functional as F
def joint_loss(pred, mask):
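    """Weighted BCE + weighted Dice loss; `weit` up-weights pixels near the mask boundary."""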
weit = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
    wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
wbce = (weit*wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))
pred = torch.sigmoid(pred)
inter = ((pred * mask)*weit).sum(dim=(2, 3))
union = ((pred + mask)*weit).sum(dim=(2, 3))
wdice = 1 - (2 * inter + 1)/(union + 1)
return (wbce + wdice).mean()
def train(train_loader, model, optimizer, epoch, train_save):
model.train()
size_rates = [0.75, 1, 1.25]
loss_record1, loss_record2, loss_record3, loss_record4 = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
for i, pack in enumerate(train_loader, start=1):
for rate in size_rates:
optimizer.zero_grad()
# ---- data prepare ----
images, gts = pack
images = Variable(images).cuda()
gts = Variable(gts).cuda()
# ---- rescaling the inputs (img/gt) ----
trainsize = int(round(opt.trainsize * rate / 32) * 32)
if rate != 1:
images = F.upsample(images, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
gts = F.upsample(gts, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
# ---- forward ----
lateral_map_4, lateral_map_3, lateral_map_2, lateral_map_1 = model(images)
loss4 = joint_loss(lateral_map_4, gts)
loss3 = joint_loss(lateral_map_3, gts)
loss2 = joint_loss(lateral_map_2, gts)
loss1 = joint_loss(lateral_map_1, gts)
loss = loss1 + loss2 + loss3 + loss4
# ---- backward ----
loss.backward()
clip_gradient(optimizer, opt.clip)
optimizer.step()
# ---- recording loss ----
if rate == 1:
loss_record1.update(loss1.data, opt.batchsize)
loss_record2.update(loss2.data, opt.batchsize)
loss_record3.update(loss3.data, opt.batchsize)
loss_record4.update(loss4.data, opt.batchsize)
# ---- train logging ----
if i % 20 == 0 or i == total_step:
print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], [lateral-1: {:.4f}, '
'lateral-2: {:.4f}, lateral-3: {:0.4f}, lateral-4: {:0.4f}]'.
format(datetime.now(), epoch, opt.epoch, i, total_step, loss_record1.show(),
loss_record2.show(), loss_record3.show(), loss_record4.show()))
# ---- save model_lung_infection ----
save_path = './weight/'.format(train_save)
os.makedirs(save_path, exist_ok=True)
if (epoch+1) % 10 == 0:
torch.save(model.state_dict(), save_path + 'nCoVSegNet-%d.pth' % (epoch+1))
print('[Saving Snapshot:]', save_path + 'nCoVSegNet-%d.pth' % (epoch+1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# hyper-parameters
parser.add_argument('--epoch', type=int, default=100,
help='epoch number')
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate')
parser.add_argument('--batchsize', type=int, default=4,
help='training batch size')
parser.add_argument('--trainsize', type=int, default=352,
help='set the size of training sample')
parser.add_argument('--clip', type=float, default=0.5,
help='gradient clipping margin')
parser.add_argument('--decay_rate', type=float, default=0.1,
help='decay rate of learning rate')
parser.add_argument('--decay_epoch', type=int, default=50,
help='every n epochs decay learning rate')
parser.add_argument('--is_thop', type=bool, default=True,
help='whether calculate FLOPs/Params (Thop)')
parser.add_argument('--gpu_device', type=int, default=0,
help='choose which GPU device you want to use')
parser.add_argument('--num_workers', type=int, default=4,
help='number of workers in dataloader. In windows, set num_workers=0')
parser.add_argument('--n_classes', type=int, default=1,
help='binary segmentation when n_classes=1')
parser.add_argument('--backbone', type=str, default='ResNet50',
help='change different backbone, choice: VGGNet16, ResNet50, Res2Net50')
parser.add_argument('--train_path', type=str,
default='/home/ljn/code/nCoVSegNet/data/train')
parser.add_argument('--train_save', type=str, default=None,
help='Use custom save path')
opt = parser.parse_args()
# ---- build models ----
torch.cuda.set_device(opt.gpu_device)
if opt.backbone == 'Res2Net50':
print('Backbone loading: Res2Net50')
# from module.Res2Net import Res2Net
elif opt.backbone == 'ResNet50':
print('Backbone loading: ResNet50')
from module.nCoVSegNet import nCoVSegNet
elif opt.backbone == 'VGGNet16':
print('Backbone loading: VGGNet16')
# from module.VGGNet import VGGNet
else:
raise ValueError('Invalid backbone parameters: {}'.format(opt.backbone))
model = nCoVSegNet(n_class=opt.n_classes).cuda()
## transfer learning
# model_path = '/home/ljn/code/nCoVSegNet/code/lidc_weight/nCoVSegNet-100.pth'
# model = nCoVSegNet(n_class=opt.n_classes)
# state_dict = torch.load(model_path, map_location='cpu')
# model.load_state_dict(state_dict, strict=False)
# model.cuda()
print('Use custom save path')
train_save = opt.train_save
# ---- calculate FLOPs and Params ----
if opt.is_thop:
from utils.utils import CalParams
x = torch.randn(1, 3, opt.trainsize, opt.trainsize).cuda()
CalParams(model, x)
# ---- load training sub-modules ----
params = model.parameters()
optimizer = torch.optim.Adam(params, opt.lr)
image_root = '{}/image/'.format(opt.train_path)
gt_root = '{}/mask/'.format(opt.train_path)
train_loader = get_loader(image_root, gt_root,
batchsize=opt.batchsize, trainsize=opt.trainsize, num_workers=opt.num_workers)
total_step = len(train_loader)
# ---- start !! -----
print("#"*20, "\nStart Training (nCoVSegNet-{})\n{}\n----\n".format(opt.backbone, opt), "#"*20)
for epoch in range(1, opt.epoch):
adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
train(train_loader, model, optimizer, epoch, train_save)
| 43.2375 | 109 | 0.603787 |
37ed19ad54af99b7080486c478a9cb6d9a5768c5
| 9,937 |
py
|
Python
|
graph.py
|
kunrii/NodeNet
|
192579807cb13fc363ea28f787da7d431b39646d
|
[
"MIT"
] | null | null | null |
graph.py
|
kunrii/NodeNet
|
192579807cb13fc363ea28f787da7d431b39646d
|
[
"MIT"
] | null | null | null |
graph.py
|
kunrii/NodeNet
|
192579807cb13fc363ea28f787da7d431b39646d
|
[
"MIT"
] | null | null | null |
import numpy as _lib_
import node as _node_
class Graph:
def __init__(self):
self.nodes = set()
self.inputNodes = set()
self.outputNodes = set()
self.links = set()
self.learning_rate = 1e-3
################################################################################################
##### TRAINING FUNCTIONALITY FOR EPOCH AND ITERATION
################################################################################################
def train(self, dataset, epochs = 20, batch_size = 64, dataset_size_restriction = None):
print("\n####################################### TRAINING #######################################\n")
if (dataset_size_restriction is not None):
assert dataset_size_restriction >= batch_size
for n in self.outputNodes:
if (n.loss == "CROSS_ENTROPY"):
n.accuracy = list()
for l in self.links:
l.adaptive_moment_estimation_current_count = 1
for i in range(epochs):
self.epoch(dataset, i + 1, batch_size, dataset_size_restriction)
def epoch(self, dataset, epoch_count, batch_size, dataset_size_restriction):
print("--------------------------------------- epoch {} ---------------------------------------".format(epoch_count))
Graph.shuffle(dataset)
if (dataset_size_restriction is None):
dataset_length = dataset["length"]
else:
dataset_length = dataset_size_restriction
for n in self.outputNodes:
if (n.loss == "CROSS_ENTROPY"):
n.hit_count = 0
i = 0
while (i < dataset_length):
if (i + batch_size < dataset_length):
self.trainingIteration(dataset, (i, (i + batch_size)), batch_size)
i += batch_size
else:
self.trainingIteration(dataset, (i, dataset_length), dataset_length - i)
break
for n in self.outputNodes:
if (n.loss == "CROSS_ENTROPY"):
n.accuracy.append(n.hit_count / dataset_length * 100)
print(n.id + " accuracy {}%".format(str(n.accuracy[-1])))
def trainingIteration(self, dataset, range, batch_size):
self.forwardPropagation(dataset, range, batch_size)
self.backwardPropagation(batch_size)
################################################################################################
##### TESTING FUNCTIONALITY
################################################################################################
def test(self, dataset, batch_size = 64):
print("\n####################################### TESTING #######################################\n")
dataset_length = dataset["length"]
print("dataset len " + str(dataset_length))
#should only be in classification nodes
for n in self.outputNodes:
if (n.loss == "CROSS_ENTROPY"):
n.accuracy = list()
n.hit_count = 0
i = 0
while (i < dataset_length):
if (i + batch_size < dataset_length):
self.forwardPropagation(dataset, (i, (i + batch_size)), batch_size)
i += batch_size
else:
self.forwardPropagation(dataset, (i, dataset_length), dataset_length - i)
break
for n in self.outputNodes:
if (n.loss == "CROSS_ENTROPY"):
n.accuracy.append(n.hit_count / dataset_length * 100)
print(n.id + " accuracy {}%".format(n.accuracy[-1]))
################################################################################################
##### FORWARD PROPAGATION FUNCTIONALITY
################################################################################################
def forwardPropagation(self, dataset, range, batch_size):
#set the neurons to this batch size
for n in self.nodes:
n.setNeurons(batch_size)
n.process_status = "IDLE"
#load up inputs into the nodes
for n in self.inputNodes:
self.loadInputs(n, dataset, range)
#load up outputs into the nodes
for n in self.outputNodes:
self.loadOutputs(n, dataset, range)
#forward process the nodes
for n in self.nodes:
self.forwardProcess(n)
def forwardProcess(self, node):
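        """Depth-first forward pass: process upstream nodes first, raise on cycles, then activate this node."""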
if (node.process_status == "PROCESSING"):
raise Exception("Neural network graphs cannot have cycles, but one was detected involving node " + node.id + " with obj id " + str(node))
elif (node.process_status == "DONE"):
return
node.process_status = "PROCESSING"
inboundLinks = self.getInboundLinks(node)
for l in inboundLinks:
if (l.prev_node.process_status == "IDLE"):
self.forwardProcess(l.prev_node)
l.passToNext()
node.setActivatedValue()
if (node in self.outputNodes):
node.setError()
node.process_status = "DONE"
def loadInputs(self, n, dataset, range):
neuronInputs = dataset["inputs"][n.id]
n.neurons_net_in[:,:] = neuronInputs[range[0]:range[1],:]
def loadOutputs(self, n, dataset, range):
neuronOutputs = dataset["outputs"][n.id]
n.observations[:,:] = neuronOutputs[range[0]:range[1],:]
################################################################################################
##### BACKWARD PROPAGATION FUNCTIONALITY
################################################################################################
def backwardPropagation(self, batch_size, standard_batch_size = None):
for n in self.nodes:
n.process_status = "IDLE"
#backward process the nodes, propagating the deltas
for n in self.nodes:
self.stack_count = -1
self.backwardProcess(n)
#corrections
for l in self.links:
l.correction(self.learning_rate, batch_size)
def backwardProcess(self, node):
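        """Depth-first backward pass: propagate deltas from downstream nodes before computing this node's delta."""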
if (node.process_status == "PROCESSING"):
raise Exception("Neural network graphs cannot have cycles, but one was detected involving node " + node.id + " with obj id " + str(node))
elif (node.process_status == "DONE"):
return
node.process_status = "PROCESSING"
outboundLinks = self.getOutboundLinks(node)
for l in outboundLinks:
if (l.next_node.process_status == "IDLE"):
self.backwardProcess(l.next_node)
l.passToPrev()
node.setDelta(outboundLinks)
node.process_status = "DONE"
################################################################################################
##### MISC FUNCTIONALITY
################################################################################################
#based on https://stackoverflow.com/questions/35646908/numpy-shuffle-multidimensional-array-by-row-only-keep-column-order-unchanged
def shuffle(data):
permutations = _lib_.arange(data["length"])
_lib_.random.shuffle(permutations)
for data_dir_key, data_dir_val in data.items():
if (data_dir_key == "inputs" or data_dir_key == "outputs"):
for node_key, node_val in data_dir_val.items():
node_val[:,:] = node_val[permutations]
################################################################################################
##### NODE MANAGEMENT FUNCTIONALITY
################################################################################################
def addNode(self, n):
if (n not in self.nodes):
self.nodes.add(n)
def removeNode(self, n):
if (n in self.nodes):
if (n in self.inputNodes):
self.inputNodes.remove(n)
elif (n in self.outputNodes):
self.outputNodes.remove(n)
self.nodes.remove(n)
def setInputNode(self, n, val):
self.setGeneric(n, val, self.inputNodes)
def setOutputNode(self, n, val):
self.setGeneric(n, val, self.outputNodes)
def setGeneric(self, n, val, nodeSet):
if (val):
if (n in self.nodes and n not in nodeSet):
nodeSet.add(n)
elif (n not in self.nodes and n not in nodeSet):
self.nodes.add(n)
nodeSet.add(n)
elif (not val):
if (n in nodeSet):
nodeSet.remove(n)
def getOutboundLinks(self, node):
oL = set()
for l in self.links:
if (l.prev_node == node):
oL.add(l)
return oL
def getInboundLinks(self, node):
iL = set()
for l in self.links:
if (l.next_node == node):
iL.add(l)
return iL
def addLink(self, l):
if (l not in self.links):
self.links.add(l)
def removeLink(self, l):
if (l in self.links):
self.links.remove(l)
| 28.070621 | 150 | 0.447016 |
e2f672ac6d30c611fad6639605013230c182f377
| 4,102 |
py
|
Python
|
seata/rm/datasource/ColumnUtils.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | 8 |
2021-09-09T06:28:08.000Z
|
2022-03-06T04:58:40.000Z
|
seata/rm/datasource/ColumnUtils.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | null | null | null |
seata/rm/datasource/ColumnUtils.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | 4 |
2021-08-23T07:44:27.000Z
|
2022-02-11T08:42:54.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
from enum import Enum
class Escape(Enum):
STANDARD = '"'
MYSQL = '`'
class ColumnUtils(object):
DOT = "."
@classmethod
def del_escape_by_cols_dbtype(cls, cols, db_type):
new_cols = cls.del_escape_by_cols_escape(cols, Escape.STANDARD)
if cls.is_mysql_series(db_type):
            new_cols = cls.del_escape_by_cols_escape(new_cols, Escape.MYSQL)
return new_cols
@classmethod
def del_escape_by_cols_escape(cls, cols, escape):
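        """Remove the given escape character from every column name in the list."""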
if cols is None or len(cols) == 0:
return cols
new_cols = []
for col in cols:
new_cols.append(cls.del_escape_by_col_escape(col, escape))
return new_cols
@classmethod
def del_escape_by_col_dbtype(cls, col_name, db_type):
new_col_name = cls.del_escape_by_col_escape(col_name, Escape.STANDARD)
if cls.is_mysql_series(db_type):
new_col_name = cls.del_escape_by_col_escape(new_col_name, Escape.MYSQL)
return new_col_name
@classmethod
def del_escape_by_col_escape(cls, col_name, escape):
if col_name is None or len(col_name.strip()) == 0:
return col_name
if col_name[0] == escape.value and col_name[len(col_name) - 1] == escape.value:
# like "scheme"."id" `scheme`.`id`
s = escape.value + cls.DOT + escape.value
index = col_name.find(s)
if index > -1:
return col_name[1: index] + cls.DOT + col_name[index + len(s): len(col_name) - 1]
return col_name[1:len(col_name) - 1]
else:
# like "scheme".id `scheme`.id
s = escape.value + cls.DOT
index = col_name.find(s)
if index > -1 and col_name[0] == escape.value:
return col_name[1: index] + cls.DOT + col_name[index + len(s):]
# like scheme."id" scheme.`id`
s = cls.DOT + escape.value
index = col_name.find(s)
if index > -1 and col_name[len(col_name) - 1] == escape.value:
return col_name[0: index] + cls.DOT + col_name[index + len(s):]
return col_name
@classmethod
def add_escape_by_cols_dbtype(cls, cols, db_type):
if cols is None or len(cols) == 0:
return cols
new_cols = []
for col in cols:
new_cols.append(cls.add_by_col_dbtype(col, db_type))
return new_cols
@classmethod
def add_by_col_dbtype(cls, col_name, db_type):
if cls.is_mysql_series(db_type):
return cls.add_by_col_dbtype_escape(col_name, db_type, Escape.MYSQL)
return cls.add_by_col_dbtype_escape(col_name, db_type, Escape.STANDARD)
@classmethod
def add_by_col_dbtype_escape(cls, col_name, db_type, escape):
if col_name is None or col_name == "":
return col_name
if col_name[0] == escape.value and col_name[len(col_name) - 1] == escape.value:
return col_name
# TODO check keyword by dbtype
if col_name.find(cls.DOT) > -1:
# like "scheme".id `scheme`.id
s = escape.value + cls.DOT
dot_index = col_name.find(s)
if dot_index > -1:
return col_name[0:dot_index + len(s)] + escape.value + col_name[dot_index + len(s):] + escape.value
# like scheme."id" scheme.`id`
s = cls.DOT + escape.value
dot_index = col_name.find(s)
if dot_index > -1:
                return escape.value + col_name[0: dot_index] + escape.value + col_name[dot_index:]
s = cls.DOT
dot_index = col_name.find(s)
if dot_index > -1:
return escape.value + col_name[0: dot_index] + escape.value + cls.DOT + \
                       escape.value + col_name[dot_index + len(s):] + escape.value
return escape.value + col_name + escape.value
@classmethod
def is_mysql_series(cls, db_type):
up = db_type.lower()
return up == 'mysql' or up == 'h2' or up == 'mariadb'
| 37.290909 | 115 | 0.591663 |
77411663a515a9268438c85921e22a661c684d07
| 5,050 |
py
|
Python
|
src/commands/errors.py
|
L0ad1n6/Banana-Bot
|
d76dff9d782b1fe614b539bf13d176daecc24c8c
|
[
"MIT"
] | 2 |
2021-12-26T05:24:37.000Z
|
2022-02-15T15:28:39.000Z
|
src/commands/errors.py
|
L0ad1n6/Banana-Bot
|
d76dff9d782b1fe614b539bf13d176daecc24c8c
|
[
"MIT"
] | null | null | null |
src/commands/errors.py
|
L0ad1n6/Banana-Bot
|
d76dff9d782b1fe614b539bf13d176daecc24c8c
|
[
"MIT"
] | 3 |
2022-01-15T19:05:49.000Z
|
2022-01-22T03:27:14.000Z
|
from discord.ext import commands
from discord.ext.commands.errors import *
from .music import HZ_BANDS
from wavelink.errors import ZeroConnectedNodes
import os
import sys
class InsufficientCredit(commands.CommandError):
pass
class InsufficientRole(commands.CommandError):
pass
class UserNotConnected(commands.CommandError):
pass
class NotConnected(commands.CommandError):
pass
class AlreadyConnected(commands.CommandError):
pass
class NotPlaying(commands.CommandError):
pass
class AlreadyPlaying(commands.CommandError):
pass
class EmptyQueue(commands.CommandError):
pass
class NoMoreSongs(commands.CommandError):
pass
class NoHistory(commands.CommandError):
pass
class InvalidTime(commands.CommandError):
pass
class VolumeTooLow(commands.CommandError):
pass
class VolumeTooHigh(commands.CommandError):
pass
class NoLyrics(commands.CommandError):
pass
class InvalidEQPreset(commands.CommandError):
pass
class NonExistentEQBand(commands.CommandError):
pass
class EQGainOutOfBounds(commands.CommandError):
pass
class Error(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error):
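        """Map known command errors to user-facing messages; unexpected errors are reported and re-raised."""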
if isinstance(error, CommandOnCooldown):
msg = f"<:yellowx:938093739283451964> Command is on **cooldown**, try again in {round(error.retry_after, 1)} seconds"
elif isinstance(error, MissingPermissions):
msg = "<:yellowx:938093739283451964> Cannot run command, missing permissions"
elif isinstance(error, MissingRequiredArgument):
msg = f"<:yellowx:938093739283451964> Missing required argument: **{error.param}**"
elif isinstance(error, ConversionError):
msg = "<:yellowx:938093739283451964> Invalid argument type, could not convert"
elif isinstance(error, CommandNotFound):
msg = "<:yellowx:938093739283451964> The command you are trying to use does not exist"
elif isinstance(error, InsufficientCredit):
msg = "<:yellowx:938093739283451964> You do not have enough Social Credit to perform this action"
elif isinstance(error, InsufficientRole):
msg = "<:yellowx:938093739283451964> Your top role is not high enough to perform this action"
elif isinstance(error, UserNotConnected):
msg = "<:yellowx:938093739283451964> You are not connected to a voice channel"
elif isinstance(error, NotConnected):
msg = "<:yellowx:938093739283451964> I'm not connected to a voice channel"
elif isinstance(error, AlreadyConnected):
msg = "<:yellowx:938093739283451964> I'm already connected to a voice channel"
elif isinstance(error, NotPlaying):
msg = "<:yellowx:938093739283451964> Music is not playing"
elif isinstance(error, AlreadyPlaying):
msg = "<:yellowx:938093739283451964> Music is already playing"
elif isinstance(error, EmptyQueue):
msg = "<:yellowx:938093739283451964> Queue is empty"
elif isinstance(error, NoMoreSongs):
msg = "<:yellowx:938093739283451964> No more songs left in queue"
elif isinstance(error, NoHistory):
msg = "<:yellowx:938093739283451964> No songs have been played yet"
elif isinstance(error, InvalidTime):
msg = "<:yellowx:938093739283451964> Time formatting is invalid"
elif isinstance(error, VolumeTooHigh):
msg = "<:yellowx:938093739283451964> You will kill your ears if I set the volume any higher than 150% (capable of 1500%)"
elif isinstance(error, VolumeTooLow):
msg = "<:yellowx:938093739283451964> Volume must be greater than 0"
elif isinstance(error, NoLyrics):
msg = "<:yellowx:938093739283451964> No lyrics found for this song"
elif isinstance(error, InvalidEQPreset):
msg = "<:yellowx:938093739283451964> EQ preset must be one of the following: flat, boost, metal, piano"
elif isinstance(error, NonExistentEQBand):
msg = f"<:yellowx:938093739283451964> This is a 15 band equalizer, the band number should be between 1 and 15 or one of of the frequencies: {', '.join(str(band) for band in HZ_BANDS)}"
elif isinstance(error, EQGainOutOfBounds):
msg = "<:yellowx:938093739283451964> EQ Gain for any band should be between -10 db and 10 db. Though possible anything more will kill your ears"
elif isinstance(error, ZeroConnectedNodes):
msg = "<:yellowx:938093739283451964> Lavalink server is offline, bot will reboot to fix this."
print("Restarting...")
os.execl(sys.executable, os.path.abspath("src/main.py"))
else:
msg = "<:yellowx:938093739283451964> Something went wrong, try again or use --report"
await ctx.reply(msg, mention_author=False)
raise error
await ctx.reply(msg, mention_author=False)
| 39.76378 | 196 | 0.693663 |
c0aef349ff8358f5a205aeae6e888287623c646c
| 1,919 |
py
|
Python
|
training/checkpointing.py
|
chapter09/open_lth
|
53403fdb3fb82b833e336cf36b0292bfed61820a
|
[
"MIT"
] | null | null | null |
training/checkpointing.py
|
chapter09/open_lth
|
53403fdb3fb82b833e336cf36b0292bfed61820a
|
[
"MIT"
] | null | null | null |
training/checkpointing.py
|
chapter09/open_lth
|
53403fdb3fb82b833e336cf36b0292bfed61820a
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..foundations import paths
from ..foundations.step import Step
from ..platforms.platform import get_platform
from ..training.metric_logger import MetricLogger
def save_checkpoint_callback(output_location, step, model, optimizer, logger):
if get_platform().is_primary_process:
get_platform().save_model({
'ep': step.ep,
'it': step.it,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'logger': str(logger),
}, paths.checkpoint(output_location))
get_platform().barrier()
def restore_checkpoint(output_location, model, optimizer, iterations_per_epoch):
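    """Restore model and optimizer state from the checkpoint if one exists; returns (step, logger) or (None, None)."""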
checkpoint_location = paths.checkpoint(output_location)
if not get_platform().exists(checkpoint_location):
return None, None
checkpoint = get_platform().load_model(checkpoint_location, map_location=torch.device('cpu'))
# Handle DataParallel.
module_in_name = get_platform().is_parallel
if module_in_name and not all(k.startswith('module.') for k in checkpoint['model_state_dict']):
checkpoint['model_state_dict'] = {'module.' + k: v for k, v in checkpoint['model_state_dict'].items()}
elif all(k.startswith('module.') for k in checkpoint['model_state_dict']) and not module_in_name:
checkpoint['model_state_dict'] = {k[len('module.'):]: v for k, v in checkpoint['model_state_dict'].items()}
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
step = Step.from_epoch(checkpoint['ep'], checkpoint['it'], iterations_per_epoch)
logger = MetricLogger.create_from_string(checkpoint['logger'])
return step, logger
| 42.644444 | 115 | 0.722251 |
52e1eb99295fa488792259b54c47ab5d2ffacdca
| 3,742 |
py
|
Python
|
clevrer_dev/feature_extraction/generate_features.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
clevrer_dev/feature_extraction/generate_features.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
clevrer_dev/feature_extraction/generate_features.py
|
gabrielsluz/SlowFast
|
bd06eac47fa236b070fd9a3b39518eea08d02947
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import torch
from torch.utils.data import DataLoader
import sys
import os
from tqdm import tqdm
import h5py
from slowfast.datasets.clevrer_video import Clevrer_video
from slowfast.models.video_model_builder import SlowFast
import slowfast.utils.checkpoint as cu
from slowfast.utils.parser import load_config, parse_args
import slowfast.utils.logging as logging
"""
Generates SlowFast features
Example:
python3 clevrer_dev/feature_extraction/generate_features.py \
--cfg clevrer_dev/feature_extraction/slowfast.yaml \
DATA.PATH_TO_DATA_DIR /datasets/clevrer \
DATA.PATH_PREFIX /datasets/clevrer \
TRAIN.CHECKPOINT_FILE_PATH model_zoo_ck/SLOWFAST_4x16_R50.pkl \
TRAIN.CHECKPOINT_TYPE caffe2
"""
#SlowFast feature extraction from almost last layer
def forward(self, x, bboxes=None):
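    """Truncated SlowFast forward pass (stops before stage s5); returns the [slow, fast] feature maps."""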
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
#x = self.s5(x)
return x
def gen_dataset(cfg, mode, root):
#Generates two datasets for a certain split. => Slow and fast features
#When using the generated file must indicate in which index the dataset starts to work
#torch.Size([50, 1280, 4, 14, 14]) torch.Size([50, 128, 32, 14, 14])
#Train starts in 0
#Val starts in 10000
#Test starts in 15000
dataset = Clevrer_video(cfg, mode)
print("Dataset {} len = {}".format(mode, len(dataset)))
dataloader = DataLoader(dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
shuffle=False, num_workers=cfg.DATA_LOADER.NUM_WORKERS)
size = len(dataloader)
batch_size = cfg.TRAIN.BATCH_SIZE
#h5py slow and fast datasets
#Slow
slow_path = os.path.join(root, '{}_slow_features.hdf5'.format(mode))
slow_h5 = h5py.File(slow_path, 'w', libver='latest')
slow_dset = slow_h5.create_dataset('data', (size * batch_size, 1280, 4, 16, 16),
dtype='f4')
#Fast
fast_path = os.path.join(root, '{}_fast_features.hdf5'.format(mode))
fast_h5 = h5py.File(fast_path, 'w', libver='latest')
fast_dset = fast_h5.create_dataset('data', (size * batch_size, 128, 32, 16, 16),
dtype='f4')
with torch.no_grad():
for i_batch, sampled_batch in tqdm(enumerate(dataloader)):
inputs = sampled_batch[0]
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
out = model(inputs)
slow_ft = out[0].detach().cpu().numpy()
fast_ft = out[1].detach().cpu().numpy()
slow_dset[i_batch * batch_size:(i_batch + 1) * batch_size] = slow_ft
fast_dset[i_batch * batch_size:(i_batch + 1) * batch_size] = fast_ft
slow_h5.close()
fast_h5.close()
if __name__ == "__main__":
args = parse_args()
cfg = load_config(args)
logger = logging.get_logger(__name__)
logging.setup_logging(cfg.OUTPUT_DIR)
use_gpu = cfg.NUM_GPUS > 0
#Set model
model = SlowFast(cfg)
if use_gpu:
cur_device = torch.cuda.current_device()
model = model.cuda(device=cur_device)
cu.load_test_checkpoint(cfg, model)
model.forward = forward.__get__(model, SlowFast)
model.eval()
    #Process datasets
root = cfg.DATA.PATH_TO_DATA_DIR
gen_dataset(cfg, 'train', root)
gen_dataset(cfg, 'val', root)
| 33.410714 | 90 | 0.652859 |
d07e0b72a8cc5b6f32fa03cf3333d6b8fd181655
| 1,222 |
py
|
Python
|
advent_of_code/solutions/day_01.py
|
deadpyxel/advent-of-code-2020
|
0aa42fe49aa06e8822c6c61125425f387c330710
|
[
"MIT"
] | null | null | null |
advent_of_code/solutions/day_01.py
|
deadpyxel/advent-of-code-2020
|
0aa42fe49aa06e8822c6c61125425f387c330710
|
[
"MIT"
] | null | null | null |
advent_of_code/solutions/day_01.py
|
deadpyxel/advent-of-code-2020
|
0aa42fe49aa06e8822c6c61125425f387c330710
|
[
"MIT"
] | null | null | null |
from itertools import combinations
from typing import Collection
from typing import List
from advent_of_code.adapter import acquire_problem_input
from advent_of_code.utils import calc_product
from advent_of_code.utils import parse_input_as_integer
def _filter_possible_combinations(
original_list: List[int], n: int, combination_size: int = 2
) -> List[Collection[int]]:
return [
comb
for comb in combinations(original_list, r=combination_size)
if sum(comb) == n
]
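# A tiny illustrative check (not part of the original solution and never called
# by solve_day01): it exercises the helper above with the sample numbers from
# the day 1 puzzle description.
def _example_day01() -> None:
    sample = [1721, 979, 366, 299, 675]
    pairs = _filter_possible_combinations(sample, n=2020)
    assert pairs == [(1721, 299)]  # 1721 + 299 == 2020; their product is 514579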
def solve_day01() -> None:
problem_input = acquire_problem_input()
input_as_integers = parse_input_as_integer(problem_input)
    # Part 1 - We were asked to find the product of a pair from the input
# that has a sum of 2020
possible_values = _filter_possible_combinations(input_as_integers, n=2020)
for possibility in possible_values:
print(f"Possible answer for part1: {calc_product(possibility)}")
    # Part 2 - We were asked the same prompt, but now with a triplet
possible_values = _filter_possible_combinations(
input_as_integers, n=2020, combination_size=3
)
for possibility in possible_values:
print(f"Possible answer for part2: {calc_product(possibility)}")
| 33.944444 | 78 | 0.744681 |
b74069aea38bb72e0acf0e46d06ac2ae661c9559
| 2,183 |
py
|
Python
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/php/composer.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 1,290 |
2020-05-28T21:24:43.000Z
|
2022-03-31T16:38:43.000Z
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/php/composer.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 1 |
2020-07-03T21:14:52.000Z
|
2020-07-03T21:14:52.000Z
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/php/composer.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 280 |
2020-05-29T17:28:38.000Z
|
2022-03-31T13:54:15.000Z
|
# -*- coding: utf-8 -*-
import json
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
import os
class Composer(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'composer', 'php')
def extract_credentials(self, location):
"""
Extract the credentials from the "auth.json" file.
See "https://getcomposer.org/doc/articles/http-basic-authentication.md" for file format.
:param location: Full path to the "auth.json" file
:return: List of credentials founds
"""
creds_found = []
with open(location) as f:
creds = json.load(f)
for cred_type in creds:
for domain in creds[cred_type]:
values = {
"AuthenticationType" : cred_type,
"Domain" : domain,
}
# Extract basic authentication if we are on a "http-basic" section
# otherwise extract authentication token
if cred_type == "http-basic":
values["Login"] = creds[cred_type][domain]["username"]
values["Password"] = creds[cred_type][domain]["password"]
else:
values["Password"] = creds[cred_type][domain]
creds_found.append(values)
return creds_found
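    # Illustrative shape of the data handled above (values are made up):
    #   {"http-basic": {"repo.example.org": {"username": "u", "password": "p"}},
    #    "github-oauth": {"github.com": "<token>"}}
    # yields one entry with Login/Password and one entry whose Password is the
    # raw token, each tagged with its AuthenticationType and Domain.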
def run(self):
"""
Main function
"""
        # Define the possible full path of the "auth.json" file when it is defined at global level
# See "https://getcomposer.org/doc/articles/http-basic-authentication.md"
# See "https://seld.be/notes/authentication-management-in-composer"
location = ''
tmp_location = [
os.path.join(constant.profile["COMPOSER_HOME"], u'auth.json'),
os.path.join(constant.profile["APPDATA"], u'Composer\\auth.json')
]
for tmp in tmp_location:
if os.path.isfile(tmp):
location = tmp
break
if location:
return self.extract_credentials(location)
| 35.209677 | 96 | 0.554283 |
9d3709c5ccef6b681b8f43840012677d5a56f769
| 3,391 |
py
|
Python
|
scripts/nasjonal_speakers.py
|
RuntimeRacer/Real-Time-Voice-Cloning
|
c363926774cf3a110b70abef939d907dc8998ca9
|
[
"MIT"
] | null | null | null |
scripts/nasjonal_speakers.py
|
RuntimeRacer/Real-Time-Voice-Cloning
|
c363926774cf3a110b70abef939d907dc8998ca9
|
[
"MIT"
] | 1 |
2022-01-08T09:36:20.000Z
|
2022-01-08T09:36:20.000Z
|
scripts/nasjonal_speakers.py
|
RuntimeRacer/Real-Time-Voice-Cloning
|
c363926774cf3a110b70abef939d907dc8998ca9
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import argparse
from tqdm import tqdm
import random
from multiprocess.pool import ThreadPool
from shutil import copyfile
# should pull this from args
parser = argparse.ArgumentParser(description='Process nasjonalbank dataset for a language.')
parser.add_argument("datasets_root", type=Path, help=\
"Path to the directory containing your CommonVoice datasets.")
parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\
"Path to the ouput directory for this preprocessing script")
parser.add_argument('--lang', help=\
"Language to process", type=str)
parser.add_argument('--min', help=\
"Minimum number of files per speaker", type=int, default=12)
parser.add_argument('--max', help=\
"Maximum number of files per speaker", type=int, default=40)
parser.add_argument("-t", "--threads", type=int, default=8)
args = parser.parse_args()
# Stats
speaker_count = 0
language_count = 0
# Processing for a single language
if args.lang is not None:
# dirs
base_dir = Path("{0}/{1}".format(args.datasets_root, args.lang))
else:
base_dir = args.datasets_root
# build our output dir (fall back to the dataset dir when -o/--out_dir is not given)
out_dir = base_dir
if getattr(args, 'out_dir', None) is not None:
    out_dir = args.out_dir
# find our audio files
print("Searching for all wav files...")
source_files = [f for f in base_dir.glob("**/*.wav") if f.is_file()]
print(" - Found: {}".format(len(source_files)))
# group files based on speaker id r0000000
speaker_hash = {}
for file in source_files:
client_id = "{0}_{1}".format(file.parts[-3], file.parts[-2])
if client_id not in speaker_hash:
speaker_hash[client_id] = []
speaker_hash[client_id].append(file)
print("Found {} unique speakers".format(len(speaker_hash)))
print("Pruning speakers with less than {} files...".format(args.min))
speakers_to_remove = []
for speaker_id in speaker_hash:
if len(speaker_hash[speaker_id]) < args.min:
speakers_to_remove.append(speaker_id)
print(" - Pruning {} speakers...".format(len(speakers_to_remove)))
for id in speakers_to_remove:
del speaker_hash[id]
print("Reduced speaker pool to {}".format(len(speaker_hash)))
# sort the speaker_id/client_id by
sorted_speakers = sorted(speaker_hash.keys())
def process_speaker(speaker):
# print("Processing: i: {0} - {1}".format(si, speaker))
speaker_paths = speaker_hash[speaker]
if len(speaker_paths) > args.max:
# shuffle
random.shuffle(speaker_paths)
speaker_paths = speaker_paths[0:args.max]
for source_path in speaker_paths:
dest_path = out_dir.joinpath("speakers", speaker)
new_name = os.path.basename(source_path)
dest_file = dest_path.joinpath(new_name)
# print(" - Source: {0} - Dest: {1}".format(str(source_path), str(dest_file)))
# break
# ensure the dir exists
os.makedirs(dest_path, exist_ok=True)
# if the file already exists, skip
check = Path(dest_file)
if check.is_file():
continue
# Copy the files
copyfile(source_path, dest_file)
with ThreadPool(args.threads) as pool:
list(
tqdm(
pool.imap(
process_speaker,
sorted_speakers
),
"Nasjonalbank",
len(sorted_speakers),
unit="speakers"
)
)
print("Done, thanks for playing...")
| 30.00885 | 92 | 0.675022 |
ef3e963bb884b9c9ea4fe2f960724485185b9342
| 227 |
py
|
Python
|
skodaconnect/__version__.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | null | null | null |
skodaconnect/__version__.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | null | null | null |
skodaconnect/__version__.py
|
stefanuc111/skodaconnect
|
106c83825fa009a238cdedebd67d0157fc950e90
|
[
"Apache-2.0"
] | null | null | null |
"""
skodaconnect - A Python 3 library for interacting with Skoda Connect and Smartlink services.
For more details and documentation, visit the github page at https://github.com/lendy007/skodaconnect
"""
__version__ = "1.1.19"
| 32.428571 | 101 | 0.77533 |
41ca4c1aba343e59ae4ccfad7642bbc132cf901e
| 3,831 |
py
|
Python
|
solace/views/api.py
|
Plurk/Solace
|
752a98e3279f1e7e0dafd6281e90ba24cb733474
|
[
"BSD-3-Clause"
] | 4 |
2015-10-28T22:23:07.000Z
|
2021-08-25T15:21:25.000Z
|
solace/views/api.py
|
sfermigier/solace
|
32d0b398643344c797a40a094689300c364de3b0
|
[
"BSD-3-Clause"
] | null | null | null |
solace/views/api.py
|
sfermigier/solace
|
32d0b398643344c797a40a094689300c364de3b0
|
[
"BSD-3-Clause"
] | 1 |
2018-11-25T17:52:22.000Z
|
2018-11-25T17:52:22.000Z
|
# -*- coding: utf-8 -*-
"""
solace.views.api
~~~~~~~~~~~~~~~~
This module implements version 1.0 of the API. If we ever provide
a new version, it should be renamed.
Because the docstrings of this module are displayed on the API page
different rules apply. Format docstrings with creole markup, not with
rst!
:copyright: (c) 2009 by Plurk Inc., see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import redirect
from werkzeug.exceptions import NotFound
from solace.application import url_for
from solace.templating import render_template
from solace.utils.api import api_method, list_api_methods, XML_NS
from solace.models import User, Topic, Post
from solace.badges import badge_list, badges_by_id
def default_redirect(request):
return redirect(url_for('api.help'))
def help(request):
return render_template('api/help.html', methods=list_api_methods(),
xmlns=XML_NS)
@api_method()
def ping(request, value):
"""Helper function to simpliy test the API. Answers with the
same value. Once API limitations are in place this method will
continue to be "free" and as such suitable for connection checking.
"""
return dict(value=value)
@api_method()
def list_users(request):
"""Returns a list of users. You can retrieve up to 50 users at
once. Each user has the same format as a call to "get user".
==== Parameters ====
* {{{limit}}} — the number of items to load at once. Defaults to
10, maximum allowed number is 50.
* {{{offset}}} — the offset of the returned list. Defaults to 0
"""
offset = max(0, request.args.get('offset', type=int) or 0)
limit = max(0, min(50, request.args.get('limit', 10, type=int)))
q = User.query.order_by(User.username)
count = q.count()
q = q.limit(limit).offset(offset)
return dict(users=q.all(), total_count=count,
limit=limit, offset=offset)
@api_method()
def get_user(request, username=None, user_id=None):
"""Looks up a user by username or user id and returns it. If the user
is looked up by id, a plus symbol has to be prefixed to the ID.
"""
if username is not None:
user = User.query.filter_by(username=username).first()
else:
user = User.query.get(user_id)
if user is None:
raise NotFound()
return dict(user=user)
@api_method()
def list_badges(request):
"""Returns a list of all badges. Each badge in the returned list
has the same format as returned by the "get badge" method.
"""
return dict(badges=badge_list)
@api_method()
def get_badge(request, identifier):
"""Returns a single badge."""
badge = badges_by_id.get(identifier)
if badge is None:
raise NotFound()
return dict(badge=badge)
@api_method()
def list_questions(request):
"""Lists all questions or all questions in a section."""
q = Topic.query.order_by(Topic.date.desc())
if request.view_lang is not None:
q = q.filter_by(locale=request.view_lang)
offset = max(0, request.args.get('offset', type=int) or 0)
limit = max(0, min(50, request.args.get('limit', 10, type=int)))
count = q.count()
q = q.limit(limit).offset(offset)
return dict(questions=q.all(), total_count=count,
limit=limit, offset=offset)
@api_method()
def get_question(request, question_id):
"""Returns a single question and the replies."""
t = Topic.query.get(question_id)
if t is None:
raise NotFound()
return dict(question=t, replies=t.replies)
@api_method()
def get_reply(request, reply_id):
"""Returns a single reply."""
r = Post.query.get(reply_id)
if r is None or r.is_question:
raise NotFound()
return dict(reply=r)
| 30.648 | 74 | 0.666928 |
f651322201921e80c761a4927fb2c44106163dc7
| 6,108 |
py
|
Python
|
src/Products/PageTemplates/expression.py
|
CMYanko/Zope
|
bbd2f4ce565740bfc9d9cae00c147f963ba49085
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/PageTemplates/expression.py
|
CMYanko/Zope
|
bbd2f4ce565740bfc9d9cae00c147f963ba49085
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/PageTemplates/expression.py
|
CMYanko/Zope
|
bbd2f4ce565740bfc9d9cae00c147f963ba49085
|
[
"ZPL-2.1"
] | null | null | null |
"""``chameleon.tales`` expressions."""
from ast import NodeTransformer
from ast import parse
from chameleon.astutil import Static
from chameleon.astutil import Symbol
from chameleon.codegen import template
from chameleon.tales import NotExpr
from chameleon.tales import StringExpr
from AccessControl.ZopeGuards import guarded_apply
from AccessControl.ZopeGuards import guarded_getattr
from AccessControl.ZopeGuards import guarded_getitem
from AccessControl.ZopeGuards import guarded_iter
from AccessControl.ZopeGuards import protected_inplacevar
from OFS.interfaces import ITraversable
from RestrictedPython import RestrictingNodeTransformer
from RestrictedPython.Utilities import utility_builtins
from z3c.pt import expressions
from zExceptions import NotFound
from zExceptions import Unauthorized
from zope.interface import implementer
from zope.tales.tales import ExpressionEngine
from zope.traversing.adapters import traversePathElement
from zope.traversing.interfaces import TraversalError
from .Expressions import render
from .interfaces import IZopeAwareEngine
_marker = object()
zope2_exceptions = (
AttributeError,
LookupError,
NameError,
TypeError,
ValueError,
NotFound,
Unauthorized,
TraversalError,
)
def static(obj):
return Static(template("obj", obj=Symbol(obj), mode="eval"))
class BoboAwareZopeTraverse:
traverse_method = 'restrictedTraverse'
__slots__ = ()
@classmethod
def traverse(cls, base, request, path_items):
"""See ``zope.app.pagetemplate.engine``."""
path_items = list(path_items)
path_items.reverse()
while path_items:
name = path_items.pop()
if ITraversable.providedBy(base):
                base = getattr(base, cls.traverse_method)(name)
else:
base = traversePathElement(base, name, path_items,
request=request)
return base
def __call__(self, base, econtext, call, path_items):
request = econtext.get('request')
if path_items:
base = self.traverse(base, request, path_items)
if call is False:
return base
if getattr(base, '__call__', _marker) is not _marker or \
callable(base):
base = render(base, econtext)
return base
class TrustedBoboAwareZopeTraverse(BoboAwareZopeTraverse):
traverse_method = 'unrestrictedTraverse'
__slots__ = ()
def __call__(self, base, econtext, call, path_items):
request = econtext.get('request')
base = self.traverse(base, request, path_items)
if call is False:
return base
if getattr(base, '__call__', _marker) is not _marker or \
isinstance(base, type):
return base()
return base
class PathExpr(expressions.PathExpr):
exceptions = zope2_exceptions
traverser = Static(template(
"cls()", cls=Symbol(BoboAwareZopeTraverse), mode="eval"
))
class TrustedPathExpr(PathExpr):
traverser = Static(template(
"cls()", cls=Symbol(TrustedBoboAwareZopeTraverse), mode="eval"
))
class NocallExpr(expressions.NocallExpr, PathExpr):
pass
class ExistsExpr(expressions.ExistsExpr):
exceptions = zope2_exceptions
class RestrictionTransform(NodeTransformer):
secured = {
'_getattr_': guarded_getattr,
'_getitem_': guarded_getitem,
'_apply_': guarded_apply,
'_getiter_': guarded_iter,
'_inplacevar_': protected_inplacevar,
}
def visit_Name(self, node):
value = self.secured.get(node.id)
if value is not None:
return Symbol(value)
return node
class UntrustedPythonExpr(expressions.PythonExpr):
restricted_python_transformer = RestrictingNodeTransformer()
page_templates_expression_transformer = RestrictionTransform()
# Make copy of parent expression builtins
builtins = expressions.PythonExpr.builtins.copy()
# Update builtins with Restricted Python utility builtins
builtins.update({
name: static(builtin) for (name, builtin) in utility_builtins.items()
})
def parse(self, string):
encoded = string.encode('utf-8')
node = parse(encoded, mode='eval')
# Run Node Transformation from RestrictedPython:
self.restricted_python_transformer.visit(node)
# Run PageTemplate.expression RestrictedPython Transform:
self.page_templates_expression_transformer.visit(node)
return node
# Whether an engine is Zope aware does not depend on the class
# but how it is configured - especially, that it uses a Zope aware
# `PathExpr` implementation.
# Nevertheless, we mark the class as "Zope aware" for simplicity
# assuming that users of the class use a proper `PathExpr`
@implementer(IZopeAwareEngine)
class ChameleonEngine(ExpressionEngine):
"""Expression engine for ``chameleon.tales``.
Only partially implemented: its ``compile`` is currently unusable
"""
def compile(self, expression):
raise NotImplementedError()
types = dict(
python=UntrustedPythonExpr,
string=StringExpr,
not_=NotExpr,
exists=ExistsExpr,
path=PathExpr,
provider=expressions.ProviderExpr,
nocall=NocallExpr)
def createChameleonEngine(types=types, untrusted=True, **overrides):
e = ChameleonEngine()
def norm(k):
return k[:-1] if k.endswith("_") else k
e.untrusted = untrusted
ts = e.types
for k, v in types.items():
k = norm(k)
e.registerType(k, v)
for k, v in overrides.items():
k = norm(k)
if k in ts:
del ts[k]
e.registerType(k, v)
return e
def createTrustedChameleonEngine(**overrides):
ovr = dict(python=expressions.PythonExpr, path=TrustedPathExpr)
ovr.update(overrides)
return createChameleonEngine(untrusted=False, **ovr)
_engine = createChameleonEngine()
def getEngine():
return _engine
_trusted_engine = createTrustedChameleonEngine()
def getTrustedEngine():
return _trusted_engine
| 26.102564 | 77 | 0.695809 |
690fadcc0a20e867b674f68a250f6e3713d6497f
| 5,522 |
py
|
Python
|
Demo/threads/Coroutine.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1 |
2021-12-26T22:20:34.000Z
|
2021-12-26T22:20:34.000Z
|
Demo/threads/Coroutine.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1 |
2015-10-29T20:51:31.000Z
|
2015-10-29T20:51:31.000Z
|
Demo/threads/Coroutine.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2 |
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
# Coroutine implementation using Python threads.
#
# Combines ideas from Guido's Generator module, and from the coroutine
# features of Icon and Simula 67.
#
# To run a collection of functions as coroutines, you need to create
# a Coroutine object to control them:
# co = Coroutine()
# and then 'create' a subsidiary object for each function in the
# collection:
# cof1 = co.create(f1 [, arg1, arg2, ...]) # [] means optional,
# cof2 = co.create(f2 [, arg1, arg2, ...]) #... not list
# cof3 = co.create(f3 [, arg1, arg2, ...])
# etc. The functions need not be distinct; 'create'ing the same
# function multiple times gives you independent instances of the
# function.
#
# To start the coroutines running, use co.tran on one of the create'd
# functions; e.g., co.tran(cof2). The routine that first executes
# co.tran is called the "main coroutine". It's special in several
# respects: it existed before you created the Coroutine object; if any of
# the create'd coroutines exits (does a return, or suffers an unhandled
# exception), EarlyExit error is raised in the main coroutine; and the
# co.detach() method transfers control directly to the main coroutine
# (you can't use co.tran() for this because the main coroutine doesn't
# have a name ...).
#
# Coroutine objects support these methods:
#
# handle = .create(func [, arg1, arg2, ...])
# Creates a coroutine for an invocation of func(arg1, arg2, ...),
# and returns a handle ("name") for the coroutine so created. The
# handle can be used as the target in a subsequent .tran().
#
# .tran(target, data=None)
# Transfer control to the create'd coroutine "target", optionally
# passing it an arbitrary piece of data. To the coroutine A that does
# the .tran, .tran acts like an ordinary function call: another
# coroutine B can .tran back to it later, and if it does A's .tran
# returns the 'data' argument passed to B's tran. E.g.,
#
# in coroutine coA in coroutine coC in coroutine coB
# x = co.tran(coC) co.tran(coB) co.tran(coA,12)
# print x # 12
#
# The data-passing feature is taken from Icon, and greatly cuts
# the need to use global variables for inter-coroutine communication.
#
# .back( data=None )
# The same as .tran(invoker, data=None), where 'invoker' is the
# coroutine that most recently .tran'ed control to the coroutine
# doing the .back. This is akin to Icon's "&source".
#
# .detach( data=None )
# The same as .tran(main, data=None), where 'main' is the
# (unnameable!) coroutine that started it all. 'main' has all the
# rights of any other coroutine: upon receiving control, it can
# .tran to an arbitrary coroutine of its choosing, go .back to
# the .detach'er, or .kill the whole thing.
#
# .kill()
# Destroy all the coroutines, and return control to the main
# coroutine. None of the create'ed coroutines can be resumed after a
# .kill(). An EarlyExit exception does a .kill() automatically. It's
# a good idea to .kill() coroutines you're done with, since the
# current implementation consumes a thread for each coroutine that
# may be resumed.
import _thread as thread
import sync
class _CoEvent:
def __init__(self, func):
self.f = func
self.e = sync.event()
def __repr__(self):
if self.f is None:
return 'main coroutine'
else:
return 'coroutine for func ' + self.f.__name__
def __hash__(self):
return id(self)
    def __eq__(self, other):
        # Python 3 has no __cmp__/cmp; identity equality matches the id()-based __hash__ above.
        return self is other
def resume(self):
self.e.post()
def wait(self):
self.e.wait()
self.e.clear()
class Killed(Exception): pass
class EarlyExit(Exception): pass
class Coroutine:
def __init__(self):
self.active = self.main = _CoEvent(None)
self.invokedby = {self.main: None}
self.killed = 0
self.value = None
self.terminated_by = None
def create(self, func, *args):
me = _CoEvent(func)
self.invokedby[me] = None
thread.start_new_thread(self._start, (me,) + args)
return me
def _start(self, me, *args):
me.wait()
if not self.killed:
try:
try:
me.f(*args)
except Killed:
pass
finally:
if not self.killed:
self.terminated_by = me
self.kill()
def kill(self):
if self.killed:
raise TypeError('kill() called on dead coroutines')
self.killed = 1
for coroutine in self.invokedby.keys():
coroutine.resume()
def back(self, data=None):
return self.tran( self.invokedby[self.active], data )
def detach(self, data=None):
return self.tran( self.main, data )
def tran(self, target, data=None):
if target not in self.invokedby:
raise TypeError('.tran target %r is not an active coroutine' % (target,))
if self.killed:
raise TypeError('.tran target %r is killed' % (target,))
self.value = data
me = self.active
self.invokedby[target] = me
self.active = target
target.resume()
me.wait()
if self.killed:
if self.main is not me:
raise Killed
if self.terminated_by is not None:
raise EarlyExit('%r terminated early' % (self.terminated_by,))
return self.value
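# A minimal usage sketch (not part of the original demo and never invoked
# here): it simply mirrors the coA/coB/coC example from the comment block
# above, so the names and flow are illustrative only.
def _demo():
    co = Coroutine()
    def fA():
        x = co.tran(cofC)         # start C; B later trans back here with 12
        print('fA received', x)   # prints 12, as in the comment example
        co.detach()               # hand control back to the main coroutine
    def fB():
        co.tran(cofA, 12)         # resume A, passing it the value 12
    def fC():
        co.tran(cofB)             # start B
    cofA = co.create(fA)
    cofB = co.create(fB)
    cofC = co.create(fC)
    co.tran(cofA)                 # main resumes here once fA detaches
    co.kill()                     # tear down the remaining coroutine threads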
# end of module
| 34.5125 | 85 | 0.628214 |
a89597857a42c548b374e4b23a9a419b08ce03c5
| 587 |
py
|
Python
|
netbox/payment/navigation.py
|
cbipoe3ka/new-netbox-2.9
|
ce54c710034d58f99dbcefc1ad16c947a8b78dc8
|
[
"Apache-2.0"
] | null | null | null |
netbox/payment/navigation.py
|
cbipoe3ka/new-netbox-2.9
|
ce54c710034d58f99dbcefc1ad16c947a8b78dc8
|
[
"Apache-2.0"
] | null | null | null |
netbox/payment/navigation.py
|
cbipoe3ka/new-netbox-2.9
|
ce54c710034d58f99dbcefc1ad16c947a8b78dc8
|
[
"Apache-2.0"
] | null | null | null |
from extras.plugins import PluginMenuButton, PluginMenuItem
from utilities.choices import ButtonColorChoices
menu_items = (
PluginMenuItem(
link='plugins:payment:payment_list',
link_text='Payments list',
permissions=['payment.view_payment'],
buttons= (
PluginMenuButton(
link='plugins:payment:payment_add',
permissions=['payment.add_payment'],
title='Add payment',
icon_class='fa fa-plus',
color=ButtonColorChoices.GREEN,
),
)
),
)
| 26.681818 | 59 | 0.586031 |
166922b5aee4550fea294cb262434e18ab441e2b
| 3,794 |
py
|
Python
|
uvicore/auth/services.py
|
uvicore/framework
|
9c21b85e9e470c6d789899340332a9abd0b26ab1
|
[
"MIT"
] | 11 |
2021-03-22T22:07:49.000Z
|
2022-03-08T16:18:33.000Z
|
uvicore/auth/services.py
|
uvicore/framework
|
9c21b85e9e470c6d789899340332a9abd0b26ab1
|
[
"MIT"
] | 12 |
2021-03-04T05:51:24.000Z
|
2021-09-22T05:16:18.000Z
|
uvicore/auth/services.py
|
uvicore/framework
|
9c21b85e9e470c6d789899340332a9abd0b26ab1
|
[
"MIT"
] | 2 |
2021-03-25T14:49:56.000Z
|
2021-11-17T23:20:29.000Z
|
import uvicore
from uvicore.http.provider import Http
from uvicore.database.provider import Db
from uvicore.package import ServiceProvider
from uvicore.support.dumper import dump, dd # type: ignore
@uvicore.provider()
class Auth(ServiceProvider, Db, Http):
def register(self) -> None:
# Register IoC bindings
# self.bind(
# name='Auth',
# object='uvicore.auth.auth._Auth',
# aliases=['auth']
# )
# Bind Tables
#self.bind('uvicore.auth.database.tables.groups.Groups', 'uvicore.auth.database.tables.groups._Groups', singleton=True)
#self.bind('uvicore.auth.database.tables.user_info.UserInfo', 'uvicore.auth.database.tables.user_info._UserInfo', singleton=True)
#self.bind('uvicore.auth.database.tables.users.Users', 'uvicore.auth.database.tables.users._Users', singleton=True)
# Bind Models
#self.bind('uvicore.auth.models.group.Group', 'uvicore.auth.models.group.GroupModel')
#self.bind('uvicore.auth.models.user.User', 'uvicore.auth.models.user.UserModel')
#self.bind('uvicore.auth.models.user_info.UserInfo', 'uvicore.auth.models.user_info.UserInfoModel')
# Register config
self.configs([
{'key': self.name, 'module': 'uvicore.auth.config.package.config'}
])
def boot(self) -> None:
# Define service provider registration control
self.registers(self.package.config.registers)
# Define Database Connections
self.connections(
connections=self.package.config.database.connections,
default=self.package.config.database.default
)
# Define all tables/models used by this package.
# The goal is to load up all SQLAlchemy tables for complete metedata definitions.
# If you separate tables vs models use self.tables(['myapp.database.tables.*])
# If you use models only, or models with inline tables then use self.models(['myapp.models.*])
# Optionally define your own models/__init__.py and import myapp.models to load every table.
# Order does not matter as they are sorted topologically for ForeignKey dependencies
# Using __init__.py now, so just import it
#from uvicore.auth import models
#dump(self.package)
self.models(['uvicore.auth.models'])
# Define data seeders
# NO - Auth shouldn't do its own seeding. Let the app do it all.
# You think? What if a package is an app, then it runs seeders, but if that app is used
        # inside another package, you can't stop it from seeding. Need to figure out overriding the seeders array better
self.seeders(['uvicore.auth.database.seeders.seed'])
# Define view and asset paths and configure the templating system
self.define_views()
# Define Web and API routes and prefixes
self.define_routes()
def define_views(self) -> None:
"""Define view and asset paths and configure the templating system"""
# Define view paths
#self.views(['uvicore.auth.http.views'])
# Define public paths
self.public(['uvicore.auth.http.public'])
# Define asset paths
self.assets(['uvicore.auth.http.public.assets'])
def define_routes(self) -> None:
"""Define Web and API routes and prefixes"""
# Define web routes
# self.web_routes(
# module='uvicore.auth.http.routes.web.Web',
# prefix=self.package.config.web.prefix,
# #name_prefix=None,
# )
# Define api routes
self.api_routes(
module='uvicore.auth.http.routes.api.Api',
prefix=self.package.config.api.prefix,
#name_prefix='api',
)
| 39.936842 | 137 | 0.650501 |
a53dbf2dee0a55ca6ae0acd9dcea926b37c89ee5
| 11,270 |
py
|
Python
|
sudoku.py
|
JaakkoRoponen/sudoku-solver
|
44355b5ce176f4f2210b5f7a1206c3f5786088c5
|
[
"MIT"
] | null | null | null |
sudoku.py
|
JaakkoRoponen/sudoku-solver
|
44355b5ce176f4f2210b5f7a1206c3f5786088c5
|
[
"MIT"
] | null | null | null |
sudoku.py
|
JaakkoRoponen/sudoku-solver
|
44355b5ce176f4f2210b5f7a1206c3f5786088c5
|
[
"MIT"
] | null | null | null |
import itertools
import random
import numpy as np
class Sudoku:
"""Class for creating and solving Sudoku puzzles"""
def _get_square(self, row, col):
"""
Returns coordinates for the square where the row,col coordinate is
"""
top = int(row / 3) * 3 # (0 or 1 or 2) * 3
left = int(col / 3) * 3 # (0 or 1 or 2) * 3
return {
'top': top,
'left': left,
'bottom': top + 3, # slicing high bound is exclusive
'right': left + 3}
def _flip_sqrs_and_rows(self):
"""
Moves squares to rows and rows to squares.
Running twice returns original state.
[:,0,0:3]->[:,0,0:3], [:,1,0:3]->[:,0,3:6], [:,2,0:3]->[:,0,6:9],
[:,0,3:6]->[:,1,0:3], [:,1,3:6]->[:,1,3:6], [:,2,3:6]->[:,1,6:9],
...
[:,6,6:9]->[:,8,0:3], [:,7,6:9]->[:,8,3:6], [:,8,6:9]->[:,8,6:9]
"""
flipped = np.copy(self.positions)
for i, j in np.ndindex(3, 9):
flipped[:, int(j/3)+i*3, (j % 3)*3:(j % 3)*3+3] = \
self.positions[:, (j % 3)+i*3, int(j/3)*3:int(j/3)*3+3]
self.positions = flipped
def _search_locked(self, sqr, possibilities):
"""
Searches locked positions
Searches for squares where all possible positions for a number
are on a same row or col. Positions outside the square on the same
row / col can be eliminated.
"""
top = int(sqr / 3) * 3 # row of top left corner of sqr
left = sqr % 3 * 3 # col of top left corner of sqr
# numbers that have 2 or 3 possible positions in a square
numbers = [i for i in range(9) if 2 <= possibilities[i] <= 3]
for n in numbers:
coords = np.where(self.positions[n, top:top+3, left:left+3])
# put row coords to left column and col coords to right
coords = np.transpose(coords)
# check if all row or col coords are the same
# (all values in the coords columns are the same
# as the value on the first row)
row, col = np.all(coords == coords[0, :], axis=0)
if row:
# eliminate positions on the same row outside of sqr
outside_of_sqr = [i for i in range(9)
if i not in range(left, left + 3)]
self.positions[n, top + coords[0, 0], outside_of_sqr] = False
elif col:
# eliminate positions on the same col outside of sqr
outside_of_sqr = [i for i in range(9)
if i not in range(top, top + 3)]
self.positions[n, outside_of_sqr, left + coords[0, 1]] = False
def _search_hidden_and_naked(self, row, col):
"""
Searches for naked/hidden pairs/triples/quads
Hidden:
If the number of possible positions for a number matches with another
number (on the same row/col/sqr) with the same possible positions and
there are e.g. only three possible positions for the three numbers, a
hidden triple has been found. It is important to note that not all
three numbers must be in all three positions, but there must not be
more than three positions for the three numbers all together.
Naked:
If the number of possible numbers in a position matches with another
position (on the same row/col/sqr) with the same possible numbers, and
there are e.g. only three possible numbers and three positions, a
naked triple has been found. It is important to note that not all
three positions must contain all three numbers, but there must not be
more than three numbers in the three positions all together.
Pair and quads are searched the same way, but there must be two
or four allowed positions/numbers for the same numbers/positions.
After finding a pair/triple/quad, other numbers in the same
position / positions for the same numbers, can be set False.
Finally transposes numbers and rows/cols each time to search for
hidden/naked alternately.
"""
# how many possible positions/numbers for the given number/position
possibilities = np.sum(self.positions[:, row, col], axis=1)
# only search up to quads
numbers = np.array([i for i in range(9) if 2 <= possibilities[i] <= 4])
for n in numbers:
equal = np.all( # find equal (or subset) rows/cols/sqrs
np.logical_xor( # check for change after masking
self.positions[numbers, row, col],
self.positions[n, row, col] *
self.positions[numbers, row, col]
) == 0,
axis=1)
if np.sum(equal) == possibilities[n]: # pair/triple/quad found
self.positions[
[i for i in range(9) if i not in numbers[equal]],
row, col] *= np.invert(self.positions[n, row, col])
# search for hidden/naked by transposing numbers and cols/rows
if isinstance(row, int): # rows -> transpose numbers and cols
self.positions = np.transpose(self.positions, (2, 1, 0))
else: # cols -> transpose numbers and rows
self.positions = np.transpose(self.positions, (1, 0, 2))
def _set_number(self, number, row, col):
"""
Sets number at row,col position
Sets positions False for the given number on the same row, col and
square, and for all other numbers with the same row,col coordinate
(e.g. if 2,3 is 4 then 5 can't be at 2,3).
Number must be given in 1-9
"""
if number == 0:
return False
self.puzzle[row, col] = number
number -= 1 # from sudoku board numbers (1-9) to 0-based index
sqr = self._get_square(row, col)
# eliminate positions on same axes and square
self.positions[number, row, :] = False
self.positions[number, :, col] = False
self.positions[number, sqr['top']:sqr['bottom'],
sqr['left']:sqr['right']] = False
self.positions[:, row, col] = False
self.positions[number, row, col] = True
# eliminate naked/hidden/locked pairs/triples/quads
for x in range(9): # row / col / sqr
self._search_hidden_and_naked(x, slice(9)) # rows (hidden)
self._search_hidden_and_naked(x, slice(9)) # rows (naked)
self._search_hidden_and_naked(slice(9), x) # cols (hidden)
self._search_hidden_and_naked(slice(9), x) # cols (naked)
self._flip_sqrs_and_rows()
self._search_hidden_and_naked(x, slice(9)) # sqrs (hidden)
self._search_hidden_and_naked(x, slice(9)) # sqrs (naked)
# possible positions available for each number in a square
possibilities = np.sum(self.positions[:, x, :], axis=1)
self._flip_sqrs_and_rows()
self._search_locked(x, possibilities) # sqrs (locked)
return True
def _init_positions(self):
"""Sets positions for puzzle cells"""
self.positions = np.full((9, 9, 9), True)
non_zero_coords = zip(*np.where(self.puzzle != 0))
for row, col in non_zero_coords:
self._set_number(self.puzzle[row, col], row, col)
def _get_number(self, row, col):
"""
Gets number at row,col position
Checks positions, if row,col has a True value and that it's the only
True value on the same row / col / square. Also checks, if only one
number is possible based on the board.
Returns a number 1-9 or 0 (=empty)
"""
sqr = self._get_square(row, col)
for number in range(9):
if self.positions[number, row, col] and \
(np.sum(self.positions[:, row, col]) == 1 or
np.sum(self.positions[number, row, :]) == 1 or
np.sum(self.positions[number, :, col]) == 1 or
np.sum(self.positions[number, sqr['top']:sqr['bottom'],
sqr['left']:sqr['right']]) == 1):
return number + 1 # from 0-index to board numbers (1-9)
return 0
def _solve(self):
"""
Iterates Sudoku board until all positions are solved or no more
numbers are solvable
"""
numbers_solved = np.count_nonzero(self.puzzle)
zero_coords = zip(*np.where(self.puzzle == 0))
for row, col in zero_coords:
# get number by deducing it from other numbers and then set it
self._set_number(self._get_number(row, col), row, col)
if numbers_solved < np.count_nonzero(self.puzzle) < 9 * 9:
self._solve()
def solve(self, puzzle, solve=True):
"""Solves the given Sudoku puzzle"""
self.puzzle = np.copy(puzzle) # preserve puzzle given in arguments
self._init_positions()
if solve:
self._solve()
return self.puzzle
def get_random_number(self, puzzle, row, col):
"""
Gives "Random" number for the given row / col position
Returns:
1. the correct number (if only one)
2. one of the possibilities (if many)
3. 0 if no possible numbers
"""
number = self._get_number(row, col) # 1-9 or 0
if not number:
possible_numbers = np.where(self.positions[:, row, col])[0]
if possible_numbers.size == 0: # impossible position
return 0
number = np.random.choice(possible_numbers) + 1 # 0-8 -> 1-9
return number
def create_puzzle(self):
"""Creates a new sudoku puzzle"""
while True:
self.puzzle = np.zeros((9, 9), int)
self.positions = np.full((9, 9, 9), True)
non_deduced_values = []
# create list of board coordinates
coords = list(itertools.product(range(9), range(9)))
while coords:
# pop random coordinate
row, col = coords.pop(np.random.randint(len(coords)))
# put random number from possible numbers to the coordinate
possible_numbers = np.where(self.positions[:, row, col])[0]
if possible_numbers.size == 0: # impossible position -> retry
break
number = np.random.choice(possible_numbers)
self._set_number(number+1, row, col)
non_deduced_values.append((row, col))
# start solving after setting 8 numbers
if len(coords) <= 81 - 8:
self._solve()
# update coordinates with non-solved positions
coords = list(zip(*np.where(self.puzzle == 0)))
# try again if puzzle became unsolvable
if np.count_nonzero(self.puzzle) == 9 * 9:
break
# remove deduced values from puzzle
deduced = self.puzzle.copy()
deduced[tuple(zip(*non_deduced_values))] = 0
self.puzzle -= deduced
return self.puzzle
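# A brief usage sketch (not part of the original module), guarded so importing
# this file stays side-effect free: create a puzzle, then solve it with the
# same class.
if __name__ == '__main__':
    sudoku = Sudoku()
    puzzle = sudoku.create_puzzle()    # 9x9 array, 0 marks an empty cell
    solution = sudoku.solve(puzzle)    # returns a solved copy; puzzle is left as given
    print(puzzle)
    print(solution)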
| 43.015267 | 79 | 0.565129 |
1b579f594f5cf06701a36a04347c2a0975c62ec7
| 2,534 |
py
|
Python
|
tidal_precession.py
|
ddeb32/APR_Testing
|
08cf346e6047f75ad57a7c9b497ea2dd9cb13f59
|
[
"MIT"
] | null | null | null |
tidal_precession.py
|
ddeb32/APR_Testing
|
08cf346e6047f75ad57a7c9b497ea2dd9cb13f59
|
[
"MIT"
] | null | null | null |
tidal_precession.py
|
ddeb32/APR_Testing
|
08cf346e6047f75ad57a7c9b497ea2dd9cb13f59
|
[
"MIT"
] | null | null | null |
#####################################################################
### Evaluating the tidal component to the precession rate ###
#####################################################################
import numpy as np
import pandas as pd
from numpy import pi
from scipy import integrate
import scipy.constants as const
from scipy import interpolate
import math
import matplotlib.pyplot as plt
from mpmath import *
from matplotlib.ticker import AutoMinorLocator
import pdfkit as pdf
M_sun = 1.98847*10**30
R_sun = 6.957*10**8
G = 6.67430*10**(-11)
c = 299792458 #--values taken from physics.nist.gov
k = G/c**2.0
fact = 1.476981739 #--to get G/c^2 in M_sun and Kms.
exp = math.exp
sin = math.sin
log = math.log #--takes arguments ( , ) where the second one is the base, by default e.
#--------------------------------------------------------------------#
def precessionEQ (Pb, Rns, aNS, Mbh, Mns, e, k2):
#--the value found will be in SI, angle/s.
fact1 = 30*pi/Pb
fact2 = (Rns/aNS)**5
fact3 = Mbh/Mns
fact4 = (1 + 3/2*(e**2) + 1/8*(e**4)) / ((1-e**2)**5)
fact5 = k2
return(fact1 * fact2 * fact3 * fact4 * fact5)
def aNSeq (Mbh, Mns, Pb):
aR = ( (G*(Mbh+Mns)*(Pb**2)) / (4*(pi**2)) )**(1/3)
aNS = ( Mbh/(Mbh+Mns) )*aR
return(aR, aNS)
#--Main Program--#
def main ():
print('\n Give the parameter values - ')
Pb = float(input('\tPb (Hr):\t'))
Rns = float(input('\tRns (km):\t'))
Mbh = float(input('\tMbh (M_sun):\t'))
Mns = float(input('\tMns (M_sun):\t'))
e = float(input('\te:\t'))
k2 = float(input('\tk2:\t'))
#--Converting to SI--#
Mbh, Mns = Mbh*M_sun, Mns*M_sun #--masses in Kg.
Rns = Rns*1000.0 #--distances in meter.
Pb = Pb*3600.0 #--times in second.
aR, aNS = aNSeq(Mbh, Mns, Pb)
precession = precessionEQ(Pb, Rns, aNS, Mbh, Mns, e, k2)
precession = precession*1.807e+9 #--rad/sec to deg/year.
print('We get - ')
"""print('Pb:\t', Pb, ' hours')
print('Rns:\t', Rns/1000, ' km')
print('Mbh:\t', Mbh/M_sun, ' M_sun')
print('Mns:\t', Mns/M_sun, ' M_sun')
print('e:\t', e)
print('k2:\t', k2)
print('aNS:\t', aNS/1000, ' km')"""
print(' omegadot_tidal:\t', precession, ' deg/yr')
main()
############################--End of Program--##########################
########################################################################
| 28.155556 | 98 | 0.480268 |
0bc0a183901e3a618faf1eab6fa1a8dd805e5e09
| 549 |
py
|
Python
|
foodcartapp/migrations/0048_auto_20201217_1212.py
|
Sam1808/Add-orders-to-the-online-store
|
2ca137915689df6012746c9d5e3b6307b53e112b
|
[
"MIT"
] | null | null | null |
foodcartapp/migrations/0048_auto_20201217_1212.py
|
Sam1808/Add-orders-to-the-online-store
|
2ca137915689df6012746c9d5e3b6307b53e112b
|
[
"MIT"
] | null | null | null |
foodcartapp/migrations/0048_auto_20201217_1212.py
|
Sam1808/Add-orders-to-the-online-store
|
2ca137915689df6012746c9d5e3b6307b53e112b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-12-17 12:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('foodcartapp', '0047_auto_20201216_2004'),
]
operations = [
migrations.AlterField(
model_name='orderdetails',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='foodcartapp.Product', verbose_name='бургер'),
),
]
| 27.45 | 169 | 0.666667 |
bae67ecabe1b047880d24559c3cb7b1aa27dfb89
| 461 |
py
|
Python
|
EMDetector/train/augment/contrast.py
|
jabae/detectEM
|
2d1a5116164d0bed0a8ea767a227d05a8970a448
|
[
"MIT"
] | 1 |
2021-02-14T06:41:05.000Z
|
2021-02-14T06:41:05.000Z
|
EMDetector/train/augment/contrast.py
|
jabae/detectEM
|
2d1a5116164d0bed0a8ea767a227d05a8970a448
|
[
"MIT"
] | null | null | null |
EMDetector/train/augment/contrast.py
|
jabae/detectEM
|
2d1a5116164d0bed0a8ea767a227d05a8970a448
|
[
"MIT"
] | null | null | null |
import numpy as np
def contrast_augment(sample):
"""Performs contrast/brightness augmentation on img.
Args:
sample: Dictionary of (np array: <ch,z,x,y>) image and mask
"""
f_s = 1
f_b = 0.5
a = 1 + (np.random.rand() - 0.5) * f_s
b = (np.random.rand() - 0.5) * f_b
g = (np.random.rand()*2 - 1)
sample["image"] = sample["image"] * a + b
sample["image"] = np.clip(sample["image"], 0, 1)
sample["image"] = sample["image"]**(2**g)
return sample
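# A small usage sketch (not part of the original module), assuming an image
# already scaled to [0, 1]; the shapes follow the <ch,z,x,y> convention above.
if __name__ == "__main__":
  example = {"image": np.random.rand(1, 8, 64, 64), "mask": np.ones((1, 8, 64, 64))}
  augmented = contrast_augment(example)
  print(augmented["image"].min(), augmented["image"].max())  # stays within [0, 1]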
| 20.043478 | 61 | 0.607375 |
2d1b761229331ea1c9891c2193efe1b693996f13
| 4,416 |
py
|
Python
|
supplier/views.py
|
chinxianjun2016/GreaterWMS
|
aacd0e15e0114f103eb57002e93670c008cce63b
|
[
"Apache-2.0"
] | null | null | null |
supplier/views.py
|
chinxianjun2016/GreaterWMS
|
aacd0e15e0114f103eb57002e93670c008cce63b
|
[
"Apache-2.0"
] | null | null | null |
supplier/views.py
|
chinxianjun2016/GreaterWMS
|
aacd0e15e0114f103eb57002e93670c008cce63b
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
class APIViewSet(viewsets.ModelViewSet):
"""
retrieve:
Response a data list(get)
list:
Response a data list(all)
create:
Create a data line(post)
delete:
Delete a data line(delete)
partial_update:
Partial_update a data(patch:partial_update)
update:
Update a data(put:update)
"""
queryset = ListModel.objects.all()
serializer_class = serializers.SupplierGetSerializer
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return self.queryset.filter(openid=self.request.auth.openid, is_delete=False)
else:
return self.queryset.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return self.queryset.none()
def get_serializer_class(self):
if self.action == 'list':
return serializers.SupplierGetSerializer
elif self.action == 'retrieve':
return serializers.SupplierGetSerializer
elif self.action == 'create':
return serializers.SupplierPostSerializer
elif self.action == 'update':
return serializers.SupplierUpdateSerializer
elif self.action == 'partial_update':
return serializers.SupplierPartialUpdateSerializer
elif self.action == 'destroy':
return serializers.SupplierGetSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = request.data
data['openid'] = request.auth.openid
if self.queryset.filter(openid=data['openid'], supplier_name=data['supplier_name'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
else:
data = request.data
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
| 37.74359 | 118 | 0.639719 |
9a64af6060ed3aa8f41e22948952679e479f3b7e
| 466 |
py
|
Python
|
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_blinded_a.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_blinded_a.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_blinded_a.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/medicine/crafted/shared_medpack_blinded_a.iff"
result.attribute_template_id = 7
result.stfName("medicine_name","medic_blinded_a")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.411765 | 82 | 0.738197 |
923b31bcd10a8d4a66d790edf51fc2941b1c3ad5
| 2,154 |
py
|
Python
|
src/app.py
|
amiiiirrrr/Machine_learning_task
|
2d066d06ea8622b06bd9e4e9f791074a44f80173
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
amiiiirrrr/Machine_learning_task
|
2d066d06ea8622b06bd9e4e9f791074a44f80173
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
amiiiirrrr/Machine_learning_task
|
2d066d06ea8622b06bd9e4e9f791074a44f80173
|
[
"Apache-2.0"
] | null | null | null |
"""
app.py
"""
import pika
import json
from test_model import TestModel
__author__ = "Seyed Amir Mousavi"
__license__ = "Public Domain"
__version__ = "1.0.0"
__status__ = "Production"
def run():
"""
To process requests
"""
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=""))
channel = connection.channel()
channel.queue_declare(queue="")
test_model = TestModel()
def on_request(ch, method, props, body):
try:
data = body.decode("utf-8")
# BackEnd Data
input_json = json.loads(data)
# AI output data
if "data" in input_json and input_json['data'] == "checkHealth":
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(
correlation_id=props.correlation_id),
body=json.dumps({"result": "ok"}))
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return  # health check handled; skip the model call and avoid a double ack
result = test_model.run_flask(input_json)
ch.basic_publish(exchange="",
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=props.correlation_id),
body=json.dumps({"result": result}))
ch.basic_ack(delivery_tag=method.delivery_tag)
except Exception as e:
print(e)
ch.basic_publish(exchange="",
routing_key=props.reply_to,
properties=pika.BasicProperties(
correlation_id=props.correlation_id),
body=json.dumps({"result": "error"}))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue="",
on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming()
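# Illustrative request/response shapes handled by on_request above (a sketch,
# not an exhaustive contract):
#   {"data": "checkHealth"}         -> {"result": "ok"}
#   {<model input fields>}          -> {"result": <output of test_model.run_flask>}
#   any exception while processing  -> {"result": "error"}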
if __name__ == "__main__":
run()
| 33.65625 | 99 | 0.539926 |
3bb557f49546b91fd8610b75d10f654ae858771a
| 4,755 |
py
|
Python
|
python/hopsworks/engine/execution_engine.py
|
robzor92/hopsworks-api
|
94a0cfabedc0278e5d5e0eec699317073a65a126
|
[
"Apache-2.0"
] | null | null | null |
python/hopsworks/engine/execution_engine.py
|
robzor92/hopsworks-api
|
94a0cfabedc0278e5d5e0eec699317073a65a126
|
[
"Apache-2.0"
] | 9 |
2022-03-18T08:21:41.000Z
|
2022-03-28T14:46:31.000Z
|
python/hopsworks/engine/execution_engine.py
|
robzor92/hopsworks-api
|
94a0cfabedc0278e5d5e0eec699317073a65a126
|
[
"Apache-2.0"
] | 3 |
2022-03-14T08:20:45.000Z
|
2022-03-16T11:15:04.000Z
|
#
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hopsworks.core import dataset_api, execution_api
import os
import logging
import time
import uuid
class ExecutionEngine:
def __init__(self, project_id=None):
self._dataset_api = dataset_api.DatasetApi(project_id)
self._execution_api = execution_api.ExecutionsApi(project_id)
self._log = logging.getLogger(__name__)
def download_logs(self, execution):
"""Download execution logs to current directory
:param execution: execution to download logs for
:type execution: Execution
:return: downloaded stdout and stderr log path
:rtype: str, str
"""
job_logs_dir = "logs-job-{}-exec-{}_{}".format(
execution.job_name, str(execution.id), str(uuid.uuid4())[:16]
)
download_log_dir = os.path.join(os.getcwd(), job_logs_dir)
if not os.path.exists(download_log_dir):
os.mkdir(download_log_dir)
out_path = None
if execution.stdout_path is not None and self._dataset_api.exists(
execution.stdout_path
):
out_path = self._dataset_api.download(
execution.stdout_path, download_log_dir
)
err_path = None
if execution.stderr_path is not None and self._dataset_api.exists(
execution.stderr_path
):
err_path = self._dataset_api.download(
execution.stderr_path, download_log_dir
)
return out_path, err_path
def wait_until_finished(self, job, execution):
"""Wait until execution reaches terminal state
        :param job: job of the execution
        :type job: Job
:param execution: execution to monitor
:type execution: Execution
:return: The final Execution object
        :rtype: Execution
"""
is_yarn_job = (
job.job_type.lower() == "spark"
or job.job_type.lower() == "pyspark"
or job.job_type.lower() == "flink"
)
updated_execution = self._execution_api._get(job, execution.id)
execution_state = None
while updated_execution.success is None:
updated_execution = self._execution_api._get(job, execution.id)
if execution_state != updated_execution.state:
if is_yarn_job:
self._log.info(
"Waiting for execution to finish. Current state: {}. Final status: {}".format(
updated_execution.state, updated_execution.final_status
)
)
else:
self._log.info(
"Waiting for execution to finish. Current state: {}".format(
updated_execution.state
)
)
execution_state = updated_execution.state
time.sleep(3)
        # wait for log files to be aggregated, max 2 minutes
await_time = 40
log_aggregation_files_exist = False
self._log.info("Waiting for log aggregation to finish.")
while not log_aggregation_files_exist and await_time >= 0:
updated_execution = self._execution_api._get(job, execution.id)
log_aggregation_files_exist = self._dataset_api.exists(
updated_execution.stdout_path
) and self._dataset_api.exists(updated_execution.stderr_path)
await_time -= 1
time.sleep(3)
if is_yarn_job and not updated_execution.success:
self._log.error(
"Execution failed with status: {}. See the logs for more information.".format(
updated_execution.final_status
)
)
elif not is_yarn_job and not updated_execution.success:
self._log.error(
"Execution failed with status: {}. See the logs for more information.".format(
updated_execution.state
)
)
else:
self._log.info("Execution finished successfully.")
return updated_execution
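# Rough usage sketch (not part of the library code): `project`, `job` and
# `execution` stand for objects obtained elsewhere through the hopsworks API,
# so this is only an illustration of how the engine above is driven.
#
#   engine = ExecutionEngine(project_id=project.id)
#   execution = engine.wait_until_finished(job, execution)
#   stdout_path, stderr_path = engine.download_logs(execution)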
| 37.440945 | 102 | 0.609043 |
7f297181c692fc9537ab582c38bc14cccd652d05
| 3,546 |
py
|
Python
|
tests/data/test_load_wyscout.py
|
BWyckaert/socceraction
|
5fd6bd7e29df8d230103d586f8f9449bdb0ca500
|
[
"MIT"
] | null | null | null |
tests/data/test_load_wyscout.py
|
BWyckaert/socceraction
|
5fd6bd7e29df8d230103d586f8f9449bdb0ca500
|
[
"MIT"
] | null | null | null |
tests/data/test_load_wyscout.py
|
BWyckaert/socceraction
|
5fd6bd7e29df8d230103d586f8f9449bdb0ca500
|
[
"MIT"
] | null | null | null |
import os
from socceraction.data import wyscout as wy
from socceraction.data.wyscout import (
WyscoutCompetitionSchema,
WyscoutEventSchema,
WyscoutGameSchema,
WyscoutPlayerSchema,
WyscoutTeamSchema,
)
class TestPublicWyscoutLoader:
def setup_method(self) -> None:
data_dir = os.path.join(
os.path.dirname(__file__), os.pardir, 'datasets', 'wyscout_public', 'raw'
)
self.WSL = wy.PublicWyscoutLoader(root=data_dir, download=False)
def test_competitions(self) -> None:
df_competitions = self.WSL.competitions()
assert len(df_competitions) > 0
WyscoutCompetitionSchema.validate(df_competitions)
def test_matches(self) -> None:
df_matches = self.WSL.games(28, 10078) # World Cup, 2018
assert len(df_matches) == 64
WyscoutGameSchema.validate(df_matches)
def test_teams(self) -> None:
df_teams = self.WSL.teams(2058007)
assert len(df_teams) == 2
WyscoutTeamSchema.validate(df_teams)
def test_players(self) -> None:
df_players = self.WSL.players(2058007)
assert len(df_players) == 26
assert df_players.minutes_played.sum() == 22 * 96
WyscoutPlayerSchema.validate(df_players)
def test_minutes_played(self) -> None:
# Injury time should be added
df_players = self.WSL.players(2058007).set_index("player_id")
assert df_players.at[122, "minutes_played"] == 66
assert df_players.at[8249, "minutes_played"] == 96 - 66
        # Penalty shoot-outs should not be added
df_players = self.WSL.players(2058005).set_index("player_id")
assert df_players.minutes_played.sum() / 22 == 127
# COL - JAP: red card in '3
df_players = self.WSL.players(2057997).set_index("player_id")
assert df_players.at[26518, "minutes_played"] == 3
# GER - SWE: double yellow card in '82 + 2' injury time
df_players = self.WSL.players(2057986).set_index("player_id")
assert df_players.at[14716, "minutes_played"] == 84
def test_events(self) -> None:
df_events = self.WSL.events(2058007)
assert len(df_events) > 0
WyscoutEventSchema.validate(df_events)
class TestWyscoutLoader:
def setup_method(self) -> None:
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'datasets', 'wyscout_api')
feeds = {
'competitions': 'competitions.json',
'seasons': 'seasons_{competition_id}.json',
# "games": "matches_{season_id}.json",
'events': 'events_{game_id}.json',
}
self.WSL = wy.WyscoutLoader(root=data_dir, getter='local', feeds=feeds)
def test_competitions(self) -> None:
df_competitions = self.WSL.competitions()
assert len(df_competitions) > 0
WyscoutCompetitionSchema.validate(df_competitions)
def test_matches(self) -> None:
df_matches = self.WSL.games(10, 10174)
assert len(df_matches) == 1
WyscoutGameSchema.validate(df_matches)
def test_teams(self) -> None:
df_teams = self.WSL.teams(2852835)
assert len(df_teams) == 2
WyscoutTeamSchema.validate(df_teams)
def test_players(self) -> None:
df_players = self.WSL.players(2852835)
assert len(df_players) == 30
WyscoutPlayerSchema.validate(df_players)
def test_events(self) -> None:
df_events = self.WSL.events(2852835)
assert len(df_events) > 0
WyscoutEventSchema.validate(df_events)
| 36.556701 | 96 | 0.654258 |
7c7a0346e794b19ed578943f5ccb8ba49e761dc0
| 3,905 |
py
|
Python
|
ai_flow/runtime/job_runtime_util.py
|
SteNicholas/ai-flow
|
2c70547981f1516f0e37bbe6936a1b7cccd31822
|
[
"Apache-2.0"
] | null | null | null |
ai_flow/runtime/job_runtime_util.py
|
SteNicholas/ai-flow
|
2c70547981f1516f0e37bbe6936a1b7cccd31822
|
[
"Apache-2.0"
] | null | null | null |
ai_flow/runtime/job_runtime_util.py
|
SteNicholas/ai-flow
|
2c70547981f1516f0e37bbe6936a1b7cccd31822
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
from ai_flow.plugin_interface.scheduler_interface import JobExecutionInfo
from ai_flow.context.project_context import ProjectContext
from ai_flow.runtime.job_runtime_env import JobRuntimeEnv
def prepare_job_runtime_env(workflow_generated_dir,
workflow_name,
project_context: ProjectContext,
job_execution_info: JobExecutionInfo,
root_working_dir=None,
base_log_folder=None) -> JobRuntimeEnv:
"""
    Prepare the operating environment for the AI Flow job (ai_flow.workflow.job.Job).
    :param workflow_generated_dir: The generated directory of the workflow.
    :param workflow_name: The name of the workflow (ai_flow.workflow.workflow.Workflow).
    :param project_context: The context of the project to which the job belongs.
    :param job_execution_info: The information of the execution of the job.
    :param root_working_dir: The root directory under which the working directory of the job execution (ai_flow.workflow.job.Job) is created.
:param base_log_folder: The base folder of the logs.
:return: ai_flow.runtime.job_runtime_env.JobRuntimeEnv object.
"""
working_dir = os.path.join(root_working_dir,
workflow_name,
job_execution_info.job_name,
str(time.strftime("%Y%m%d%H%M%S", time.localtime())))
job_runtime_env: JobRuntimeEnv = JobRuntimeEnv(working_dir=working_dir,
job_execution_info=job_execution_info,
project_context=project_context,
base_log_folder=base_log_folder)
if not os.path.exists(working_dir):
os.makedirs(working_dir)
job_runtime_env.save_job_execution_info()
if not os.path.exists(job_runtime_env.log_dir):
os.makedirs(job_runtime_env.log_dir)
if not os.path.exists(os.path.dirname(job_runtime_env.workflow_dir)):
os.makedirs(os.path.dirname(job_runtime_env.workflow_dir))
if not os.path.exists(job_runtime_env.workflow_dir):
os.symlink(project_context.get_workflow_path(workflow_name=workflow_name),
job_runtime_env.workflow_dir)
if os.path.exists(project_context.get_generated_path()):
os.symlink(os.path.join(project_context.get_generated_path(),
workflow_generated_dir,
job_execution_info.job_name),
job_runtime_env.generated_dir)
if os.path.exists(project_context.get_resources_path()):
os.symlink(project_context.get_resources_path(), job_runtime_env.resource_dir)
if os.path.exists(project_context.get_dependencies_path()):
os.symlink(project_context.get_dependencies_path(), job_runtime_env.dependencies_dir)
os.symlink(project_context.get_project_config_file(), job_runtime_env.project_config_file)
return job_runtime_env
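

# Hypothetical call-site sketch (added commentary, not part of the original module).
# The concrete ProjectContext and JobExecutionInfo construction is omitted because it
# depends on the surrounding AIFlow runtime; this only illustrates the expected wiring:
#
#   job_runtime_env = prepare_job_runtime_env(
#       workflow_generated_dir='generated_1',
#       workflow_name='my_workflow',
#       project_context=project_context,          # an existing ProjectContext
#       job_execution_info=job_execution_info,    # an existing JobExecutionInfo
#       root_working_dir='/tmp/aiflow_working_dir',
#       base_log_folder='/tmp/aiflow_logs',
#   )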
| 55.785714 | 105 | 0.680154 |
a6a60863aa381bd88c89131760c48a129334db8e
| 7,166 |
py
|
Python
|
analysis/create_plots/detect.py
|
shibaji7/clustering_superdarn_data
|
02bc31dd85f66319bb46b632e0e7ac51ed98c432
|
[
"BSD-3-Clause"
] | 1 |
2020-12-02T20:13:14.000Z
|
2020-12-02T20:13:14.000Z
|
analysis/create_plots/detect.py
|
shibaji7/clustering_superdarn_data
|
02bc31dd85f66319bb46b632e0e7ac51ed98c432
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/create_plots/detect.py
|
shibaji7/clustering_superdarn_data
|
02bc31dd85f66319bb46b632e0e7ac51ed98c432
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
sys.path.append("./")
sys.path.append("../")
sys.path.append("create_plots/")
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib.dates import num2date
import matplotlib.pyplot as plt
from scipy.stats import beta
import pickle
import pydarn
from pysolar.solar import get_altitude
import utils
import rad_fov
################################################
# Inputs: date and radar name
################################################
Rad, Dn = "cvw", dt.datetime(2012,1,2)
LFS = "LFS/LFS_clustering_superdarn_data/"
gmm = True
a_name = "dbscan"
rads = [Rad]
dates = [Dn]
remove_file = False
maxGate = None
gate = 7
sza_thresh, wid = {"is":85., "gs":105.}, 1.
bin_thick = 1.
tag = False
scale = 1.
def get_data_pkl(rad, dn, conn):
local_file = "../data/%s_%s_scans.pickle"%(rad, dn.strftime("%Y-%m-%d"))
#utils.from_remote_FS(conn, local_file, LFS)
with open(local_file, "rb") as f: pkl = pickle.load(f)
dates, elvs = [], []
for d, e in zip(pkl["time"], pkl["elv"]):
dates.extend(num2date(d))
elvs.extend(e)
ox = pd.DataFrame()
ox["date"], ox["elevation"] = dates, elvs
return ox
def get_sza(row, lats, lons):
print(num2date(row.time), row.bmnum)
lat, lon = lats[int(row["bmnum"]), int(row["slist"])],\
lons[int(row["bmnum"]), int(row["slist"])]
dn = num2date(row["time"])
d = dn.replace(tzinfo=dt.timezone.utc)
sza = 90.-get_altitude(lat, lon, d)
return sza
def calculate_total_uncertainity(_is, _gs):
def calc_prob(m, l, th, low=True):
h, be = np.histogram(m, bins=l, density=True)
bc = np.diff(be)
idx = be[1:] < th if low else be[1:] > th
edges = (be[1:])[be[1:] < th] if low else (be[1:])[be[1:] > th]
pr = np.sum(h[idx]*bc[idx])
height = h[idx]
#return pr, h, be[1:], bc
return pr, height, edges, bc
L = len(np.unique(_is.sza)) if len(np.unique(_is.sza)) > len(np.unique(_gs.sza)) else len(np.unique(_gs.sza))
L = int(L/bin_thick)
pr_a, h_a, be_a, bc = calc_prob(_is.sza.tolist(), L, sza_thresh["is"], low=True)
pr_b, h_b, be_b, _ = calc_prob(_gs.sza.tolist(), L, sza_thresh["gs"], low=False)
out = []
for pa, pb, b in zip(h_a, h_b, bc):
if pa < pb: out.append(pa*b)
else: out.append(pb*b)
#prb = np.round(np.sum(out), 3)
prb = np.round(pr_a+pr_b, 3)
print("Total Uncertainty:", prb)
fig = plt.figure(figsize=(4, 4), dpi=100)
mpl.rcParams.update({"font.size": 10})
ax = fig.add_subplot(111)
ax.hist(_is.sza.tolist(), bins=L, histtype="step", color="red", density=True, alpha=0.5, label="IS")
ax.hist(_gs.sza.tolist(), bins=L, histtype="step", color="blue", density=True, alpha=0.5, label="GS")
ax.fill_between(be_a, y1=np.zeros(len(be_a)), y2=h_a, color="r", alpha=0.3, step="pre")
ax.fill_between(be_b, y1=np.zeros(len(be_b)), y2=h_b, color="b", alpha=0.3, step="pre")
#ax.fill_between(be_a, y1=np.zeros(len(be_a)), y2=out, color="violet", alpha=0.3, step="pre")
ax.legend(loc=1)
ax.axvline(sza_thresh["is"], color="r", ls="--", lw=0.6)
ax.axvline(sza_thresh["gs"], color="b", ls="--", lw=0.6)
ax.set_xlabel(r"SZA, $\chi$ ($^o$)")
ax.set_ylabel("Density of IS, GS")
ax.text(0.99, 1.03, r"$\theta$~ %.2f"%prb, ha="right", va="center", transform=ax.transAxes)
ax.text(0.01, 1.03, rads[0].upper()+", %s"%dates[0].strftime("%Y-%m-%d"), ha="left", va="center", transform=ax.transAxes)
png = "create_plots/images/detection_%s_%s.png"%(Rad, Dn.strftime("%Y%m%d"))
ax.set_ylim(0,.1)
if tag: png = png.replace(".png", ".missing.png")
fig.savefig(png, bbox_inches="tight")
return prb
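

def _calc_prob_mass_example():
    # Hypothetical illustration, not part of the original analysis script: the quantity
    # computed inside calc_prob() above is simply the histogram mass on one side of a
    # threshold, i.e. sum(density * bin_width) over the selected bins, approximating
    # P(sza < threshold). The sample values and the 85-degree threshold are made up.
    samples = np.array([70., 75., 80., 90., 100., 110.])
    h, be = np.histogram(samples, bins=4, density=True)
    bc = np.diff(be)
    idx = be[1:] < 85.
    # Two of the six samples land in the single bin whose right edge is below 85,
    # so the returned mass is 2/6 ~= 0.333.
    return float(np.sum(h[idx] * bc[idx]))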
def calculate_uq_elv(_is, _gs):
L = len(np.unique(_is.elv)) if len(np.unique(_is.elv)) > len(np.unique(_gs.elv)) else len(np.unique(_gs.elv))
L = int(L/5)
#pr_a, h_a, be_a, bc = calc_prob(_is.elv.tolist(), L, sza_thresh["is"], low=True)
#pr_b, h_b, be_b, _ = calc_prob(_gs.elv.tolist(), L, sza_thresh["gs"], low=False)
#out = []
#for pa, pb, b in zip(h_a, h_b, bc):
# if pa < pb: out.append(pa*b)
# else: out.append(pb*b)
#prb = np.round(np.sum(out), 3)
#prb = np.round(pr_a+pr_b, 3)
#print("Total Uncertainty:", prb)
fig = plt.figure(figsize=(4, 7), dpi=100)
mpl.rcParams.update({"font.size": 10})
ax = fig.add_subplot(211)
ax.set_xlabel(r"AoA, $\alpha$ ($^o$)")
ax.set_ylabel("Gate Location (IS)")
ax.hist2d(_is.elv.tolist(), _is.slist.tolist(), bins=(20,20), cmap=plt.cm.Greys, norm=mpl.colors.LogNorm())#, histtype="step", color="red", density=True, alpha=0.5, label="IS")
ax = fig.add_subplot(212)
ax.set_xlabel(r"AoA, $\alpha$ ($^o$)")
ax.set_ylabel("Gate Location (GS)")
ax.hist2d(_gs.elv.tolist(), _gs.slist.tolist(), bins=(20,20), cmap=plt.cm.Greys, norm=mpl.colors.LogNorm())#, histtype="step", color="blue", density=True, alpha=0.5, label="GS")
#ax.fill_between(be_a, y1=np.zeros(len(be_a)), y2=h_a, color="r", alpha=0.3, step="pre")
#ax.fill_between(be_b, y1=np.zeros(len(be_b)), y2=h_b, color="b", alpha=0.3, step="pre")
#ax.fill_between(be_a, y1=np.zeros(len(be_a)), y2=out, color="violet", alpha=0.3, step="pre")
#ax.legend(loc=1)
#ax.axvline(sza_thresh["is"], color="r", ls="--", lw=0.6)
#ax.axvline(sza_thresh["gs"], color="b", ls="--", lw=0.6)
ax.set_xlabel(r"AoA, $\alpha$ ($^o$)")
ax.set_ylabel("Gate Location")
#ax.text(0.99, 1.03, r"$\theta$~ %.2f"%prb, ha="right", va="center", transform=ax.transAxes)
#ax.text(0.01, 1.03, rads[0].upper()+", %s"%dates[0].strftime("%Y-%m-%d"), ha="left", va="center", transform=ax.transAxes)
png = "create_plots/images/uq_%s_%s.png"%(Rad, Dn.strftime("%Y%m%d"))
#ax.set_ylim(0,.1)
if tag: png = png.replace(".png", ".missing.png")
fig.savefig(png, bbox_inches="tight")
return
pubfile = utils.get_pubfile()
conn = utils.get_session(key_filename=pubfile)
if gmm: fname = "../outputs/figures_for_papers/{rad}.{a_name}.gmm.{dn}.csv"
else: fname = "../outputs/figures_for_papers/{rad}.{a_name}.{dn}.csv"
if tag: fname = fname.replace(".csv", ".missing.csv")
for rad, dn in zip(rads, dates):
ox = get_data_pkl(rad, dn, conn)
floc = fname.format(rad=rad, a_name=a_name, dn=dn.strftime("%Y%m%d"))
if not os.path.exists(floc): utils.fetch_file(conn, floc, LFS)
X = pd.read_csv(floc)
hdw = pydarn.read_hdw_file(rad)
egate = hdw.gates if not maxGate else maxGate
rfov = rad_fov.CalcFov(hdw=hdw, ngates=egate)
if "sza" not in X.columns:
X["sza"] = X.apply(lambda r: get_sza(r, rfov.latFull, rfov.lonFull), axis=1)
X.to_csv(floc)
X = utils._run_riberio_threshold_on_rad(X)
X["elv"] = ox.elevation
print(X.head())
conn.close()
if remove_file: os.system("rm -rf ../outputs/cluster_tags/*")
X.sza = (np.array(X.sza)/wid).astype(int)*wid
X = X[X.slist > gate]
X = X[["sza", "ribiero_gflg", "elv", "slist"]]
_is, _gs = X[X.ribiero_gflg==0], X[X.ribiero_gflg==1]
#Pr_ab = calculate_total_uncertainity(_is, _gs)
calculate_uq_elv(_is, _gs)
| 41.421965 | 181 | 0.611359 |
ad29833d6957aa81efada3975b1c470dfc2bd2e6
| 14,228 |
py
|
Python
|
integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
# pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument
import datetime
import os
import time
import boto3
import pytest
from dagster import DagsterEventType
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.test_utils import create_run_for_test
from dagster.utils import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
get_test_project_environments_path,
get_test_project_external_pipeline,
)
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
def get_celery_engine_config(dagster_docker_image, job_namespace):
return {
"execution": {
"celery-k8s": {
"config": {
"job_image": dagster_docker_image,
"job_namespace": job_namespace,
"image_pull_policy": image_pull_policy(),
"env_config_maps": ["dagster-pipeline-env"]
+ ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
                }
            }
        },
    }
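

# Note (added commentary, not in the original test module): the fragment returned above only
# carries the celery-k8s executor settings; each test below merges it with the env*.yaml run
# configs via merge_dicts(merge_yamls([...]), get_celery_engine_config(...)).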
def test_execute_on_celery_k8s_default( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "demo_pipeline_celery"
run = create_run_for_test(
dagster_instance, pipeline_name=pipeline_name, run_config=run_config, mode="default",
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def test_execute_subset_on_celery_k8s( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_subset.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "demo_pipeline_celery"
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
solids_to_execute={"count_letters"},
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def test_execute_on_celery_k8s_retry_pipeline( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "retry_pipeline"
run = create_run_for_test(
dagster_instance, pipeline_name=pipeline_name, run_config=run_config, mode="default",
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
stats = dagster_instance.get_run_stats(run.run_id)
assert stats.steps_succeeded == 1
assert DagsterEventType.STEP_START in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_UP_FOR_RETRY in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_RESTARTED in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_SUCCESS in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
def test_execute_on_celery_k8s_with_resource_requirements( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml"),]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "resources_limit_pipeline_celery"
run = create_run_for_test(
dagster_instance, pipeline_name=pipeline_name, run_config=run_config, mode="default",
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def _test_termination(dagster_instance, run_config):
pipeline_name = "resource_pipeline"
run = create_run_for_test(
dagster_instance, pipeline_name=pipeline_name, run_config=run_config, mode="default",
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
# Wait for pipeline run to start
timeout = datetime.timedelta(0, 120)
start_time = datetime.datetime.now()
can_terminate = False
while datetime.datetime.now() < start_time + timeout:
if dagster_instance.run_launcher.can_terminate(run_id=run.run_id):
can_terminate = True
break
time.sleep(5)
assert can_terminate
# Wait for step to start
step_start_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if (
event_record.dagster_event
and event_record.dagster_event.event_type == DagsterEventType.STEP_START
):
step_start_found = True
break
time.sleep(5)
assert step_start_found
# Terminate run
assert dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
assert dagster_instance.run_launcher.terminate(run_id=run.run_id)
# Check that pipeline run is marked as failed
pipeline_run_status_failure = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run.run_id)
if pipeline_run.status == PipelineRunStatus.FAILURE:
pipeline_run_status_failure = True
break
time.sleep(5)
assert pipeline_run_status_failure
# Check that terminate cannot be called again
assert not dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
assert not dagster_instance.run_launcher.terminate(run_id=run.run_id)
# Check for step failure and resource tear down
expected_events_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
step_failures_count = 0
resource_tear_down_count = 0
resource_init_count = 0
termination_request_count = 0
termination_success_count = 0
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if event_record.dagster_event:
if event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE:
step_failures_count += 1
elif event_record.dagster_event.event_type == DagsterEventType.ENGINE_EVENT:
if (
event_record.dagster_event.message
== "[CeleryK8sRunLauncher] Received pipeline termination request."
):
termination_request_count += 1
elif (
event_record.dagster_event.message
== "[CeleryK8sRunLauncher] Pipeline was terminated successfully."
):
termination_success_count += 1
elif event_record.message:
if "initializing s3_resource_with_context_manager" in event_record.message:
resource_init_count += 1
if "tearing down s3_resource_with_context_manager" in event_record.message:
resource_tear_down_count += 1
if (
step_failures_count == 1
and resource_init_count == 1
and resource_tear_down_count == 1
and termination_request_count == 2
and termination_success_count == 1
):
expected_events_found = True
break
time.sleep(5)
assert expected_events_found
s3 = boto3.resource("s3", region_name="us-west-1", use_ssl=True, endpoint_url=None).meta.client
bucket = "dagster-scratch-80542c2"
key = "resource_termination_test/{}".format(run.run_id)
assert s3.get_object(Bucket=bucket, Key=key)
def test_execute_on_celery_k8s_with_termination( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml"),]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
_test_termination(dagster_instance, run_config)
@pytest.fixture(scope="function")
def set_dagster_k8s_pipeline_run_namespace_env(helm_namespace):
try:
old_value = os.getenv("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE")
os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = helm_namespace
yield
finally:
if old_value is not None:
os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = old_value
def test_execute_on_celery_k8s_with_env_var_and_termination( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml"),]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
)
_test_termination(dagster_instance, run_config)
def test_execute_on_celery_k8s_with_hard_failure( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
run_config = merge_dicts(
merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml"),]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
),
{"solids": {"hard_fail_or_0": {"config": {"fail": True}}}},
)
pipeline_name = "hard_failer"
run = create_run_for_test(
dagster_instance, pipeline_name=pipeline_name, run_config=run_config, mode="default",
)
dagster_instance.launch_run(
run.run_id,
ReOriginatedExternalPipelineForTest(get_test_project_external_pipeline(pipeline_name)),
)
assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
# Check that pipeline run is marked as failed
pipeline_run_status_failure = False
start_time = datetime.datetime.now()
timeout = datetime.timedelta(0, 120)
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run.run_id)
if pipeline_run.status == PipelineRunStatus.FAILURE:
pipeline_run_status_failure = True
break
time.sleep(5)
assert pipeline_run_status_failure
# Check for step failure for hard_fail_or_0.compute
start_time = datetime.datetime.now()
step_failure_found = False
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if event_record.dagster_event:
if (
event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE
and event_record.dagster_event.step_key == "hard_fail_or_0"
):
step_failure_found = True
break
time.sleep(5)
assert step_failure_found
| 36.670103 | 100 | 0.691032 |
1a0d9ba96d42971f51efaca127b411dd2c1a5ff9
| 3,056 |
py
|
Python
|
matcher/utils.py
|
aehrc/clinicaltrialsearch
|
03eb78923dd22c10c77257ffb8aeaf60b2f2c62d
|
[
"MIT"
] | null | null | null |
matcher/utils.py
|
aehrc/clinicaltrialsearch
|
03eb78923dd22c10c77257ffb8aeaf60b2f2c62d
|
[
"MIT"
] | null | null | null |
matcher/utils.py
|
aehrc/clinicaltrialsearch
|
03eb78923dd22c10c77257ffb8aeaf60b2f2c62d
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as etree
import re
import argparse
from typing import Dict, Union, Optional, List, Any
from collections import defaultdict
SPECIAL_MEDICAL_TERMS = ['covid', 'covid-19', 'coronavirus', 'corona']
def get_queries(query_file: str):
"""
Reads a Indri formatted query file.
:param query_file: An Indri formatted query file.
:return A dict of queryId -> queryTxt
"""
with open(query_file, 'r') as xml_file:
tree = etree.parse(xml_file)
queries = dict([ (query.findtext('number'), query.findtext('text')) for query in tree.getroot()])
return queries
def trec_pm_to_indri(query_file: str) -> Dict[str, str]:
"""
Takes a file in TREC PM format and converts to Dict[query_id -> query string]
"""
topic_year:str = re.findall('[0-9]{4}', query_file)[0] if re.findall('[0-9]{4}', query_file) else ''
queries: Dict[str, str] = {}
with open(query_file, 'r') as xml_file:
tree: etree.ElementTree = etree.parse(xml_file)
for query in tree.findall('topic'): #type: etree.Element
# query: etree.Element = query
qId = topic_year + query.attrib['number']
            query_text: str = '. '.join([i.text for i in query.iter() if i.text and i.text.strip() and i.text.strip() != 'None'])
queries[qId] = query_text
return queries
def get_rankings(ranking_file: str):
"""
Reads a TREC style ranking file and returns as dict.
:param ranking_file: TREC style ranking file.
:return Dict of queryId -> [d_0, ..., d_n]
"""
# rankings: Dict[str, Optional[List[Any]]] = {}
rankings = defaultdict(list)
with open(ranking_file) as fh:
for line in fh:
qid, zero, doc, rank, score, runId = line.strip().split()
rankings[qid].append((doc, int(rank), float(score), runId))
return rankings
def format_trec_results(qid: str, doc: str, rank: int, score: float, run_id='RunId'):
"""
Produce a TREC formatted str of results.
:param qid: Query Id
:param doc: Document
:param rank: Rank position
:param score: Ranking score
:param run_id: Name for this run
:return String in TREC format
"""
return '{}\t0\t{}\t{}\t{:.4f}\t{}'.format(qid, doc, rank, float(score), run_id)
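

def _example_trec_line() -> str:
    # Hypothetical illustration, not part of the original module: shows the shape of the
    # line produced by format_trec_results for made-up ids and scores, i.e.
    # "20181\t0\tNCT00000001\t1\t12.3400\tdemo-run".
    return format_trec_results('20181', 'NCT00000001', 1, 12.34, run_id='demo-run')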
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Helper utils for matcher.')
parser.add_argument('-q', '--query_file', help='Print queries.')
parser.add_argument('-t', '--trec_pm', help='Convert TREC PM to Indri format for matcher.')
args = parser.parse_args()
if args.query_file:
for qId, text in get_queries(args.query_file).items():
print('{}: {}'.format(qId, text))
elif args.trec_pm:
print('<queries>')
for qid, query_txt in trec_pm_to_indri(args.trec_pm).items():
print('<query>')
print('<number>{}</number>'.format(qid))
print('<text>{}</text>'.format(query_txt))
print('</query>')
print('</queries>')
| 36.819277 | 118 | 0.622382 |
21acb432c48fc6fb4dd56ea2ef6aa2903b327314
| 1,746 |
py
|
Python
|
django/aihackingbad/videos/views.py
|
GauravB159/Recommend
|
1bfc1826239e1fe2ccdab7d75f3a4320a70290db
|
[
"MIT"
] | null | null | null |
django/aihackingbad/videos/views.py
|
GauravB159/Recommend
|
1bfc1826239e1fe2ccdab7d75f3a4320a70290db
|
[
"MIT"
] | null | null | null |
django/aihackingbad/videos/views.py
|
GauravB159/Recommend
|
1bfc1826239e1fe2ccdab7d75f3a4320a70290db
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
import uuid, json ,os,time
# Create your views here.
def index(request):
# return HttpResponse('videos')
return render(request, 'videos/index.html')
def videoGen(category):
    path = 'data/videos/'+category+'/'
baseUrl='http://127.0.0.1:8000/media/'
fileList = os.listdir(path)
fileList = [os.path.join(path,i) for i in fileList]
videoUrls = sorted(fileList,reverse=True)
videoUrls = [baseUrl + s for s in videoUrls]
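    # strip the '/data' segment so links resolve under /media/videos/<category>/ instead of /media/data/videos/<category>/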
videoUrls = [s[:27] + s[32:] for s in videoUrls]
videoUrls = [category]+videoUrls
return videoUrls
def upload(request):
print(request)
if(request.method == 'POST'):
file = request.FILES['video']
fileName = str(int(time.time()))+'.mp4'
filePath = 'data/temp/'
with open(filePath + fileName, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
        category = classify()
        # If classification fails, discard the temporary upload before it is moved.
        if(category == 'none'):
            os.remove('./data/temp/'+fileName)
            return HttpResponse(json.dumps(['none']),content_type='application/json')
        os.rename('./data/temp/'+fileName,'./data/videos/'+category+'/'+fileName)
        return HttpResponse(json.dumps(category),content_type='application/json')
else:
category=request.GET.get('category','')
if(request.GET.get('category') != 'all'):
videoUrls=videoGen(category)
print(videoUrls)
return HttpResponse(json.dumps(videoUrls), content_type='application/json')
else:
arr=['sports','dance','cookery']
videoUrls=[]
for i in arr:
videoUrls+=videoGen(i)[1:]
print(videoUrls,'...')
videoUrls.reverse()
print(videoUrls,'.,.,.,.,')
return HttpResponse(json.dumps(videoUrls), content_type='application/json')
# classify
def classify():
return 'sports'
| 29.1 | 77 | 0.700458 |
76802d81407fc97c7ce3c8cd3d3fd8b34784deec
| 1,310 |
py
|
Python
|
app/core/test/test_admin.py
|
Umutbek/crm-food
|
96a59b2624aef46b9c4fa250812626948edb4d88
|
[
"MIT"
] | null | null | null |
app/core/test/test_admin.py
|
Umutbek/crm-food
|
96a59b2624aef46b9c4fa250812626948edb4d88
|
[
"MIT"
] | null | null | null |
app/core/test/test_admin.py
|
Umutbek/crm-food
|
96a59b2624aef46b9c4fa250812626948edb4d88
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='test123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='test123',
name='Test user full name'
)
def test_users_listed(self):
""" Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.190476 | 68 | 0.636641 |
33938c9651069841dddc01e6634e7715b459ead7
| 2,688 |
py
|
Python
|
lib/web/exceptions.py
|
kpark1/dynamo
|
cb713fac5438b125b6bd05cbe38316fde5868a18
|
[
"MIT"
] | 1 |
2018-08-02T03:06:27.000Z
|
2018-08-02T03:06:27.000Z
|
lib/web/exceptions.py
|
kpark1/dynamo
|
cb713fac5438b125b6bd05cbe38316fde5868a18
|
[
"MIT"
] | 16 |
2017-11-24T21:09:26.000Z
|
2019-05-14T15:13:57.000Z
|
lib/web/exceptions.py
|
kpark1/dynamo
|
cb713fac5438b125b6bd05cbe38316fde5868a18
|
[
"MIT"
] | 11 |
2016-08-03T10:37:31.000Z
|
2018-08-21T14:32:25.000Z
|
class MissingParameter(Exception):
"""Raise if request is missing a required parameter."""
def __init__(self, param_name, context = None):
self.param_name = param_name
self.context = context
def __str__(self):
msg = 'Missing required parameter "%s"' % self.param_name
if self.context is not None:
msg += ' in %s' % self.context
msg += '.\n'
return msg
class ExtraParameter(Exception):
"""Raise if there is an excess parameter."""
def __init__(self, param_name, context = None):
"""
        @param param_name A string or a list
@param context
"""
self.param_name = param_name
self.context = context
def __str__(self):
if type(self.param_name) is list:
msg = 'Parameters %s not expected' % str(self.param_name)
else:
msg = 'Parameter "%s" not expected' % str(self.param_name)
if self.context is not None:
msg += ' in %s' % self.context
msg += '.\n'
return msg
class IllFormedRequest(Exception):
"""Raise if a request parameter value does not conform to a format."""
def __init__(self, param_name, value, hint = None, allowed = None):
self.param_name = param_name
self.value = value
self.hint = hint
self.allowed = allowed
def __str__(self):
msg = 'Parameter "%s" has illegal value "%s".' % (self.param_name, self.value)
if self.hint is not None:
msg += ' ' + self.hint + '.'
if self.allowed is not None:
            msg += ' Allowed values: [%s]' % ', '.join('"%s"' % v for v in self.allowed)
msg += '\n'
return msg
class InvalidRequest(Exception):
"""Raise if the request values are invalid."""
def __str__(self):
if len(self.args) != 0:
return self.args[0] + '.\n'
else:
return 'InvalidRequest\n'
class AuthorizationError(Exception):
"""Raise if the user is not authorized for the request."""
def __str__(self):
return 'User not authorized to perform the request.\n'
class ResponseDenied(Exception):
"""Raise when there is nothing technically wrong but response is denied (e.g. return string too long)"""
def __str__(self):
        return 'Server denied response due to: %s\n' % (self.args[0] if self.args else 'unknown reason')
class TryAgain(Exception):
"""Raise when the request cannot be served temporarily."""
def __str__(self, message = None):
if message is None:
return 'Server temporarily not available. Please try again in a few moments.'
else:
return 'Server temporarily not available: ' + message
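

def _require_param(request_params, param_name, context=None):
    # Hypothetical helper, not part of the original module: illustrates the intended use of
    # MissingParameter by raising it when a required key is absent from a request dict.
    if param_name not in request_params:
        raise MissingParameter(param_name, context)
    return request_params[param_name]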
| 34.025316 | 108 | 0.60119 |
ded4a5fa8c826c8a05b1dd5e86c5ae42611f5785
| 1,253 |
py
|
Python
|
gozokia/i_o/input.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
gozokia/i_o/input.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
gozokia/i_o/input.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
import sys
from .io_base import InputBase
from .io_voice import VoiceRecognizerMixin
from gozokia.conf import settings
class InputTerminalText(InputBase):
def listen(self, *args, **kwargs):
super(InputTerminalText, self).listen(*args, **kwargs)
"""
Normalize reading input between python 2 and 3.
'raw_input' is just 'input' in python3
"""
if sys.version_info[0] < 3:
input_text = str(raw_input("> "))
else:
input_text = input("> ")
return input_text
class InputValue(InputBase):
def listen(self, *args, **kwargs):
super(InputValue, self).listen(*args, **kwargs)
if 'value' in kwargs:
return kwargs.pop('value')
return ""
class InputTerminalVoice(InputBase, VoiceRecognizerMixin):
def __init__(self, *args, **kwargs):
super(InputTerminalVoice, self).__init__(*args, **kwargs)
self.set_voice_recognizer()
def listen(self, *args, **kwargs):
language = kwargs.get('language', settings.GOZOKIA_LANGUAGE)
super(InputTerminalVoice, self).listen(*args, **kwargs)
self.set_voice_recognizer()
input_result = self.listen_audio(language)
return input_result
| 29.139535 | 68 | 0.64166 |
6e8482d99a7cf6fbdf23dba1b17bde54c69208ae
| 817 |
py
|
Python
|
backend/migrations/0026_auto_20190928_2138.py
|
Raniac/NEURO-LEARN
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 8 |
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
backend/migrations/0026_auto_20190928_2138.py
|
Raniac/neurolearn_dev
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 12 |
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
backend/migrations/0026_auto_20190928_2138.py
|
Raniac/NEURO-LEARN
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 1 |
2020-07-17T12:49:49.000Z
|
2020-07-17T12:49:49.000Z
|
# Generated by Django 2.1.7 on 2019-09-28 13:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0025_auto_20190926_2049'),
]
operations = [
migrations.RenameModel(
old_name='Data_Demo',
new_name='Data_Old',
),
migrations.RenameModel(
old_name='Projects_Demo',
new_name='Projects_Old',
),
migrations.RenameModel(
old_name='Submissions_Demo',
new_name='Submissions_Old',
),
migrations.RenameModel(
old_name='Submissions_SA_Demo',
new_name='Submissions_SA_Old',
),
migrations.RenameModel(
old_name='User_Demo',
new_name='User_Old',
),
]
| 24.029412 | 47 | 0.561812 |
692c6134783df3fcbd3a1574bfb27054eeba5062
| 1,082 |
py
|
Python
|
setup.py
|
Photonios/py-momit-cool
|
b702f53c3f5d16f02c3d7fd95c0d980f2683271b
|
[
"MIT"
] | 2 |
2018-09-05T17:17:26.000Z
|
2019-03-26T18:38:41.000Z
|
setup.py
|
Photonios/py-momit-cool
|
b702f53c3f5d16f02c3d7fd95c0d980f2683271b
|
[
"MIT"
] | 2 |
2018-09-05T05:41:19.000Z
|
2020-01-05T14:45:39.000Z
|
setup.py
|
Photonios/py-momit-cool
|
b702f53c3f5d16f02c3d7fd95c0d980f2683271b
|
[
"MIT"
] | 1 |
2019-03-26T18:38:46.000Z
|
2019-03-26T18:38:46.000Z
|
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
setup(
name='py-momit-cool-remote',
version='1.1',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='Library for remotely interfacing with the Momit Cool air conditioning controller.',
long_description=README,
url='https://github.com/Photonios/py-momit-cool-remote',
author='Swen Kooij',
author_email='[email protected]',
keywords=['momit', 'cool', 'remote', 'control'],
entry_points={
'console_scripts': [
'momit-cool=momitcool.cli:main'
]
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| 30.914286 | 100 | 0.634011 |
475df6c915b0228b10ae229a8ea6f0937e5f61a0
| 67,374 |
py
|
Python
|
taco/full_node/weight_proof.py
|
grayfallstown/taco-blockchain
|
6196d73af7a982301a0a1ad5c1bed836b2f95a8d
|
[
"Apache-2.0"
] | 18 |
2021-07-14T09:56:37.000Z
|
2022-02-09T04:32:58.000Z
|
taco/full_node/weight_proof.py
|
grayfallstown/taco-blockchain
|
6196d73af7a982301a0a1ad5c1bed836b2f95a8d
|
[
"Apache-2.0"
] | 9 |
2021-07-14T15:48:28.000Z
|
2021-10-10T02:32:59.000Z
|
taco/full_node/weight_proof.py
|
grayfallstown/taco-blockchain
|
6196d73af7a982301a0a1ad5c1bed836b2f95a8d
|
[
"Apache-2.0"
] | 10 |
2021-07-18T03:22:43.000Z
|
2022-03-15T08:40:06.000Z
|
import asyncio
import dataclasses
import logging
import math
import random
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Tuple
from taco.consensus.block_header_validation import validate_finished_header_block
from taco.consensus.block_record import BlockRecord
from taco.consensus.blockchain_interface import BlockchainInterface
from taco.consensus.constants import ConsensusConstants
from taco.consensus.deficit import calculate_deficit
from taco.consensus.full_block_to_block_record import header_block_to_sub_block_record
from taco.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from taco.consensus.vdf_info_computation import get_signage_point_vdf_info
from taco.types.blockchain_format.classgroup import ClassgroupElement
from taco.types.blockchain_format.sized_bytes import bytes32
from taco.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
from taco.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from taco.types.blockchain_format.vdf import VDFInfo
from taco.types.end_of_slot_bundle import EndOfSubSlotBundle
from taco.types.header_block import HeaderBlock
from taco.types.weight_proof import (
SubEpochChallengeSegment,
SubEpochData,
SubSlotData,
WeightProof,
SubEpochSegments,
RecentChainData,
)
from taco.util.block_cache import BlockCache
from taco.util.hash import std_hash
from taco.util.ints import uint8, uint32, uint64, uint128
from taco.util.streamable import dataclass_from_dict, recurse_jsonify
log = logging.getLogger(__name__)
class WeightProofHandler:
LAMBDA_L = 100
C = 0.5
MAX_SAMPLES = 20
def __init__(
self,
constants: ConsensusConstants,
blockchain: BlockchainInterface,
):
self.tip: Optional[bytes32] = None
self.proof: Optional[WeightProof] = None
self.constants = constants
self.blockchain = blockchain
self.lock = asyncio.Lock()
async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("unknown tip")
return None
if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
log.debug("chain to short for weight proof")
return None
async with self.lock:
if self.proof is not None:
if self.proof.recent_chain_data[-1].header_hash == tip:
return self.proof
wp = await self._create_proof_of_weight(tip)
if wp is None:
return None
self.proof = wp
self.tip = tip
return wp
def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]:
sub_epoch_data: List[SubEpochData] = []
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_height:
break
ses = self.blockchain.get_ses(ses_height)
log.debug(f"handle sub epoch summary {sub_epoch_n} at height: {ses_height} ses {ses}")
sub_epoch_data.append(_create_sub_epoch_data(ses))
return sub_epoch_data
async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
"""
Creates a weight proof object
"""
assert self.blockchain is not None
sub_epoch_segments: List[SubEpochChallengeSegment] = []
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("failed not tip in cache")
return None
log.info(f"create weight proof peak {tip} {tip_rec.height}")
recent_chain = await self._get_recent_chain(tip_rec.height)
if recent_chain is None:
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights)
# use second to last ses as seed
seed = self.get_seed_for_proof(summary_heights, tip_rec.height)
rng = random.Random(seed)
weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain)
sample_n = 0
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_rec.height:
break
            # if we have enough sub_epoch samples, don't sample
if sample_n >= self.MAX_SAMPLES:
log.debug("reached sampled sub epoch cap")
break
# sample sub epoch
# next sub block
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check): # type: ignore
sample_n += 1
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(
f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} "
)
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
log.debug(f"sub epoch {sub_epoch_n} has {len(segments)} segments")
sub_epoch_segments.extend(segments)
prev_ses_block = ses_block
log.debug(f"sub_epochs: {len(sub_epoch_data)}")
return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32:
count = 0
ses = None
for sub_epoch_n, ses_height in enumerate(reversed(summary_heights)):
if ses_height <= tip_height:
count += 1
if count == 2:
ses = self.blockchain.get_ses(ses_height)
break
assert ses is not None
seed = ses.get_hash()
return seed
async def _get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]:
recent_chain: List[HeaderBlock] = []
ses_heights = self.blockchain.get_ses_heights()
min_height = 0
count_ses = 0
for ses_height in reversed(ses_heights):
if ses_height <= tip_height:
count_ses += 1
if count_ses == 2:
min_height = ses_height - 1
break
log.debug(f"start {min_height} end {tip_height}")
headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height, tx_filter=False)
blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height)
ses_count = 0
curr_height = tip_height
blocks_n = 0
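        # Walk back from the tip, prepending header blocks, until two sub-epoch summaries
        # have been crossed, so the recent chain spans back past the last two sub-epoch
        # boundaries.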
while ses_count < 2:
if curr_height == 0:
break
            # add the block to the needed recent reward chain blocks
header_block = headers[self.blockchain.height_to_hash(curr_height)]
block_rec = blocks[header_block.header_hash]
if header_block is None:
log.error("creating recent chain failed")
return None
recent_chain.insert(0, header_block)
if block_rec.sub_epoch_summary_included:
ses_count += 1
curr_height = uint32(curr_height - 1) # type: ignore
blocks_n += 1
header_block = headers[self.blockchain.height_to_hash(curr_height)]
recent_chain.insert(0, header_block)
log.info(
f"recent chain, "
f"start: {recent_chain[0].reward_chain_block.height} "
f"end: {recent_chain[-1].reward_chain_block.height} "
)
return recent_chain
async def create_prev_sub_epoch_segments(self):
log.debug("create prev sub_epoch_segments")
heights = self.blockchain.get_ses_heights()
if len(heights) < 3:
return None
count = len(heights) - 2
ses_sub_block = self.blockchain.height_to_block_record(heights[-2])
prev_ses_sub_block = self.blockchain.height_to_block_record(heights[-3])
assert prev_ses_sub_block.sub_epoch_summary_included is not None
segments = await self.__create_sub_epoch_segments(ses_sub_block, prev_ses_sub_block, uint32(count))
assert segments is not None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_sub_block.header_hash, segments)
log.debug("sub_epoch_segments done")
return None
async def create_sub_epoch_segments(self):
log.debug("check segments in db")
"""
Creates a weight proof object
"""
assert self.blockchain is not None
peak_height = self.blockchain.get_peak_height()
if peak_height is None:
log.error("no peak yet")
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
log.debug(f"check db for sub epoch {sub_epoch_n}")
if ses_height > peak_height:
break
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
await self.__create_persist_segment(prev_ses_block, ses_block, ses_height, sub_epoch_n)
prev_ses_block = ses_block
await asyncio.sleep(2)
log.debug("done checking segments")
return None
async def __create_persist_segment(self, prev_ses_block, ses_block, ses_height, sub_epoch_n):
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} ")
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
async def __create_sub_epoch_segments(
self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32
) -> Optional[List[SubEpochChallengeSegment]]:
segments: List[SubEpochChallengeSegment] = []
start_height = await self.get_prev_two_slots_height(se_start)
blocks = await self.blockchain.get_block_records_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
)
header_blocks = await self.blockchain.get_header_blocks_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS, tx_filter=False
)
curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash]
height = se_start.height
assert curr is not None
first = True
idx = 0
while curr.height < ses_block.height:
if blocks[curr.header_hash].is_challenge_block(self.constants):
log.debug(f"challenge segment {idx}, starts at {curr.height} ")
seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first)
if seg is None:
log.error(f"failed creating segment {curr.header_hash} ")
return None
segments.append(seg)
idx += 1
first = False
else:
height = height + uint32(1) # type: ignore
curr = header_blocks[self.blockchain.height_to_hash(height)]
if curr is None:
return None
log.debug(f"next sub epoch starts at {height}")
return segments
async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32:
# find prev 2 slots height
slot = 0
batch_size = 50
curr_rec = se_start
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
while slot < 2 and curr_rec.height > 0:
if curr_rec.first_in_sub_slot:
slot += 1
if end - curr_rec.height == batch_size - 1:
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
curr_rec = blocks[self.blockchain.height_to_hash(uint32(curr_rec.height - 1))]
return curr_rec.height
async def _create_challenge_segment(
self,
header_block: HeaderBlock,
sub_epoch_n: uint32,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_segment_in_sub_epoch: bool,
) -> Tuple[Optional[SubEpochChallengeSegment], uint32]:
assert self.blockchain is not None
sub_slots: List[SubSlotData] = []
log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ")
# VDFs from sub slots before challenge block
first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs(
header_block, header_blocks, blocks, first_segment_in_sub_epoch
)
if first_sub_slots is None:
log.error("failed building first sub slots")
return None, uint32(0)
sub_slots.extend(first_sub_slots)
ssd = await _challenge_block_vdfs(
self.constants,
header_block,
blocks[header_block.header_hash],
blocks,
)
sub_slots.append(ssd)
        # VDFs from slot after challenge block to end of slot
log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ")
challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf(
uint32(header_block.height + 1), header_blocks, blocks
)
if challenge_slot_end_sub_slots is None:
log.error("failed building slot end ")
return None, uint32(0)
sub_slots.extend(challenge_slot_end_sub_slots)
if first_segment_in_sub_epoch and sub_epoch_n != 0:
return (
SubEpochChallengeSegment(sub_epoch_n, sub_slots, first_rc_end_of_slot_vdf),
end_height,
)
return SubEpochChallengeSegment(sub_epoch_n, sub_slots, None), end_height
# returns a challenge chain vdf from slot start to signage point
async def __first_sub_slot_vdfs(
self,
header_block: HeaderBlock,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_in_sub_epoch: bool,
) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]:
# combine cc vdfs of all reward blocks from the start of the sub slot to end
header_block_sub_rec = blocks[header_block.header_hash]
# find slot start
curr_sub_rec = header_block_sub_rec
first_rc_end_of_slot_vdf = None
if first_in_sub_epoch and curr_sub_rec.height > 0:
while not curr_sub_rec.sub_epoch_summary_included:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks)
else:
if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot:
sub_slots_num = 2
while sub_slots_num > 0 and curr_sub_rec.height > 0:
if curr_sub_rec.first_in_sub_slot:
assert curr_sub_rec.finished_challenge_slot_hashes is not None
sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes)
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
else:
while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
curr = header_blocks[curr_sub_rec.header_hash]
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while curr.height < header_block.height:
if curr is None:
log.error("failed fetching block")
return None, None
if curr.first_in_sub_slot:
# if not blue boxed
if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]):
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(curr.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
tmp_sub_slots_data = []
ssd = SubSlotData(
None,
None,
None,
None,
None,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
curr.reward_chain_block.infused_challenge_chain_ip_vdf,
curr.total_iters,
)
tmp_sub_slots_data.append(ssd)
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(header_block.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
return sub_slots_data, first_rc_end_of_slot_vdf
def first_rc_end_of_slot_vdf(
self,
header_block,
blocks: Dict[bytes32, BlockRecord],
header_blocks: Dict[bytes32, HeaderBlock],
) -> Optional[VDFInfo]:
curr = blocks[header_block.header_hash]
while curr.height > 0 and not curr.sub_epoch_summary_included:
curr = blocks[curr.prev_hash]
return header_blocks[curr.header_hash].finished_sub_slots[-1].reward_chain.end_of_slot_vdf
async def __slot_end_vdf(
self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord]
) -> Tuple[Optional[List[SubSlotData]], uint32]:
# gets all vdfs first sub slot after challenge block to last sub slot
log.debug(f"slot end vdf start height {start_height}")
curr = header_blocks[self.blockchain.height_to_hash(start_height)]
curr_header_hash = curr.header_hash
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while not blocks[curr_header_hash].is_challenge_block(self.constants):
if curr.first_in_sub_slot:
sub_slots_data.extend(tmp_sub_slots_data)
curr_prev_header_hash = curr.prev_header_hash
# add collected vdfs
for idx, sub_slot in enumerate(curr.finished_sub_slots):
prev_rec = blocks[curr_prev_header_hash]
eos_vdf_iters = prev_rec.sub_slot_iters
if idx == 0:
eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants))
sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters))
tmp_sub_slots_data = []
tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks))
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
curr_header_hash = curr.header_hash
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
log.debug(f"slot end vdf end height {curr.height} slots {len(sub_slots_data)} ")
return sub_slots_data, curr.height
def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]):
cc_sp_proof = None
icc_ip_proof = None
cc_sp_info = None
icc_ip_info = None
block_record = blocks[curr.header_hash]
if curr.infused_challenge_chain_ip_proof is not None:
assert curr.reward_chain_block.infused_challenge_chain_ip_vdf
icc_ip_proof = curr.infused_challenge_chain_ip_proof
icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf
if curr.challenge_chain_sp_proof is not None:
assert curr.reward_chain_block.challenge_chain_sp_vdf
cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf
if not curr.challenge_chain_sp_proof.normalized_to_identity:
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
self.constants,
curr.finished_sub_slots,
block_record.overflow,
None if curr.height == 0 else blocks[curr.prev_header_hash],
BlockCache(blocks),
block_record.sp_total_iters(self.constants),
block_record.sp_iters(self.constants),
)
cc_sp_vdf_info = VDFInfo(
curr.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
curr.reward_chain_block.challenge_chain_sp_vdf.output,
)
cc_sp_proof = curr.challenge_chain_sp_proof
cc_sp_info = cc_sp_vdf_info
return SubSlotData(
None,
cc_sp_proof,
curr.challenge_chain_ip_proof,
icc_ip_proof,
cc_sp_info,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
icc_ip_info,
curr.total_iters,
)
def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed sub epoch data validation")
return False, uint32(0)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
log.info("validate sub epoch challenge segments")
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0)
if not _validate_sub_epoch_segments(constants, rng, wp_segment_bytes, summary_bytes):
return False, uint32(0)
log.info("validate weight proof recent blocks")
if not _validate_recent_blocks(constants, wp_recent_chain_bytes, summary_bytes):
return False, uint32(0)
return True, self.get_fork_point(summaries)
def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
log.debug("get fork point skip validations")
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed to validate sub epoch summaries")
return False, uint32(0)
return True, self.get_fork_point(summaries)
async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32, List[SubEpochSummary]]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0), []
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.error("weight proof failed sub epoch data validation")
return False, uint32(0), []
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0), []
executor = ProcessPoolExecutor(1)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
segment_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_sub_epoch_segments, constants, rng, wp_segment_bytes, summary_bytes
)
recent_blocks_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_recent_blocks, constants, wp_recent_chain_bytes, summary_bytes
)
valid_segment_task = segment_validation_task
valid_recent_blocks_task = recent_blocks_validation_task
valid_recent_blocks = await valid_recent_blocks_task
if not valid_recent_blocks:
log.error("failed validating weight proof recent blocks")
return False, uint32(0), []
valid_segments = await valid_segment_task
if not valid_segments:
log.error("failed validating weight proof sub epoch segments")
return False, uint32(0), []
return True, self.get_fork_point(summaries), summaries
def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> uint32:
# iterate through sub epoch summaries to find fork point
fork_point_index = 0
ses_heights = self.blockchain.get_ses_heights()
for idx, summary_height in enumerate(ses_heights):
log.debug(f"check summary {idx} height {summary_height}")
local_ses = self.blockchain.get_ses(summary_height)
if idx == len(received_summaries) - 1:
# end of wp summaries, local chain is longer or equal to wp chain
break
if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash():
break
fork_point_index = idx
if fork_point_index > 2:
            # Two summaries can have different blocks and still be identical
# This gets resolved after one full sub epoch
height = ses_heights[fork_point_index - 2]
else:
height = uint32(0)
return height
def _get_weights_for_sampling(
rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock]
) -> Optional[List[uint128]]:
weight_to_check = []
last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight
delta = last_l_weight / total_weight
prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta)
if prob_of_adv_succeeding <= 0:
return None
queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding)
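    # Each query draws u uniformly from [0, 1) and maps it through q = 1 - delta**u,
    # so the sampled weights are denser toward the heavier (more recent) end of the
    # chain: the density of q grows like 1 / (1 - q).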
for i in range(int(queries) + 1):
u = rng.random()
q = 1 - delta ** u
# todo check division and type conversions
weight = q * float(total_weight)
weight_to_check.append(uint128(int(weight)))
weight_to_check.sort()
return weight_to_check
def _sample_sub_epoch(
start_of_epoch_weight: uint128,
end_of_epoch_weight: uint128,
weight_to_check: List[uint128],
) -> bool:
"""
weight_to_check: List[uint128] is expected to be sorted
"""
if weight_to_check is None:
return True
if weight_to_check[-1] < start_of_epoch_weight:
return False
if weight_to_check[0] > end_of_epoch_weight:
return False
choose = False
for weight in weight_to_check:
if weight > end_of_epoch_weight:
return False
if start_of_epoch_weight < weight < end_of_epoch_weight:
log.debug(f"start weight: {start_of_epoch_weight}")
log.debug(f"weight to check {weight}")
log.debug(f"end weight: {end_of_epoch_weight}")
choose = True
break
return choose
# wp creation methods
def _create_sub_epoch_data(
sub_epoch_summary: SubEpochSummary,
) -> SubEpochData:
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
# Number of subblocks overflow in previous slot
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
# New work difficulty and iterations per sub-slot
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)
async def _challenge_block_vdfs(
constants: ConsensusConstants,
header_block: HeaderBlock,
block_rec: BlockRecord,
sub_blocks: Dict[bytes32, BlockRecord],
):
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
constants,
header_block.finished_sub_slots,
block_rec.overflow,
None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash],
BlockCache(sub_blocks),
block_rec.sp_total_iters(constants),
block_rec.sp_iters(constants),
)
cc_sp_info = None
if header_block.reward_chain_block.challenge_chain_sp_vdf:
cc_sp_info = header_block.reward_chain_block.challenge_chain_sp_vdf
assert header_block.challenge_chain_sp_proof
if not header_block.challenge_chain_sp_proof.normalized_to_identity:
cc_sp_info = VDFInfo(
header_block.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
ssd = SubSlotData(
header_block.reward_chain_block.proof_of_space,
header_block.challenge_chain_sp_proof,
header_block.challenge_chain_ip_proof,
None,
cc_sp_info,
header_block.reward_chain_block.signage_point_index,
None,
None,
None,
None,
header_block.reward_chain_block.challenge_chain_ip_vdf,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
block_rec.total_iters,
)
return ssd
def handle_finished_slots(end_of_slot: EndOfSubSlotBundle, icc_end_of_slot_info):
return SubSlotData(
None,
None,
None,
None,
None,
None,
None
if end_of_slot.proofs.challenge_chain_slot_proof is None
else end_of_slot.proofs.challenge_chain_slot_proof,
None
if end_of_slot.proofs.infused_challenge_chain_slot_proof is None
else end_of_slot.proofs.infused_challenge_chain_slot_proof,
end_of_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
icc_end_of_slot_info,
None,
None,
None,
)
def handle_end_of_slot(
sub_slot: EndOfSubSlotBundle,
eos_vdf_iters: uint64,
):
assert sub_slot.infused_challenge_chain
assert sub_slot.proofs.infused_challenge_chain_slot_proof
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
else:
icc_info = VDFInfo(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
cc_info = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf
else:
cc_info = VDFInfo(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
return SubSlotData(
None,
None,
None,
None,
None,
None,
sub_slot.proofs.challenge_chain_slot_proof,
sub_slot.proofs.infused_challenge_chain_slot_proof,
cc_info,
icc_info,
None,
None,
None,
)
def compress_segments(full_segment_index, segments: List[SubEpochChallengeSegment]) -> List[SubEpochChallengeSegment]:
compressed_segments = []
compressed_segments.append(segments[0])
for idx, segment in enumerate(segments[1:]):
if idx != full_segment_index:
# remove all redundant values
segment = compress_segment(segment)
compressed_segments.append(segment)
return compressed_segments
def compress_segment(segment: SubEpochChallengeSegment) -> SubEpochChallengeSegment:
    # keep only the slots up to and including the challenge slot
comp_seg = SubEpochChallengeSegment(segment.sub_epoch_n, [], segment.rc_slot_end_info)
for slot in segment.sub_slots:
comp_seg.sub_slots.append(slot)
if slot.is_challenge():
break
    return comp_seg
# wp validation methods
def _validate_sub_epoch_summaries(
constants: ConsensusConstants,
weight_proof: WeightProof,
) -> Tuple[Optional[List[SubEpochSummary]], Optional[List[uint128]]]:
last_ses_hash, last_ses_sub_height = _get_last_ses_hash(constants, weight_proof.recent_chain_data)
if last_ses_hash is None:
log.warning("could not find last ses block")
return None, None
summaries, total, sub_epoch_weight_list = _map_sub_epoch_summaries(
constants.SUB_EPOCH_BLOCKS,
constants.GENESIS_CHALLENGE,
weight_proof.sub_epochs,
constants.DIFFICULTY_STARTING,
)
log.info(f"validating {len(summaries)} sub epochs")
# validate weight
if not _validate_summaries_weight(constants, total, summaries, weight_proof):
log.error("failed validating weight")
return None, None
last_ses = summaries[-1]
log.debug(f"last ses sub height {last_ses_sub_height}")
# validate last ses_hash
if last_ses.get_hash() != last_ses_hash:
log.error(f"failed to validate ses hashes block height {last_ses_sub_height}")
return None, None
return summaries, sub_epoch_weight_list
def _map_sub_epoch_summaries(
sub_blocks_for_se: uint32,
ses_hash: bytes32,
sub_epoch_data: List[SubEpochData],
curr_difficulty: uint64,
) -> Tuple[List[SubEpochSummary], uint128, List[uint128]]:
total_weight: uint128 = uint128(0)
summaries: List[SubEpochSummary] = []
sub_epoch_weight_list: List[uint128] = []
for idx, data in enumerate(sub_epoch_data):
ses = SubEpochSummary(
ses_hash,
data.reward_chain_hash,
data.num_blocks_overflow,
data.new_difficulty,
data.new_sub_slot_iters,
)
if idx < len(sub_epoch_data) - 1:
delta = 0
if idx > 0:
delta = sub_epoch_data[idx].num_blocks_overflow
log.debug(f"sub epoch {idx} start weight is {total_weight+curr_difficulty} ")
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
total_weight = total_weight + uint128( # type: ignore
curr_difficulty * (sub_blocks_for_se + sub_epoch_data[idx + 1].num_blocks_overflow - delta)
)
# if new epoch update diff and iters
if data.new_difficulty is not None:
curr_difficulty = data.new_difficulty
# add to dict
summaries.append(ses)
ses_hash = std_hash(ses)
# add last sub epoch weight
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
return summaries, total_weight, sub_epoch_weight_list
def _validate_summaries_weight(constants: ConsensusConstants, sub_epoch_data_weight, summaries, weight_proof) -> bool:
num_over = summaries[-1].num_blocks_overflow
ses_end_height = (len(summaries) - 1) * constants.SUB_EPOCH_BLOCKS + num_over - 1
curr = None
for block in weight_proof.recent_chain_data:
if block.reward_chain_block.height == ses_end_height:
curr = block
if curr is None:
return False
return curr.reward_chain_block.weight == sub_epoch_data_weight
def _validate_sub_epoch_segments(
constants_dict: Dict,
rng: random.Random,
weight_proof_bytes: bytes,
summaries_bytes: List[bytes],
):
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
sub_epoch_segments: SubEpochSegments = SubEpochSegments.from_bytes(weight_proof_bytes)
rc_sub_slot_hash = constants.GENESIS_CHALLENGE
    total_blocks, total_ip_iters = 0, 0
    total_slot_iters, total_slots = 0, 0
prev_ses: Optional[SubEpochSummary] = None
segments_by_sub_epoch = map_segments_by_sub_epoch(sub_epoch_segments.challenge_segments)
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for sub_epoch_n, segments in segments_by_sub_epoch.items():
prev_ssi = curr_ssi
curr_difficulty, curr_ssi = _get_curr_diff_ssi(constants, sub_epoch_n, summaries)
log.debug(f"validate sub epoch {sub_epoch_n}")
# recreate RewardChainSubSlot for next ses rc_hash
sampled_seg_index = rng.choice(range(len(segments)))
if sub_epoch_n > 0:
rc_sub_slot = __get_rc_sub_slot(constants, segments[0], summaries, curr_ssi)
prev_ses = summaries[sub_epoch_n - 1]
rc_sub_slot_hash = rc_sub_slot.get_hash()
if not summaries[sub_epoch_n].reward_chain_hash == rc_sub_slot_hash:
log.error(f"failed reward_chain_hash validation sub_epoch {sub_epoch_n}")
return False
for idx, segment in enumerate(segments):
valid_segment, ip_iters, slot_iters, slots = _validate_segment(
constants, segment, curr_ssi, prev_ssi, curr_difficulty, prev_ses, idx == 0, sampled_seg_index == idx
)
if not valid_segment:
log.error(f"failed to validate sub_epoch {segment.sub_epoch_n} segment {idx} slots")
return False
prev_ses = None
total_blocks += 1
total_slot_iters += slot_iters
total_slots += slots
total_ip_iters += ip_iters
return True
def _validate_segment(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
curr_ssi: uint64,
prev_ssi: uint64,
curr_difficulty: uint64,
ses: Optional[SubEpochSummary],
first_segment_in_se: bool,
sampled: bool,
) -> Tuple[bool, int, int, int]:
ip_iters, slot_iters, slots = 0, 0, 0
after_challenge = False
for idx, sub_slot_data in enumerate(segment.sub_slots):
if sampled and sub_slot_data.is_challenge():
after_challenge = True
required_iters = __validate_pospace(constants, segment, idx, curr_difficulty, ses, first_segment_in_se)
if required_iters is None:
return False, uint64(0), uint64(0), uint64(0)
assert sub_slot_data.signage_point_index is not None
ip_iters = ip_iters + calculate_ip_iters( # type: ignore
constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
)
if not _validate_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate challenge slot {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
elif sampled and after_challenge:
if not _validate_sub_slot_data(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate sub slot data {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
slot_iters = slot_iters + curr_ssi # type: ignore
slots = slots + uint64(1) # type: ignore
return True, ip_iters, slot_iters, slots
def _validate_challenge_block_vdfs(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
if sub_slot_data.cc_signage_point is not None and sub_slot_data.cc_sp_vdf_info:
assert sub_slot_data.signage_point_index
sp_input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
prev_ssd = sub_slots[sub_slot_idx - 1]
sp_input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, sp_input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed to validate challenge chain signage point 2 {sub_slot_data.cc_sp_vdf_info}")
return False
assert sub_slot_data.cc_infusion_point
assert sub_slot_data.cc_ip_vdf_info
ip_input = ClassgroupElement.get_default_element()
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and sub_slot_idx >= 1:
prev_ssd = sub_slots[sub_slot_idx - 1]
if prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
assert prev_ssd.total_iters
assert sub_slot_data.total_iters
ip_input = prev_ssd.cc_ip_vdf_info.output
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, ip_input, cc_ip_vdf_info):
log.error(f"failed to validate challenge chain infusion point {sub_slot_data.cc_ip_vdf_info}")
return False
return True
def _validate_sub_slot_data(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
assert sub_slot_idx > 0
prev_ssd = sub_slots[sub_slot_idx - 1]
if sub_slot_data.is_end_of_slot():
if sub_slot_data.icc_slot_end is not None:
input = ClassgroupElement.get_default_element()
if not sub_slot_data.icc_slot_end.normalized_to_identity and prev_ssd.icc_ip_vdf_info is not None:
assert prev_ssd.icc_ip_vdf_info
input = prev_ssd.icc_ip_vdf_info.output
assert sub_slot_data.icc_slot_end_info
if not sub_slot_data.icc_slot_end.is_valid(constants, input, sub_slot_data.icc_slot_end_info, None):
log.error(f"failed icc slot end validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.cc_slot_end_info
assert sub_slot_data.cc_slot_end
input = ClassgroupElement.get_default_element()
if (not prev_ssd.is_end_of_slot()) and (not sub_slot_data.cc_slot_end.normalized_to_identity):
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
if not sub_slot_data.cc_slot_end.is_valid(constants, input, sub_slot_data.cc_slot_end_info):
log.error(f"failed cc slot end validation {sub_slot_data.cc_slot_end_info}")
return False
else:
# find end of slot
idx = sub_slot_idx
while idx < len(sub_slots) - 1:
curr_slot = sub_slots[idx]
if curr_slot.is_end_of_slot():
                # don't validate intermediate vdfs if the slot is blue boxed
assert curr_slot.cc_slot_end
if curr_slot.cc_slot_end.normalized_to_identity is True:
log.debug(f"skip intermediate vdfs slot {sub_slot_idx}")
return True
else:
break
idx += 1
if sub_slot_data.icc_infusion_point is not None and sub_slot_data.icc_ip_vdf_info is not None:
input = ClassgroupElement.get_default_element()
if not prev_ssd.is_challenge() and prev_ssd.icc_ip_vdf_info is not None:
input = prev_ssd.icc_ip_vdf_info.output
if not sub_slot_data.icc_infusion_point.is_valid(constants, input, sub_slot_data.icc_ip_vdf_info, None):
log.error(f"failed icc infusion point vdf validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.signage_point_index is not None
if sub_slot_data.cc_signage_point:
assert sub_slot_data.cc_sp_vdf_info
input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed cc signage point vdf validation {sub_slot_data.cc_sp_vdf_info}")
return False
input = ClassgroupElement.get_default_element()
assert sub_slot_data.cc_ip_vdf_info
assert sub_slot_data.cc_infusion_point
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
assert sub_slot_data.total_iters
assert prev_ssd.total_iters
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, input, cc_ip_vdf_info):
log.error(f"failed cc infusion point vdf validation {sub_slot_data.cc_slot_end_info}")
return False
return True
def sub_slot_data_vdf_input(
constants: ConsensusConstants,
sub_slot_data: SubSlotData,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
is_overflow: bool,
new_sub_slot: bool,
ssi: uint64,
) -> ClassgroupElement:
cc_input = ClassgroupElement.get_default_element()
sp_total_iters = get_sp_total_iters(constants, is_overflow, ssi, sub_slot_data)
ssd: Optional[SubSlotData] = None
if is_overflow and new_sub_slot:
if sub_slot_idx >= 2:
if sub_slots[sub_slot_idx - 2].cc_slot_end_info is None:
for ssd_idx in reversed(range(0, sub_slot_idx - 1)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
if ssd and ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not is_overflow and not new_sub_slot:
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not new_sub_slot and is_overflow:
slots_seen = 0
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
slots_seen += 1
if slots_seen == 2:
return ClassgroupElement.get_default_element()
if ssd.cc_slot_end_info is None and not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
def _validate_recent_blocks(constants_dict: Dict, recent_chain_bytes: bytes, summaries_bytes: List[bytes]) -> bool:
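    # Replays the proof's recent chain: blocks near the tip (once enough sub slots
    # and transaction blocks have been seen) get full header validation, while the
    # rest only have their proofs of space checked.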
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
sub_blocks = BlockCache({})
first_ses_idx = _get_ses_idx(recent_chain.recent_chain_data)
ses_idx = len(summaries) - len(first_ses_idx)
ssi: uint64 = constants.SUB_SLOT_ITERS_STARTING
diff: Optional[uint64] = constants.DIFFICULTY_STARTING
last_blocks_to_validate = 100 # todo remove cap after benchmarks
for summary in summaries[:ses_idx]:
if summary.new_sub_slot_iters is not None:
ssi = summary.new_sub_slot_iters
if summary.new_difficulty is not None:
diff = summary.new_difficulty
ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
challenge, prev_challenge = None, None
tip_height = recent_chain.recent_chain_data[-1].height
prev_block_record = None
deficit = uint8(0)
for idx, block in enumerate(recent_chain.recent_chain_data):
required_iters = uint64(0)
overflow = False
ses = False
height = block.height
for sub_slot in block.finished_sub_slots:
prev_challenge = challenge
challenge = sub_slot.challenge_chain.get_hash()
deficit = sub_slot.reward_chain.deficit
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
ses = True
assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
ses_idx += 1
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
ssi = sub_slot.challenge_chain.new_sub_slot_iters
if sub_slot.challenge_chain.new_difficulty is not None:
diff = sub_slot.challenge_chain.new_difficulty
if (challenge is not None) and (prev_challenge is not None):
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
deficit = get_deficit(constants, deficit, prev_block_record, overflow, len(block.finished_sub_slots))
log.debug(f"wp, validate block {block.height}")
if sub_slots > 2 and transaction_blocks > 11 and (tip_height - block.height < last_blocks_to_validate):
required_iters, error = validate_finished_header_block(
constants, sub_blocks, block, False, diff, ssi, ses_blocks > 2
)
if error is not None:
log.error(f"block {block.header_hash} failed validation {error}")
return False
else:
required_iters = _validate_pospace_recent_chain(
constants, block, challenge, diff, overflow, prev_challenge
)
if required_iters is None:
return False
curr_block_ses = None if not ses else summaries[ses_idx - 1]
block_record = header_block_to_sub_block_record(
constants, required_iters, block, ssi, overflow, deficit, height, curr_block_ses
)
log.debug(f"add block {block_record.height} to tmp sub blocks")
sub_blocks.add_block_record(block_record)
if block.first_in_sub_slot:
sub_slots += 1
if block.is_transaction_block:
transaction_blocks += 1
if ses:
ses_blocks += 1
prev_block_record = block_record
return True
def _validate_pospace_recent_chain(
constants: ConsensusConstants,
block: HeaderBlock,
challenge: bytes32,
diff: uint64,
overflow: bool,
prev_challenge: bytes32,
):
if block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
assert cc_sp_hash is not None
q_str = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants,
challenge if not overflow else prev_challenge,
cc_sp_hash,
)
if q_str is None:
log.error(f"could not verify proof of space block {block.height} {overflow}")
return None
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
diff,
cc_sp_hash,
)
return required_iters
def __validate_pospace(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
idx: int,
curr_diff: uint64,
ses: Optional[SubEpochSummary],
first_in_sub_epoch: bool,
) -> Optional[uint64]:
if first_in_sub_epoch and segment.sub_epoch_n == 0 and idx == 0:
cc_sub_slot_hash = constants.GENESIS_CHALLENGE
else:
cc_sub_slot_hash = __get_cc_sub_slot(segment.sub_slots, idx, ses).get_hash()
sub_slot_data: SubSlotData = segment.sub_slots[idx]
if sub_slot_data.signage_point_index and is_overflow_block(constants, sub_slot_data.signage_point_index):
curr_slot = segment.sub_slots[idx - 1]
assert curr_slot.cc_slot_end_info
challenge = curr_slot.cc_slot_end_info.challenge
else:
challenge = cc_sub_slot_hash
if sub_slot_data.cc_sp_vdf_info is None:
cc_sp_hash = cc_sub_slot_hash
else:
cc_sp_hash = sub_slot_data.cc_sp_vdf_info.output.get_hash()
# validate proof of space
assert sub_slot_data.proof_of_space is not None
q_str = sub_slot_data.proof_of_space.verify_and_get_quality_string(
constants,
challenge,
cc_sp_hash,
)
if q_str is None:
log.error("could not verify proof of space")
return None
return calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
sub_slot_data.proof_of_space.size,
curr_diff,
cc_sp_hash,
)
def __get_rc_sub_slot(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
summaries: List[SubEpochSummary],
curr_ssi: uint64,
) -> RewardChainSubSlot:
ses = summaries[uint32(segment.sub_epoch_n - 1)]
# find first challenge in sub epoch
first_idx = None
first = None
for idx, curr in enumerate(segment.sub_slots):
if curr.cc_slot_end is None:
first_idx = idx
first = curr
break
assert first_idx
idx = first_idx
slots = segment.sub_slots
# number of slots to look for
slots_n = 1
assert first
assert first.signage_point_index is not None
if is_overflow_block(constants, first.signage_point_index):
if idx >= 2 and slots[idx - 2].cc_slot_end is None:
slots_n = 2
new_diff = None if ses is None else ses.new_difficulty
new_ssi = None if ses is None else ses.new_sub_slot_iters
ses_hash = None if ses is None else ses.get_hash()
overflow = is_overflow_block(constants, first.signage_point_index)
if overflow:
if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
ses_hash = None
new_ssi = None
new_diff = None
sub_slot = slots[idx]
while True:
if sub_slot.cc_slot_end:
slots_n -= 1
if slots_n == 0:
break
idx -= 1
sub_slot = slots[idx]
icc_sub_slot_hash: Optional[bytes32] = None
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
assert segment.rc_slot_end_info is not None
if idx != 0:
cc_vdf_info = VDFInfo(sub_slot.cc_slot_end_info.challenge, curr_ssi, sub_slot.cc_slot_end_info.output)
if sub_slot.icc_slot_end_info is not None:
icc_slot_end_info = VDFInfo(
sub_slot.icc_slot_end_info.challenge, curr_ssi, sub_slot.icc_slot_end_info.output
)
icc_sub_slot_hash = icc_slot_end_info.get_hash()
else:
cc_vdf_info = sub_slot.cc_slot_end_info
if sub_slot.icc_slot_end_info is not None:
icc_sub_slot_hash = sub_slot.icc_slot_end_info.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf_info,
icc_sub_slot_hash,
ses_hash,
new_ssi,
new_diff,
)
rc_sub_slot = RewardChainSubSlot(
segment.rc_slot_end_info,
cc_sub_slot.get_hash(),
icc_sub_slot_hash,
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return rc_sub_slot
def __get_cc_sub_slot(sub_slots: List[SubSlotData], idx, ses: Optional[SubEpochSummary]) -> ChallengeChainSubSlot:
sub_slot: Optional[SubSlotData] = None
for i in reversed(range(0, idx)):
sub_slot = sub_slots[i]
if sub_slot.cc_slot_end_info is not None:
break
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
icc_vdf = sub_slot.icc_slot_end_info
icc_vdf_hash: Optional[bytes32] = None
if icc_vdf is not None:
icc_vdf_hash = icc_vdf.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
sub_slot.cc_slot_end_info,
icc_vdf_hash,
None if ses is None else ses.get_hash(),
None if ses is None else ses.new_sub_slot_iters,
None if ses is None else ses.new_difficulty,
)
return cc_sub_slot
def _get_curr_diff_ssi(constants: ConsensusConstants, idx, summaries):
curr_difficulty = constants.DIFFICULTY_STARTING
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_difficulty = ses.new_difficulty
break
return curr_difficulty, curr_ssi
def vars_to_bytes(constants, summaries, weight_proof):
constants_dict = recurse_jsonify(dataclasses.asdict(constants))
wp_recent_chain_bytes = bytes(RecentChainData(weight_proof.recent_chain_data))
wp_segment_bytes = bytes(SubEpochSegments(weight_proof.sub_epoch_segments))
summary_bytes = []
for summary in summaries:
summary_bytes.append(bytes(summary))
return constants_dict, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes
def bytes_to_vars(constants_dict, summaries_bytes):
summaries = []
for summary in summaries_bytes:
summaries.append(SubEpochSummary.from_bytes(summary))
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
return constants, summaries
def _get_last_ses_hash(
constants: ConsensusConstants, recent_reward_chain: List[HeaderBlock]
) -> Tuple[Optional[bytes32], uint32]:
for idx, block in enumerate(reversed(recent_reward_chain)):
if (block.reward_chain_block.height % constants.SUB_EPOCH_BLOCKS) == 0:
idx = len(recent_reward_chain) - 1 - idx # reverse
# find first block after sub slot end
while idx < len(recent_reward_chain):
curr = recent_reward_chain[idx]
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
return (
slot.challenge_chain.subepoch_summary_hash,
curr.reward_chain_block.height,
)
idx += 1
return None, uint32(0)
def _get_ses_idx(recent_reward_chain: List[HeaderBlock]) -> List[int]:
idxs: List[int] = []
for idx, curr in enumerate(recent_reward_chain):
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
idxs.append(idx)
return idxs
def get_deficit(
constants: ConsensusConstants,
curr_deficit: uint8,
prev_block: BlockRecord,
overflow: bool,
num_finished_sub_slots: int,
) -> uint8:
if prev_block is None:
if curr_deficit >= 1 and not (overflow and curr_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK):
curr_deficit -= 1
return curr_deficit
return calculate_deficit(constants, uint32(prev_block.height + 1), prev_block, overflow, num_finished_sub_slots)
def get_sp_total_iters(constants: ConsensusConstants, is_overflow: bool, ssi: uint64, sub_slot_data: SubSlotData):
assert sub_slot_data.cc_ip_vdf_info is not None
assert sub_slot_data.total_iters is not None
assert sub_slot_data.signage_point_index is not None
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
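    # Overflow blocks have their signage point in the previous sub slot, so the
    # sub slot start is shifted back by one full sub slot of iterations.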
if is_overflow:
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
return sp_sub_slot_total_iters + sp_iters
def blue_boxed_end_of_slot(sub_slot: EndOfSubSlotBundle):
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None:
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
return True
else:
return True
return False
def validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
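    # Re-derive the deterministic sample from the proof's own data and require that
    # a segment bundle is present for every sampled sub epoch.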
tip = weight_proof.recent_chain_data[-1]
weight_to_check = _get_weights_for_sampling(rng, tip.weight, weight_proof.recent_chain_data)
sampled_sub_epochs: dict[int, bool] = {}
for idx in range(1, len(sub_epoch_weight_list)):
if _sample_sub_epoch(sub_epoch_weight_list[idx - 1], sub_epoch_weight_list[idx], weight_to_check):
sampled_sub_epochs[idx - 1] = True
if len(sampled_sub_epochs) == WeightProofHandler.MAX_SAMPLES:
break
curr_sub_epoch_n = -1
for sub_epoch_segment in weight_proof.sub_epoch_segments:
if curr_sub_epoch_n < sub_epoch_segment.sub_epoch_n:
if sub_epoch_segment.sub_epoch_n in sampled_sub_epochs:
del sampled_sub_epochs[sub_epoch_segment.sub_epoch_n]
curr_sub_epoch_n = sub_epoch_segment.sub_epoch_n
if len(sampled_sub_epochs) > 0:
return False
return True
def map_segments_by_sub_epoch(sub_epoch_segments) -> Dict[int, List[SubEpochChallengeSegment]]:
segments: Dict[int, List[SubEpochChallengeSegment]] = {}
curr_sub_epoch_n = -1
for idx, segment in enumerate(sub_epoch_segments):
if curr_sub_epoch_n < segment.sub_epoch_n:
curr_sub_epoch_n = segment.sub_epoch_n
segments[curr_sub_epoch_n] = []
segments[curr_sub_epoch_n].append(segment)
return segments
def validate_total_iters(
segment: SubEpochChallengeSegment,
sub_slot_data_idx,
expected_sub_slot_iters: uint64,
finished_sub_slots_since_prev: int,
prev_b: SubSlotData,
prev_sub_slot_data_iters,
genesis,
) -> bool:
sub_slot_data = segment.sub_slots[sub_slot_data_idx]
if genesis:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
elif segment.sub_slots[sub_slot_data_idx - 1].is_end_of_slot():
assert prev_b.total_iters
assert prev_b.cc_ip_vdf_info
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_sub_slot_data_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
assert prev_b.cc_ip_vdf_info
assert prev_b.total_iters
total_iters = uint128(prev_b.total_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
total_iters = uint128(total_iters + sub_slot_data.cc_ip_vdf_info.number_of_iterations)
return total_iters == sub_slot_data.total_iters
| 42.373585 | 120 | 0.67247 |
15eea01b6d32560e7552a6a3c6876f04ca6a418d
| 3,893 |
py
|
Python
|
DJANGO/trainingapp/trainingapp/settings.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
DJANGO/trainingapp/trainingapp/settings.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
DJANGO/trainingapp/trainingapp/settings.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
"""
Django settings for trainingapp project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import pymysql
import os
pymysql.install_as_MySQLdb()
pymysql.version_info = (1, 3, 13, 'final', 0)
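# pymysql poses as MySQLdb and spoofs its version so Django's minimum mysqlclient
# version check passes; this only matters if the commented-out MySQL backend below
# is enabled.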
from pathlib import Path
#import dj_database_url
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r@@%!&$@__8wqb@ulww-)4(*6u2#53ik-gcnv9ygl0v+=l*@qc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'market.apps.MarketConfig',
'warehouse.apps.WarehouseConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trainingapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trainingapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE' : 'django.db.backends.postgresql_psycopg2',
'NAME' : 'trainingapp',
'USER' : 'postgres',
'PASSWORD' : 'nixon',
'HOST' : 'localhost',
'PORT' : '5432',
}
#'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
#}
#
#'default': {
# 'ENGINE' : 'django.db.backends.mysql',
# 'NAME' : 'trainingapp',
# 'USER' : 'root',
# 'PASSWORD' : 'password',
# 'HOST' : 'localhost',
# 'PORT' : '3306',
#}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
| 25.444444 | 91 | 0.658618 |
8d87b1712970beae100a3b5dc1a249e0b3210081
| 936 |
py
|
Python
|
clean.py
|
ychnlgy/MailUpdater
|
4cac2e62a699ade0257418be68c76c450e5c7d5a
|
[
"MIT"
] | null | null | null |
clean.py
|
ychnlgy/MailUpdater
|
4cac2e62a699ade0257418be68c76c450e5c7d5a
|
[
"MIT"
] | null | null | null |
clean.py
|
ychnlgy/MailUpdater
|
4cac2e62a699ade0257418be68c76c450e5c7d5a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os, shutil
ROOT = "."
PATTERNS = [".pyc", "__pycache__"]
BOLD = '\033[1m'
ENDC = '\033[0m'
TEMPLATE = "%s -- %s"
def main():
recurse(ROOT, PATTERNS)
def bold(s):
return BOLD + s + ENDC
def report(d, t, f):
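    # d: the path matched a pattern and will be deleted, t: the path is a directory,
    # f: the path itself. Directories print as "D", files as "F"; deletions are bolded.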
t = ["F", "D"][t]
temp = TEMPLATE % (t, f)
if d:
temp = bold(temp)
print(temp)
def recurse(f, patterns):
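    # Depth-first walk: a matching directory is removed wholesale, a non-matching
    # directory is descended into, and a matching file is removed.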
t = os.path.isdir(f)
if t:
d = matches(f, patterns)
report(d, t, f)
if d:
shutil.rmtree(f)
else:
for fname in os.listdir(f):
newf = os.path.join(f, fname)
recurse(newf, patterns)
else:
d = matches(f, patterns)
if d:
os.remove(f)
report(d, t, f)
def matches(f, patterns):
for pattern in patterns:
if f.endswith(pattern):
return True
return False
if __name__ == "__main__":
main()
| 17.660377 | 45 | 0.497863 |
0dd47dde3df618b0769da1e05b388749efaf95a8
| 10,361 |
py
|
Python
|
tests/garage/tf/policies/test_categorical_lstm_policy_with_model_transit.py
|
arbenton/garage
|
5c398255fbfae375370483f18216996d82590a88
|
[
"MIT"
] | null | null | null |
tests/garage/tf/policies/test_categorical_lstm_policy_with_model_transit.py
|
arbenton/garage
|
5c398255fbfae375370483f18216996d82590a88
|
[
"MIT"
] | null | null | null |
tests/garage/tf/policies/test_categorical_lstm_policy_with_model_transit.py
|
arbenton/garage
|
5c398255fbfae375370483f18216996d82590a88
|
[
"MIT"
] | 1 |
2020-02-05T00:34:07.000Z
|
2020-02-05T00:34:07.000Z
|
"""
Unit test for Categorical LSTM Policy with Model.
This test consists of four different CategoricalLSTMPolicy instances: P1, P2,
P3 and P4. P1 and P2 are built from CategoricalLSTMPolicy, which does not use
garage.tf.models.LSTMModel, while P3 and P4 do.
This test ensures the outputs from all the policies are the same,
for the transition from using CategoricalLSTMPolicy to
CategoricalLSTMPolicyWithModel.
It covers get_action, get_actions, dist_info_sym, kl_sym,
log_likelihood_sym, entropy_sym and likelihood_ratio_sym.
"""
from unittest import mock
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.misc import tensor_utils
from garage.tf.policies import CategoricalLSTMPolicy
from garage.tf.policies import CategoricalLSTMPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDiscreteEnv
class TestCategoricalLSTMPolicyWithModelTransit(TfGraphTestCase):
def setup_method(self):
super().setup_method()
env = TfEnv(DummyDiscreteEnv(obs_dim=(1, ), action_dim=1))
self.default_initializer = tf.constant_initializer(1)
self.default_hidden_nonlinearity = tf.nn.tanh
self.default_recurrent_nonlinearity = tf.nn.sigmoid
self.default_output_nonlinearity = None
self.time_step = 1
self.policy1 = CategoricalLSTMPolicy(
env_spec=env.spec,
hidden_dim=4,
hidden_nonlinearity=self.default_hidden_nonlinearity,
recurrent_nonlinearity=self.default_recurrent_nonlinearity,
recurrent_w_x_init=self.default_initializer,
recurrent_w_h_init=self.default_initializer,
output_nonlinearity=self.default_output_nonlinearity,
output_w_init=self.default_initializer,
state_include_action=True,
name='P1')
self.policy2 = CategoricalLSTMPolicy(
env_spec=env.spec,
hidden_dim=4,
hidden_nonlinearity=self.default_hidden_nonlinearity,
recurrent_nonlinearity=self.default_recurrent_nonlinearity,
recurrent_w_x_init=self.default_initializer,
recurrent_w_h_init=self.default_initializer,
output_nonlinearity=self.default_output_nonlinearity,
output_w_init=tf.constant_initializer(2),
state_include_action=True,
name='P2')
self.sess.run(tf.global_variables_initializer())
self.policy3 = CategoricalLSTMPolicyWithModel(
env_spec=env.spec,
hidden_dim=4,
hidden_nonlinearity=self.default_hidden_nonlinearity,
hidden_w_init=self.default_initializer,
recurrent_nonlinearity=self.default_recurrent_nonlinearity,
recurrent_w_init=self.default_initializer,
output_nonlinearity=self.default_output_nonlinearity,
output_w_init=self.default_initializer,
state_include_action=True,
name='P3')
self.policy4 = CategoricalLSTMPolicyWithModel(
env_spec=env.spec,
hidden_dim=4,
hidden_nonlinearity=self.default_hidden_nonlinearity,
hidden_w_init=self.default_initializer,
recurrent_nonlinearity=self.default_recurrent_nonlinearity,
recurrent_w_init=self.default_initializer,
output_nonlinearity=self.default_output_nonlinearity,
output_w_init=tf.constant_initializer(2),
state_include_action=True,
name='P4')
self.policy1.reset()
self.policy2.reset()
self.policy3.reset()
self.policy4.reset()
self.obs = [env.reset()]
self.obs = np.concatenate([self.obs for _ in range(self.time_step)],
axis=0)
self.obs_ph = tf.placeholder(
tf.float32, shape=(None, None, env.observation_space.flat_dim))
self.action_ph = tf.placeholder(
tf.float32, shape=(None, None, env.action_space.flat_dim))
self.dist1_sym = self.policy1.dist_info_sym(
obs_var=self.obs_ph,
state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))},
name='p1_sym')
self.dist2_sym = self.policy2.dist_info_sym(
obs_var=self.obs_ph,
state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))},
name='p2_sym')
self.dist3_sym = self.policy3.dist_info_sym(
obs_var=self.obs_ph,
state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))},
name='p3_sym')
self.dist4_sym = self.policy4.dist_info_sym(
obs_var=self.obs_ph,
state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))},
name='p4_sym')
def test_dist_info_sym_output(self):
# batch size = 2
dist1 = self.sess.run(
self.dist1_sym, feed_dict={self.obs_ph: [self.obs, self.obs]})
dist2 = self.sess.run(
self.dist2_sym, feed_dict={self.obs_ph: [self.obs, self.obs]})
dist3 = self.sess.run(
self.dist3_sym, feed_dict={self.obs_ph: [self.obs, self.obs]})
dist4 = self.sess.run(
self.dist4_sym, feed_dict={self.obs_ph: [self.obs, self.obs]})
assert np.array_equal(dist1['prob'], dist3['prob'])
assert np.array_equal(dist2['prob'], dist4['prob'])
@mock.patch('numpy.random.choice')
def test_get_action(self, mock_rand):
mock_rand.return_value = 0
action1, agent_info1 = self.policy1.get_action(self.obs)
action2, agent_info2 = self.policy2.get_action(self.obs)
action3, agent_info3 = self.policy3.get_action(self.obs)
action4, agent_info4 = self.policy4.get_action(self.obs)
assert action1 == action3
assert action2 == action4
assert np.array_equal(agent_info1['prob'], agent_info3['prob'])
assert np.array_equal(agent_info2['prob'], agent_info4['prob'])
actions1, agent_infos1 = self.policy1.get_actions([self.obs])
actions2, agent_infos2 = self.policy2.get_actions([self.obs])
actions3, agent_infos3 = self.policy3.get_actions([self.obs])
actions4, agent_infos4 = self.policy4.get_actions([self.obs])
assert np.array_equal(actions1, actions3)
assert np.array_equal(actions2, actions4)
assert np.array_equal(agent_infos1['prob'], agent_infos3['prob'])
assert np.array_equal(agent_infos2['prob'], agent_infos4['prob'])
def test_kl_sym(self):
kl_diff_sym1 = self.policy1.distribution.kl_sym(
self.dist1_sym, self.dist2_sym)
objective1 = tf.reduce_mean(kl_diff_sym1)
kl_func = tensor_utils.compile_function([self.obs_ph], objective1)
kl1 = kl_func([self.obs, self.obs])
kl_diff_sym2 = self.policy3.distribution.kl_sym(
self.dist3_sym, self.dist4_sym)
objective2 = tf.reduce_mean(kl_diff_sym2)
kl_func = tensor_utils.compile_function([self.obs_ph], objective2)
kl2 = kl_func([self.obs, self.obs])
assert np.array_equal(kl1, kl2)
    def test_log_likelihood_sym(self):
log_prob_sym1 = self.policy1.distribution.log_likelihood_sym(
self.action_ph, self.dist1_sym)
log_prob_func = tensor_utils.compile_function(
[self.obs_ph, self.action_ph], log_prob_sym1)
log_prob1 = log_prob_func([self.obs, self.obs],
np.ones((2, self.time_step, 1)))
log_prob_sym2 = self.policy3.distribution.log_likelihood_sym(
self.action_ph, self.dist3_sym)
log_prob_func2 = tensor_utils.compile_function(
[self.obs_ph, self.action_ph], log_prob_sym2)
log_prob2 = log_prob_func2([self.obs, self.obs],
np.ones((2, self.time_step, 1)))
assert np.array_equal(log_prob1, log_prob2)
log_prob_sym1 = self.policy2.distribution.log_likelihood_sym(
self.action_ph, self.dist2_sym)
log_prob_func = tensor_utils.compile_function(
[self.obs_ph, self.action_ph], log_prob_sym1)
log_prob1 = log_prob_func([self.obs, self.obs],
np.ones((2, self.time_step, 1)))
log_prob_sym2 = self.policy4.distribution.log_likelihood_sym(
self.action_ph, self.dist4_sym)
log_prob_func2 = tensor_utils.compile_function(
[self.obs_ph, self.action_ph], log_prob_sym2)
log_prob2 = log_prob_func2([self.obs, self.obs],
np.ones((2, self.time_step, 1)))
assert np.array_equal(log_prob1, log_prob2)
def test_policy_entropy_sym(self):
entropy_sym1 = self.policy1.distribution.entropy_sym(
self.dist1_sym, name='entropy_sym1')
entropy_func = tensor_utils.compile_function([self.obs_ph],
entropy_sym1)
entropy1 = entropy_func([self.obs, self.obs])
entropy_sym2 = self.policy3.distribution.entropy_sym(
self.dist3_sym, name='entropy_sym1')
entropy_func = tensor_utils.compile_function([self.obs_ph],
entropy_sym2)
entropy2 = entropy_func([self.obs, self.obs])
assert np.array_equal(entropy1, entropy2)
def test_likelihood_ratio_sym(self):
likelihood_ratio_sym1 = self.policy1.distribution.likelihood_ratio_sym(
self.action_ph,
self.dist1_sym,
self.dist2_sym,
name='li_ratio_sym1')
likelihood_ratio_func = tensor_utils.compile_function(
[self.action_ph, self.obs_ph], likelihood_ratio_sym1)
likelihood_ratio1 = likelihood_ratio_func(
np.ones((2, 1, 1)), [self.obs, self.obs])
likelihood_ratio_sym2 = self.policy3.distribution.likelihood_ratio_sym(
self.action_ph,
self.dist3_sym,
self.dist4_sym,
name='li_ratio_sym2')
likelihood_ratio_func = tensor_utils.compile_function(
[self.action_ph, self.obs_ph], likelihood_ratio_sym2)
likelihood_ratio2 = likelihood_ratio_func(
np.ones((2, 1, 1)), [self.obs, self.obs])
assert np.array_equal(likelihood_ratio1, likelihood_ratio2)
| 43.533613 | 79 | 0.657562 |
958768b8dd14855315f22560afe6620767e37771
| 17 |
py
|
Python
|
benchmarks/models/__init__.py
|
markozeman/TransformerSuperposition
|
a0638c8cb7a346dc9bd0ac992ce5bf306d44303b
|
[
"MIT"
] | null | null | null |
benchmarks/models/__init__.py
|
markozeman/TransformerSuperposition
|
a0638c8cb7a346dc9bd0ac992ce5bf306d44303b
|
[
"MIT"
] | null | null | null |
benchmarks/models/__init__.py
|
markozeman/TransformerSuperposition
|
a0638c8cb7a346dc9bd0ac992ce5bf306d44303b
|
[
"MIT"
] | null | null | null |
from . import mlp
| 17 | 17 | 0.764706 |
ba90e2ebaf9c707b86e448928f2850de34296a6b
| 1,170 |
py
|
Python
|
osbenchmark/builder/launchers/launcher.py
|
sharp-pixel/opensearch-benchmark
|
32b2a68c3672f680fbc90a591f6c15b46701142e
|
[
"Apache-2.0"
] | 26 |
2021-12-09T06:58:53.000Z
|
2022-03-29T15:01:37.000Z
|
osbenchmark/builder/launchers/launcher.py
|
sharp-pixel/opensearch-benchmark
|
32b2a68c3672f680fbc90a591f6c15b46701142e
|
[
"Apache-2.0"
] | 63 |
2021-12-08T20:47:17.000Z
|
2022-03-31T18:21:31.000Z
|
osbenchmark/builder/launchers/launcher.py
|
sharp-pixel/opensearch-benchmark
|
32b2a68c3672f680fbc90a591f6c15b46701142e
|
[
"Apache-2.0"
] | 5 |
2021-12-09T10:17:30.000Z
|
2022-03-03T05:31:12.000Z
|
from abc import ABC, abstractmethod
class Launcher(ABC):
"""
Launchers are used to start and stop OpenSearch on the nodes in a self-managed cluster.
"""
def __init__(self, shell_executor):
self.shell_executor = shell_executor
@abstractmethod
def start(self, host, node_configurations):
"""
Starts the OpenSearch nodes on a given host
        :param host: A Host object defining the host on which to start the nodes
        :param node_configurations: A list of NodeConfiguration objects detailing the installation data of the nodes on the host
        :return nodes: A list of Node objects defining the nodes running on a host
"""
raise NotImplementedError
@abstractmethod
def stop(self, host, nodes):
"""
Stops the OpenSearch nodes on a given host
        :param host: A Host object defining the host on which to stop the nodes
        :param nodes: A list of Node objects defining the nodes running on a host
        :return nodes: A list of Node objects representing OpenSearch nodes that were successfully stopped on the host
"""
raise NotImplementedError
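# Illustrative sketch only, not part of the original module: a minimal concrete
# Launcher. The command strings and the `execute(host, command)` call on the
# injected shell executor are assumptions made for this example, not the
# documented osbenchmark API.
class ExampleLauncher(Launcher):
    def start(self, host, node_configurations):
        started = []
        for node_configuration in node_configurations:
            # a real launcher would build the start command from the node
            # configuration and wrap each started process in a Node object
            self.shell_executor.execute(host, "bin/opensearch -d")
            started.append(node_configuration)
        return started
    def stop(self, host, nodes):
        for node in nodes:
            # placeholder command; a real process id would come from the Node
            self.shell_executor.execute(host, "kill <opensearch-pid>")
        return nodes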
| 36.5625 | 128 | 0.687179 |
f6eca5db0b003550ce8068afa299c42f2a0019dc
| 325 |
py
|
Python
|
librerias_mauro/maths.py
|
paleomau/MGOL_BOOTCAMP
|
8c2b018f49fd12a255ea6f323141260d04d4421d
|
[
"MIT"
] | null | null | null |
librerias_mauro/maths.py
|
paleomau/MGOL_BOOTCAMP
|
8c2b018f49fd12a255ea6f323141260d04d4421d
|
[
"MIT"
] | null | null | null |
librerias_mauro/maths.py
|
paleomau/MGOL_BOOTCAMP
|
8c2b018f49fd12a255ea6f323141260d04d4421d
|
[
"MIT"
] | null | null | null |
''' This function raises every number in a range you define to the desired power '''
def math_powers(limit, power):
    the_list = [(a ** power) for a in range(limit)]
    print(the_list)
math_powers(10,3)
# Note: naming the parameter 'range' would shadow the builtin and raise "int object is not callable".
| 46.428571 | 111 | 0.72 |
ae726ac52d8f2253720213951983bdab64a7ae87
| 29,790 |
py
|
Python
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/base_date.py
|
acblacktea/Recognizers-Text
|
2170b8e35216f3fd56cce98fb33cde5339c9f088
|
[
"MIT"
] | 1 |
2019-06-19T10:45:24.000Z
|
2019-06-19T10:45:24.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/base_date.py
|
mcunille/Recognizers-Text
|
c5375796ab3f00bee4d9ac1cf8873fe2cc29121b
|
[
"MIT"
] | null | null | null |
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/base_date.py
|
mcunille/Recognizers-Text
|
c5375796ab3f00bee4d9ac1cf8873fe2cc29121b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, Optional, Pattern, Dict
from datetime import datetime, timedelta
import calendar
from datedelta import datedelta
import regex
from recognizers_text.extractor import ExtractResult
from recognizers_text.utilities import RegExpUtility
from recognizers_number import BaseNumberExtractor, BaseNumberParser
from recognizers_number.number import Constants as NumberConstants
from .constants import Constants, TimeTypeConstants
from .extractors import DateTimeExtractor
from .parsers import DateTimeParser, DateTimeParseResult
from .utilities import DateTimeUtilityConfiguration, Token, merge_all_tokens, get_tokens_from_regex, DateUtils, AgoLaterUtil, DateTimeFormatUtil, DateTimeResolutionResult, DayOfWeek, AgoLaterMode
class DateExtractorConfiguration(ABC):
@property
@abstractmethod
def date_regex_list(self) -> List[Pattern]:
raise NotImplementedError
@property
@abstractmethod
def implicit_date_list(self) -> List[Pattern]:
raise NotImplementedError
@property
@abstractmethod
def month_end(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def of_month(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def date_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def for_the_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def week_day_and_day_of_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def relative_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def week_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def day_of_week(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def ordinal_extractor(self) -> BaseNumberExtractor:
raise NotImplementedError
@property
@abstractmethod
def integer_extractor(self) -> BaseNumberExtractor:
raise NotImplementedError
@property
@abstractmethod
def number_parser(self) -> BaseNumberParser:
raise NotImplementedError
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def utility_configuration(self) -> DateTimeUtilityConfiguration:
raise NotImplementedError
class BaseDateExtractor(DateTimeExtractor):
@property
def extractor_type_name(self) -> str:
return Constants.SYS_DATETIME_DATE
def __init__(self, config: DateExtractorConfiguration):
self.config = config
def extract(self, source: str, reference: datetime = None) -> List[ExtractResult]:
if reference is None:
reference = datetime.now()
tokens = self.basic_regex_match(source)
tokens.extend(self.implicit_date(source))
tokens.extend(self.number_with_month(source, reference))
tokens.extend(self.duration_with_before_and_after(source, reference))
result = merge_all_tokens(tokens, source, self.extractor_type_name)
return result
def basic_regex_match(self, source: str) -> List[Token]:
ret: List[Token] = list()
for regexp in self.config.date_regex_list:
ret.extend(get_tokens_from_regex(regexp, source))
return ret
def implicit_date(self, source: str) -> List[Token]:
ret: List[Token] = list()
for regexp in self.config.implicit_date_list:
ret.extend(get_tokens_from_regex(regexp, source))
return ret
def number_with_month(self, source: str, reference: datetime) -> List[Token]:
ret: List[Token] = list()
extract_results = self.config.ordinal_extractor.extract(source)
extract_results.extend(self.config.integer_extractor.extract(source))
for result in extract_results:
num = int(self.config.number_parser.parse(result).value)
if num < 1 or num > 31:
continue
if result.start >= 0:
front_string = source[0:result.start or 0]
match = regex.search(self.config.month_end, front_string)
if match is not None:
ret.append(Token(match.start(), match.end() + result.length))
continue
# handling cases like 'for the 25th'
matches = regex.finditer(self.config.for_the_regex, source)
is_found = False
for match_case in matches:
if match_case is not None:
ordinal_num = RegExpUtility.get_group(match_case, 'DayOfMonth')
if ordinal_num == result.text:
length = len(RegExpUtility.get_group(match_case, 'end'))
ret.append(Token(match_case.start(), match_case.end() - length))
is_found = True
if is_found:
continue
            # handling cases like 'Thursday the 21st', where both 'Thursday' and '21st' refer to the same date
matches = regex.finditer(self.config.week_day_and_day_of_month_regex, source)
for match_case in matches:
if match_case is not None:
ordinal_num = RegExpUtility.get_group(match_case, 'DayOfMonth')
if ordinal_num == result.text:
month = reference.month
year = reference.year
                    # get the day of week for the ordinal number, which is treated as a date in the reference month
date = DateUtils.safe_create_from_min_value(year, month, num)
num_week_day_str: str = calendar.day_name[date.weekday()].lower()
# get week day from text directly, compare it with the weekday generated above
                    # to see whether they refer to the same week day
extracted_week_day_str = RegExpUtility.get_group(match_case, 'weekday').lower()
if (date != DateUtils.min_value and
self.config.day_of_week[num_week_day_str] ==
self.config.day_of_week[extracted_week_day_str]):
ret.append(
Token(match_case.start(), match_case.end()))
is_found = True
if is_found:
continue
# handling cases like '20th of next month'
suffix_str: str = source[result.start + result.length:].lower()
match = regex.match(self.config.relative_month_regex, suffix_str.strip())
space_len = len(suffix_str) - len(suffix_str.strip())
if match is not None and match.start() == 0:
ret.append(
Token(result.start, result.start + result.length + space_len + len(match.group())))
# handling cases like 'second Sunday'
match = regex.match(
self.config.week_day_regex, suffix_str.strip())
if (match is not None and match.start() == 0 and
num >= 1 and num <= 5 and
result.type == NumberConstants.SYS_NUM_ORDINAL):
week_day_str = RegExpUtility.get_group(match, 'weekday')
if week_day_str in self.config.day_of_week:
ret.append(
Token(result.start, result.start + result.length + space_len + len(match.group())))
if result.start + result.length < len(source):
after_string = source[result.start + result.length:]
match = regex.match(self.config.of_month, after_string)
if match is not None:
ret.append(Token(result.start, result.start + result.length + len(match.group())))
return ret
def duration_with_before_and_after(self, source: str, reference: datetime) -> List[Token]:
ret: List[Token] = list()
duration_results = self.config.duration_extractor.extract(source, reference)
for result in duration_results:
match = regex.search(self.config.date_unit_regex, result.text)
if match is None:
continue
ret = AgoLaterUtil.extractor_duration_with_before_and_after(source, result, ret, self.config.utility_configuration)
return ret
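# Added illustrative note (not part of the original source): extract() merges the
# tokens found by the four strategies above into non-overlapping ExtractResult
# spans. Assuming a language-specific DateExtractorConfiguration is supplied, a
# hypothetical call such as
#
#   BaseDateExtractor(config).extract("fly back on the 25th", datetime(2019, 6, 19))
#
# would be expected to return a single result of type SYS_DATETIME_DATE covering
# "the 25th"; the exact span depends entirely on the configured regexes.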
class DateParserConfiguration(ABC):
@property
@abstractmethod
def ordinal_extractor(self) -> BaseNumberExtractor:
raise NotImplementedError
@property
@abstractmethod
def integer_extractor(self) -> BaseNumberExtractor:
raise NotImplementedError
@property
@abstractmethod
def cardinal_extractor(self) -> BaseNumberExtractor:
raise NotImplementedError
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def duration_parser(self) -> DateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def number_parser(self) -> BaseNumberParser:
raise NotImplementedError
@property
@abstractmethod
def month_of_year(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def day_of_month(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def day_of_week(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def unit_map(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def cardinal_map(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def date_regex(self) -> List[Pattern]:
raise NotImplementedError
@property
@abstractmethod
def on_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def special_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def next_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def unit_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def week_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def last_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def this_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def week_day_of_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def for_the_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def week_day_and_day_of_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def relative_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def utility_configuration(self) -> DateTimeUtilityConfiguration:
raise NotImplementedError
@property
@abstractmethod
def date_token_prefix(self) -> str:
raise NotImplementedError
@abstractmethod
def get_swift_day(self, source: str) -> int:
raise NotImplementedError
@abstractmethod
def get_swift_month(self, source: str) -> int:
raise NotImplementedError
@abstractmethod
def is_cardinal_last(self, source: str) -> bool:
raise NotImplementedError
class BaseDateParser(DateTimeParser):
@property
def parser_type_name(self) -> str:
return Constants.SYS_DATETIME_DATE
def __init__(self, config: DateParserConfiguration):
self.config = config
def parse(self, source: ExtractResult, reference: datetime = None) -> Optional[DateTimeParseResult]:
if reference is None:
reference = datetime.now()
result_value: DateTimeParseResult = None
if source.type is self.parser_type_name:
source_text = source.text.lower()
inner_result = self.parse_basic_regex_match(source_text, reference)
if not inner_result.success:
inner_result = self.parse_implicit_date(source_text, reference)
if not inner_result.success:
inner_result = self.parse_weekday_of_month(source_text, reference)
if not inner_result.success:
inner_result = self.parser_duration_with_ago_and_later(source_text, reference)
if not inner_result.success:
inner_result = self.parse_number_with_month(source_text, reference)
if not inner_result.success:
inner_result = self.parse_single_number(source_text, reference)
if inner_result.success:
inner_result.future_resolution: Dict[str, str] = dict()
inner_result.future_resolution[TimeTypeConstants.DATE] = DateTimeFormatUtil.format_date(inner_result.future_value)
inner_result.past_resolution: Dict[str, str] = dict()
inner_result.past_resolution[TimeTypeConstants.DATE] = DateTimeFormatUtil.format_date(inner_result.past_value)
result_value = inner_result
result = DateTimeParseResult(source)
result.value = result_value
result.timex_str = result_value.timex if result_value is not None else ''
result.resolution_str = ''
return result
def parse_basic_regex_match(self, source: str, reference: datetime) -> DateTimeParseResult:
trimmed_source = source.strip()
result = DateTimeResolutionResult()
for regexp in self.config.date_regex:
offset = 0
match = regex.search(regexp, trimmed_source)
if match is None:
match = regex.search(regexp, self.config.date_token_prefix + trimmed_source)
offset = len(self.config.date_token_prefix)
if match and match.start() == offset and len(match.group()) == len(trimmed_source):
result = self.match_to_date(match, reference)
break
return result
    def match_to_date(self, match, reference: datetime) -> DateTimeResolutionResult:
result = DateTimeResolutionResult()
year_str = RegExpUtility.get_group(match, 'year')
month_str = RegExpUtility.get_group(match, 'month')
day_str = RegExpUtility.get_group(match, 'day')
month = 0
day = 0
year = 0
if month_str in self.config.month_of_year and day_str in self.config.day_of_month:
month = self.config.month_of_year.get(month_str)
day = self.config.day_of_month.get(day_str)
if year_str:
year = int(year_str) if year_str.isnumeric() else 0
if year < 100 and year >= Constants.MinTwoDigitYearPastNum:
year += 1900
elif year >= 0 and year < Constants.MaxTwoDigitYearFutureNum:
year += 2000
no_year = False
if year == 0:
year = reference.year
result.timex = DateTimeFormatUtil.luis_date(-1, month, day)
no_year = True
else:
result.timex = DateTimeFormatUtil.luis_date(year, month, day)
future_date = DateUtils.safe_create_from_min_value(year, month, day)
past_date = DateUtils.safe_create_from_min_value(year, month, day)
if no_year and future_date < reference:
future_date = DateUtils.safe_create_from_min_value(year + 1, month, day)
if no_year and past_date >= reference:
past_date = DateUtils.safe_create_from_min_value(year - 1, month, day)
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
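    # Added illustrative comment (not part of the original source): assuming,
    # purely for illustration, Constants.MinTwoDigitYearPastNum == 40 and
    # Constants.MaxTwoDigitYearFutureNum == 30, a matched two-digit year "95"
    # resolves to 1995 and "21" to 2021 in the pivot logic above. When no year
    # is matched (no_year == True), "June 5" parsed against a reference date of
    # 2019-06-19 yields future_value 2020-06-05 and past_value 2019-06-05,
    # which is exactly what the two safe_create_from_min_value adjustments do.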
def parse_implicit_date(self, source: str, reference: datetime) -> DateTimeParseResult:
trimmed_source = source.strip()
result = DateTimeResolutionResult()
# handle "on 12"
match = regex.search(self.config.on_regex, self.config.date_token_prefix + trimmed_source)
if match and match.start() == len(self.config.date_token_prefix) and len(match.group()) == len(trimmed_source):
day = 0
month = reference.month
year = reference.year
day_str = match.group('day')
day = self.config.day_of_month.get(day_str)
result.timex = DateTimeFormatUtil.luis_date(-1, -1, day)
try_str = DateTimeFormatUtil.luis_date(year, month, day)
try_date = datetime.strptime(try_str, '%Y-%m-%d')
future_date: datetime
past_date: datetime
if try_date:
future_date = DateUtils.safe_create_from_min_value(year, month, day)
past_date = DateUtils.safe_create_from_min_value(year, month, day)
if future_date < reference:
future_date += datedelta(months=1)
if past_date >= reference:
past_date += datedelta(months=-1)
else:
future_date = DateUtils.safe_create_from_min_value(year, month + 1, day)
past_date = DateUtils.safe_create_from_min_value(year, month - 1, day)
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
# handle "today", "the day before yesterday"
match = regex.match(self.config.special_day_regex, trimmed_source)
if match and match.start() == 0 and len(match.group()) == len(trimmed_source):
swift = self.config.get_swift_day(match.group())
today = DateUtils.safe_create_from_min_value(reference.year, reference.month, reference.day)
value = today + timedelta(days=swift)
result.timex = DateTimeFormatUtil.luis_date_from_datetime(value)
result.future_value = value
result.past_value = value
result.success = True
return result
# handle "next Sunday"
match = regex.match(self.config.next_regex, trimmed_source)
if match and match.start() == 0 and len(match.group()) == len(trimmed_source):
weekday_str = match.group('weekday')
value = DateUtils.next(reference, self.config.day_of_week.get(weekday_str))
result.timex = DateTimeFormatUtil.luis_date_from_datetime(value)
result.future_value = value
result.past_value = value
result.success = True
return result
# handle "this Friday"
match = regex.match(self.config.this_regex, trimmed_source)
if match and match.start() == 0 and len(match.group()) == len(trimmed_source):
weekday_str = match.group('weekday')
value = DateUtils.this(reference, self.config.day_of_week.get(weekday_str))
result.timex = DateTimeFormatUtil.luis_date_from_datetime(value)
result.future_value = value
result.past_value = value
result.success = True
return result
# handle "last Friday", "last mon"
match = regex.match(self.config.last_regex, trimmed_source)
if match and match.start() == 0 and len(match.group()) == len(trimmed_source):
weekday_str = match.group('weekday')
value = DateUtils.last(reference, self.config.day_of_week.get(weekday_str))
result.timex = DateTimeFormatUtil.luis_date_from_datetime(value)
result.future_value = value
result.past_value = value
result.success = True
return result
# handle "Friday"
match = regex.match(self.config.week_day_regex, trimmed_source)
if match and match.start() == 0 and len(match.group()) == len(trimmed_source):
weekday_str = match.group('weekday')
weekday = self.config.day_of_week.get(weekday_str)
value = DateUtils.this(reference, weekday)
if weekday < int(DayOfWeek.Monday):
weekday = int(DayOfWeek.Sunday)
if weekday < reference.isoweekday():
value = DateUtils.next(reference, weekday)
result.timex = 'XXXX-WXX-' + str(weekday)
future_date = value
past_date = value
if future_date < reference:
future_date += timedelta(weeks=1)
if past_date >= reference:
past_date -= timedelta(weeks=1)
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
# handle "for the 27th."
match = regex.match(self.config.for_the_regex, trimmed_source)
if match:
day_str = match.group('DayOfMonth')
er = ExtractResult.get_from_text(day_str)
day = int(self.config.number_parser.parse(er).value)
month = reference.month
year = reference.year
result.timex = DateTimeFormatUtil.luis_date(-1, -1, day)
date = datetime(year, month, day)
result.future_value = date
result.past_value = date
result.success = True
return result
        # handling cases like 'Thursday the 21st', where both 'Thursday' and '21st' refer to the same date
match = regex.match(self.config.week_day_and_day_of_month_regex, trimmed_source)
if match:
day_str = match.group('DayOfMonth')
er = ExtractResult.get_from_text(day_str)
day = int(self.config.number_parser.parse(er).value)
month = reference.month
year = reference.year
# the validity of the phrase is guaranteed in the Date Extractor
result.timex = DateTimeFormatUtil.luis_date(year, month, day)
date = datetime(year, month, day)
result.future_value = date
result.past_value = date
result.success = True
return result
return result
def parse_weekday_of_month(self, source: str, reference: datetime) -> DateTimeParseResult:
trimmed_source = source.strip()
result = DateTimeResolutionResult()
match = regex.match(self.config.week_day_of_month_regex, trimmed_source)
if not match:
return result
cardinal_str = RegExpUtility.get_group(match, 'cardinal')
weekday_str = RegExpUtility.get_group(match, 'weekday')
month_str = RegExpUtility.get_group(match, 'month')
no_year = False
cardinal = 5 if self.config.is_cardinal_last(cardinal_str) else self.config.cardinal_map.get(cardinal_str)
weekday = self.config.day_of_week.get(weekday_str)
month = reference.month
year = reference.year
if not month_str:
swift = self.config.get_swift_month(trimmed_source)
temp = reference.replace(month=reference.month + swift)
month = temp.month
year = temp.year
else:
month = self.config.month_of_year.get(month_str)
no_year = True
value = self._compute_date(cardinal, weekday, month, year)
if value.month != month:
cardinal -= 1
value = value.replace(day=value.day - 7)
future_date = value
past_date = value
if no_year and future_date < reference:
future_date = self._compute_date(cardinal, weekday, month, year + 1)
if future_date.month != month:
future_date = future_date.replace(day=future_date.day - 7)
if no_year and past_date >= reference:
past_date = self._compute_date(cardinal, weekday, month, year - 1)
if past_date.month != month:
                past_date = past_date.replace(day=past_date.day - 7)
result.timex = '-'.join(['XXXX', DateTimeFormatUtil.to_str(month, 2), 'WXX', str(weekday), '#' + str(cardinal)])
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
def _compute_date(self, cardinal: int, weekday: DayOfWeek, month: int, year: int):
first_day = datetime(year, month, 1)
first_weekday = DateUtils.this(first_day, weekday)
if weekday == 0:
weekday = int(DayOfWeek.Sunday)
if weekday < first_day.isoweekday():
first_weekday = DateUtils.next(first_day, weekday)
first_weekday = first_weekday.replace(day=first_weekday.day + (7 * (cardinal - 1)))
return first_weekday
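    # Added worked example (not part of the original source): for cardinal=2,
    # weekday=Sunday, month=6, year=2019, first_day is 2019-06-01 (a Saturday),
    # the first Sunday found is 2019-06-02, and adding 7 * (cardinal - 1) days
    # gives 2019-06-09, i.e. "second Sunday of June". The exact lookup of the
    # first matching weekday depends on DateUtils.this/next, so treat this as a
    # sketch of the intended behaviour rather than a guaranteed result.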
def parser_duration_with_ago_and_later(self, source: str, reference: datetime) -> DateTimeParseResult:
return AgoLaterUtil.parse_duration_with_ago_and_later(
source,
reference,
self.config.duration_extractor,
self.config.duration_parser,
self.config.unit_map,
self.config.unit_regex,
self.config.utility_configuration,
AgoLaterMode.DATE)
def parse_number_with_month(self, source: str, reference: datetime) -> DateTimeParseResult:
trimmed_source = source.strip()
ambiguous = True
result = DateTimeResolutionResult()
ers = self.config.ordinal_extractor.extract(trimmed_source)
if not ers:
ers = self.config.integer_extractor.extract(trimmed_source)
if not ers:
return result
num = int(self.config.number_parser.parse(ers[0]).value)
day = 1
month = 0
match = regex.search(self.config.month_regex, trimmed_source)
if match:
month = self.config.month_of_year.get(match.group())
day = num
else:
# handling relative month
match = regex.search(self.config.relative_month_regex, trimmed_source)
if match:
month_str = match.group('order')
swift = self.config.get_swift_month(month_str)
date = reference.replace(month=reference.month+swift)
month = date.month
day = num
ambiguous = False
        # handling cases like 'second Sunday'
if not match:
match = regex.search(self.config.week_day_regex, trimmed_source)
if match:
month = reference.month
# resolve the date of wanted week day
wanted_week_day = self.config.day_of_week.get(match.group('weekday'))
first_date = DateUtils.safe_create_from_min_value(reference.year, reference.month, 1)
first_weekday = first_date.isoweekday()
delta_days = wanted_week_day - first_weekday if wanted_week_day > first_weekday else wanted_week_day - first_weekday + 7
first_wanted_week_day = first_date + timedelta(days=delta_days)
day = first_wanted_week_day.day + ((num - 1) * 7)
ambiguous = False
if not match:
return result
year = reference.year
# for LUIS format value string
date = DateUtils.safe_create_from_min_value(year, month, day)
future_date = date
past_date = date
if ambiguous:
result.timex = DateTimeFormatUtil.luis_date(-1, month, day)
if future_date < reference:
future_date = future_date.replace(year=future_date.year+1)
if past_date >= reference:
                past_date = past_date.replace(year=past_date.year-1)
else:
result.timex = DateTimeFormatUtil.luis_date(year, month, day)
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
def parse_single_number(self, source: str, reference: datetime) -> DateTimeParseResult:
trimmed_source = source.strip()
result = DateTimeResolutionResult()
ers = self.config.ordinal_extractor.extract(trimmed_source)
if not ers or not ers[0].text:
ers = self.config.integer_extractor.extract(trimmed_source)
if not ers or not ers[0].text:
return result
day = int(self.config.number_parser.parse(ers[0]).value)
month = reference.month
year = reference.year
result.timex = DateTimeFormatUtil.luis_date(-1, -1, day)
past_date = DateUtils.safe_create_from_min_value(year, month, day)
future_date = DateUtils.safe_create_from_min_value(year, month, day)
if future_date != DateUtils.min_value and future_date < reference:
future_date = future_date.replace(month=future_date.month + 1)
if past_date != DateUtils.min_value and past_date >= reference:
past_date = past_date.replace(month=past_date.month - 1)
result.future_value = future_date
result.past_value = past_date
result.success = True
return result
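# Added usage sketch (not part of the original file): in the library this parser
# is normally driven together with the extractor by language-specific
# configuration objects, roughly:
#
#   extractor = BaseDateExtractor(some_extractor_config)
#   parser = BaseDateParser(some_parser_config)
#   for er in extractor.extract("see you next friday", reference):
#       parsed = parser.parse(er, reference)
#
# `some_extractor_config` and `some_parser_config` are hypothetical placeholders;
# the concrete configuration classes live in the language-specific modules of
# recognizers_date_time.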
| 36.418093 | 195 | 0.629708 |
6911d1f6589c7081c29210bafedded78c816581a
| 5,332 |
py
|
Python
|
cycada/data/data_loader.py
|
masterfulai/cycada_release
|
8c47ff6405074dd247aae0859ec1aec60509447e
|
[
"BSD-2-Clause"
] | null | null | null |
cycada/data/data_loader.py
|
masterfulai/cycada_release
|
8c47ff6405074dd247aae0859ec1aec60509447e
|
[
"BSD-2-Clause"
] | null | null | null |
cycada/data/data_loader.py
|
masterfulai/cycada_release
|
8c47ff6405074dd247aae0859ec1aec60509447e
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function
import os
from os.path import join
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import torch.nn as nn
from torchvision import datasets, transforms
from ..util import to_tensor_raw
def load_data(name, dset, batch=64, rootdir='', num_channels=3,
image_size=32, download=True, kwargs={}):
is_train = (dset == 'train')
if isinstance(name, list) and len(name) == 2: # load adda data
print('loading adda data')
src_dataset = get_dataset(name[0], join(rootdir, name[0]), dset,
image_size, num_channels, download=download)
tgt_dataset = get_dataset(name[1], join(rootdir, name[1]), dset,
image_size, num_channels, download=download)
dataset = AddaDataset(src_dataset, tgt_dataset)
else:
print('loading source model', name, rootdir, dset, image_size, download)
dataset = get_dataset(name, rootdir, dset, image_size, num_channels,
download=download)
if len(dataset) == 0:
return None
loader = torch.utils.data.DataLoader(dataset, batch_size=batch,
shuffle=is_train, **kwargs)
return loader
def get_transform_dataset(dataset_name, rootdir, net_transform, downscale):
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
transform, target_transform = get_transform2(dataset_name, net_transform, downscale)
return get_fcn_dataset(dataset_name, rootdir, transform=transform,
target_transform=target_transform)
sizes = {'cityscapes': 1024, 'gta5': 1024, 'cyclegta5': 1024}
def get_orig_size(dataset_name):
"Size of images in the dataset for relative scaling."
try:
return sizes[dataset_name]
    except KeyError:
raise Exception('Unknown dataset size:', dataset_name)
def get_transform2(dataset_name, net_transform, downscale):
"Returns image and label transform to downscale, crop and prepare for net."
orig_size = get_orig_size(dataset_name)
transform = []
target_transform = []
if downscale is not None:
transform.append(transforms.Resize(orig_size // downscale))
target_transform.append(
transforms.Resize(orig_size // downscale,
interpolation=Image.NEAREST))
transform.extend([transforms.Resize(orig_size), net_transform])
target_transform.extend([transforms.Resize(orig_size, interpolation=Image.NEAREST),
to_tensor_raw])
transform = transforms.Compose(transform)
target_transform = transforms.Compose(target_transform)
return transform, target_transform
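# Added illustrative note (not part of the original source): for
# dataset_name='cityscapes' (orig_size 1024) and downscale=2, the image pipeline
# built above is Resize(512) -> Resize(1024) -> net_transform, while the label
# pipeline uses NEAREST interpolation plus to_tensor_raw so that class ids are
# never interpolated. The concrete sizes are an example only.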
def get_transform(params, image_size, num_channels):
# Transforms for PIL Images: Gray <-> RGB
Gray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))
RGB2Gray = transforms.Lambda(lambda x: x.convert('L'))
transform = []
# Does size request match original size?
    if image_size != params.image_size:
        print('appending resize transform')
        transform.append(transforms.Resize(image_size))
else:
print('not appending transform')
# Does number of channels requested match original?
if not num_channels == params.num_channels:
if num_channels == 1:
transform.append(RGB2Gray)
elif num_channels == 3:
transform.append(Gray2RGB)
else:
print('NumChannels should be 1 or 3', num_channels)
raise Exception
transform += [transforms.ToTensor(),
transforms.Normalize((params.mean,), (params.std,))]
return transforms.Compose(transform)
def get_target_transform(params):
transform = params.target_transform
t_uniform = transforms.Lambda(lambda x: x[:,0]
if isinstance(x, (list, np.ndarray)) and len(x) == 2 else x)
if transform is None:
return t_uniform
else:
return transforms.Compose([transform, t_uniform])
class AddaDataset(data.Dataset):
def __init__(self, src_data, tgt_data):
self.src = src_data
self.tgt = tgt_data
def __getitem__(self, index):
ns = len(self.src)
nt = len(self.tgt)
xs, ys = self.src[index % ns]
xt, yt = self.tgt[index % nt]
return (xs, ys), (xt, yt)
def __len__(self):
return min(len(self.src), len(self.tgt))
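    # Added example of the pairing behaviour (not part of the original source):
    # with len(src) == 3 and len(tgt) == 5, __len__ is 3 and index 2 returns
    # (src[2], tgt[2]); indices are taken modulo each dataset's length, so the
    # shorter dataset effectively determines the epoch length.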
data_params = {}
def register_data_params(name):
def decorator(cls):
data_params[name] = cls
return cls
return decorator
dataset_obj = {}
def register_dataset_obj(name):
def decorator(cls):
dataset_obj[name] = cls
return cls
return decorator
class DatasetParams(object):
"Class variables defined."
num_channels = 1
image_size = 16
mean = 0.1307
std = 0.3081
num_cls = 10
target_transform = None
def get_dataset(name, rootdir, dset, image_size, num_channels, download=True):
is_train = (dset == 'train')
params = data_params[name]
transform = get_transform(params, image_size, num_channels)
target_transform = get_target_transform(params)
return dataset_obj[name](rootdir, train=is_train, transform=transform,
target_transform=target_transform, download=download)
def get_fcn_dataset(name, rootdir, **kwargs):
return dataset_obj[name](rootdir, **kwargs)
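# Added usage sketch (not part of the original file): a classification loader
# might be built roughly as
#
#   loader = load_data('mnist', 'train', batch=64, rootdir='data/mnist',
#                      num_channels=1, image_size=28)
#
# assuming an 'mnist' entry has been registered via register_data_params and
# register_dataset_obj elsewhere in the package; the dataset name and sizes here
# are illustrative only.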
| 33.746835 | 88 | 0.674794 |
84f9916e456b71ad319f79a0b277ac8227533160
| 29,086 |
py
|
Python
|
sympy/__init__.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/__init__.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/__init__.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
"""
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as simple
as possible in order to be comprehensible and easily extensible. SymPy is
written entirely in Python. It depends on mpmath, and other external libraries
may optionally be used for things like plotting support.
See the webpage for more information and documentation:
https://sympy.org
"""
import sys
if sys.version_info < (3, 7):
raise ImportError("Python version 3.7 or above is required for SymPy.")
del sys
try:
import mpmath
except ImportError:
raise ImportError("SymPy now depends on mpmath as an external library. "
"See https://docs.sympy.org/latest/install.html#mpmath for more information.")
del mpmath
from sympy.release import __version__
if 'dev' in __version__:
def enable_warnings():
import warnings
warnings.filterwarnings('default', '.*', DeprecationWarning, module='sympy.*')
del warnings
enable_warnings()
del enable_warnings
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug() # type: bool
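# Added example (not part of the original source): running a script with the
# environment variable set, e.g. `SYMPY_DEBUG=True python script.py`, makes this
# flag True; parts of the library consult sympy.SYMPY_DEBUG to decide whether to
# emit extra diagnostic output.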
from .core import (sympify, SympifyError, cacheit, Basic, Atom,
preorder_traversal, S, Expr, AtomicExpr, UnevaluatedExpr, Symbol,
Wild, Dummy, symbols, var, Number, Float, Rational, Integer,
NumberSymbol, RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo,
AlgebraicNumber, comp, mod_inverse, Pow, integer_nthroot, integer_log,
Mul, prod, Add, Mod, Rel, Eq, Ne, Lt, Le, Gt, Ge, Equality,
GreaterThan, LessThan, Unequality, StrictGreaterThan, StrictLessThan,
vectorize, Lambda, WildFunction, Derivative, diff, FunctionClass,
Function, Subs, expand, PoleError, count_ops, expand_mul, expand_log,
expand_func, expand_trig, expand_complex, expand_multinomial, nfloat,
expand_power_base, expand_power_exp, arity, PrecisionExhausted, N,
evalf, Tuple, Dict, gcd_terms, factor_terms, factor_nc, evaluate,
Catalan, EulerGamma, GoldenRatio, TribonacciConstant, bottom_up, use,
postorder_traversal, default_sort_key, ordered)
from .logic import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor,
Implies, Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map,
true, false, satisfiable)
from .assumptions import (AppliedPredicate, Predicate, AssumptionsContext,
assuming, Q, ask, register_handler, remove_handler, refine)
from .polys import (Poly, PurePoly, poly_from_expr, parallel_poly_from_expr,
degree, total_degree, degree_list, LC, LM, LT, pdiv, prem, pquo,
pexquo, div, rem, quo, exquo, half_gcdex, gcdex, invert,
subresultants, resultant, discriminant, cofactors, gcd_list, gcd,
lcm_list, lcm, terms_gcd, trunc, monic, content, primitive, compose,
decompose, sturm, gff_list, gff, sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor, intervals, refine_root, count_roots, real_roots,
nroots, ground_roots, nth_power_roots_poly, cancel, reduced, groebner,
is_zero_dimensional, GroebnerBasis, poly, symmetrize, horner,
interpolate, rational_interpolate, viete, together,
BasePolynomialError, ExactQuotientFailed, PolynomialDivisionFailed,
OperationNotSupported, HeuristicGCDFailed, HomomorphismFailed,
IsomorphismFailed, ExtraneousFactors, EvaluationFailed,
RefinementFailed, CoercionFailed, NotInvertible, NotReversible,
NotAlgebraic, DomainError, PolynomialError, UnificationFailed,
GeneratorsError, GeneratorsNeeded, ComputationFailed,
UnivariatePolynomialError, MultivariatePolynomialError,
PolificationFailed, OptionError, FlagError, minpoly,
minimal_polynomial, primitive_element, field_isomorphism,
to_number_field, isolate, round_two, prime_decomp, prime_valuation,
itermonomials, Monomial, lex, grlex,
grevlex, ilex, igrlex, igrevlex, CRootOf, rootof, RootOf,
ComplexRootOf, RootSum, roots, Domain, FiniteField, IntegerRing,
RationalField, RealField, ComplexField, PythonFiniteField,
GMPYFiniteField, PythonIntegerRing, GMPYIntegerRing, PythonRational,
GMPYRationalField, AlgebraicField, PolynomialRing, FractionField,
ExpressionDomain, FF_python, FF_gmpy, ZZ_python, ZZ_gmpy, QQ_python,
QQ_gmpy, GF, FF, ZZ, QQ, ZZ_I, QQ_I, RR, CC, EX, EXRAW,
construct_domain, swinnerton_dyer_poly, cyclotomic_poly,
symmetric_poly, random_poly, interpolating_poly, jacobi_poly,
chebyshevt_poly, chebyshevu_poly, hermite_poly, legendre_poly,
laguerre_poly, apart, apart_list, assemble_partfrac_list, Options,
ring, xring, vring, sring, field, xfield, vfield, sfield)
from .series import (Order, O, limit, Limit, gruntz, series, approximants,
residue, EmptySequence, SeqPer, SeqFormula, sequence, SeqAdd, SeqMul,
fourier_series, fps, difference_delta, limit_seq)
from .functions import (factorial, factorial2, rf, ff, binomial,
RisingFactorial, FallingFactorial, subfactorial, carmichael,
fibonacci, lucas, motzkin, tribonacci, harmonic, bernoulli, bell, euler,
catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root, Rem,
cbrt, re, im, sign, Abs, conjugate, arg, polar_lift,
periodic_argument, unbranched_argument, principal_branch, transpose,
adjoint, polarify, unpolarify, sin, cos, tan, sec, csc, cot, sinc,
asin, acos, atan, asec, acsc, acot, atan2, exp_polar, exp, ln, log,
LambertW, sinh, cosh, tanh, coth, sech, csch, asinh, acosh, atanh,
acoth, asech, acsch, floor, ceiling, frac, Piecewise, piecewise_fold,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv, Ei, expint, E1, li,
Li, Si, Ci, Shi, Chi, fresnels, fresnelc, gamma, lowergamma,
uppergamma, polygamma, loggamma, digamma, trigamma, multigamma,
dirichlet_eta, zeta, lerchphi, polylog, stieltjes, Eijk, LeviCivita,
KroneckerDelta, SingularityFunction, DiracDelta, Heaviside,
bspline_basis, bspline_basis_set, interpolating_spline, besselj,
bessely, besseli, besselk, hankel1, hankel2, jn, yn, jn_zeros, hn1,
hn2, airyai, airybi, airyaiprime, airybiprime, marcumq, hyper,
meijerg, appellf1, legendre, assoc_legendre, hermite, chebyshevt,
chebyshevu, chebyshevu_root, chebyshevt_root, laguerre,
assoc_laguerre, gegenbauer, jacobi, jacobi_normalized, Ynm, Ynm_c,
Znm, elliptic_k, elliptic_f, elliptic_e, elliptic_pi, beta, mathieus,
mathieuc, mathieusprime, mathieucprime, riemann_xi, betainc, betainc_regularized)
from .ntheory import (nextprime, prevprime, prime, primepi, primerange,
randprime, Sieve, sieve, primorial, cycle_length, composite,
compositepi, isprime, divisors, proper_divisors, factorint,
multiplicity, perfect_power, pollard_pm1, pollard_rho, primefactors,
totient, trailing, divisor_count, proper_divisor_count, divisor_sigma,
factorrat, reduced_totient, primenu, primeomega,
mersenne_prime_exponent, is_perfect, is_mersenne_prime, is_abundant,
is_deficient, is_amicable, abundance, npartitions, is_primitive_root,
is_quad_residue, legendre_symbol, jacobi_symbol, n_order, sqrt_mod,
quadratic_residues, primitive_root, nthroot_mod, is_nthpow_residue,
sqrt_mod_iter, mobius, discrete_log, quadratic_congruence,
binomial_coefficients, binomial_coefficients_list,
multinomial_coefficients, continued_fraction_periodic,
continued_fraction_iterator, continued_fraction_reduce,
continued_fraction_convergents, continued_fraction, egyptian_fraction)
from .concrete import product, Product, summation, Sum
from .discrete import (fft, ifft, ntt, intt, fwht, ifwht, mobius_transform,
inverse_mobius_transform, convolution, covering_product,
intersecting_product)
from .simplify import (simplify, hypersimp, hypersimilar, logcombine,
separatevars, posify, besselsimp, kroneckersimp, signsimp,
nsimplify, FU, fu, sqrtdenest, cse, epath, EPath, hyperexpand,
collect, rcollect, radsimp, collect_const, fraction, numer, denom,
trigsimp, exptrigsimp, powsimp, powdenest, combsimp, gammasimp,
ratsimp, ratsimpmodprime)
from .sets import (Set, Interval, Union, EmptySet, FiniteSet, ProductSet,
Intersection, DisjointUnion, imageset, Complement, SymmetricDifference, ImageSet,
Range, ComplexRegion, Complexes, Reals, Contains, ConditionSet, Ordinal,
OmegaPower, ord0, PowerSet, Naturals, Naturals0, UniversalSet,
Integers, Rationals)
from .solvers import (solve, solve_linear_system, solve_linear_system_LU,
solve_undetermined_coeffs, nsolve, solve_linear, checksol, det_quick,
inv_quick, check_assumptions, failing_assumptions, diophantine,
rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper, checkodesol,
classify_ode, dsolve, homogeneous_order, solve_poly_system,
solve_triangulated, pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol, ode_order, reduce_inequalities,
reduce_abs_inequality, reduce_abs_inequalities, solve_poly_inequality,
solve_rational_inequalities, solve_univariate_inequality, decompogen,
solveset, linsolve, linear_eq_to_matrix, nonlinsolve, substitution)
from .matrices import (ShapeError, NonSquareMatrixError, GramSchmidt,
casoratian, diag, eye, hessian, jordan_cell, list2numpy, matrix2numpy,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,
rot_axis3, symarray, wronskian, zeros, MutableDenseMatrix,
DeferredVector, MatrixBase, Matrix, MutableMatrix,
MutableSparseMatrix, banded, ImmutableDenseMatrix,
ImmutableSparseMatrix, ImmutableMatrix, SparseMatrix, MatrixSlice,
BlockDiagMatrix, BlockMatrix, FunctionMatrix, Identity, Inverse,
MatAdd, MatMul, MatPow, MatrixExpr, MatrixSymbol, Trace, Transpose,
ZeroMatrix, OneMatrix, blockcut, block_collapse, matrix_symbols,
Adjoint, hadamard_product, HadamardProduct, HadamardPower,
Determinant, det, diagonalize_vector, DiagMatrix, DiagonalMatrix,
DiagonalOf, trace, DotProduct, kronecker_product, KroneckerProduct,
PermutationMatrix, MatrixPermute, Permanent, per)
from .geometry import (Point, Point2D, Point3D, Line, Ray, Segment, Line2D,
Segment2D, Ray2D, Line3D, Segment3D, Ray3D, Plane, Ellipse, Circle,
Polygon, RegularPolygon, Triangle, rad, deg, are_similar, centroid,
convex_hull, idiff, intersection, closest_points, farthest_points,
GeometryError, Curve, Parabola)
from .utilities import (flatten, group, take, subsets, variations,
numbered_symbols, cartes, capture, dict_merge, prefixes, postfixes,
sift, topological_sort, unflatten, has_dups, has_variety, reshape,
rotations, filldedent, lambdify, source,
threaded, xthreaded, public, memoize_property, timed)
from .integrals import (integrate, Integral, line_integrate, mellin_transform,
inverse_mellin_transform, MellinTransform, InverseMellinTransform,
laplace_transform, inverse_laplace_transform, LaplaceTransform,
InverseLaplaceTransform, fourier_transform, inverse_fourier_transform,
FourierTransform, InverseFourierTransform, sine_transform,
inverse_sine_transform, SineTransform, InverseSineTransform,
cosine_transform, inverse_cosine_transform, CosineTransform,
InverseCosineTransform, hankel_transform, inverse_hankel_transform,
HankelTransform, InverseHankelTransform, singularityintegrate)
from .tensor import (IndexedBase, Idx, Indexed, get_contraction_structure,
get_indices, shape, MutableDenseNDimArray, ImmutableDenseNDimArray,
MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray,
tensorproduct, tensorcontraction, tensordiagonal, derive_by_array,
permutedims, Array, DenseNDimArray, SparseNDimArray)
from .parsing import parse_expr
from .calculus import (euler_equations, singularities, is_increasing,
is_strictly_increasing, is_decreasing, is_strictly_decreasing,
is_monotonic, finite_diff_weights, apply_finite_diff,
differentiate_finite, periodicity, not_empty_in, AccumBounds,
is_convex, stationary_points, minimum, maximum)
from .algebras import Quaternion
from .printing import (pager_print, pretty, pretty_print, pprint,
pprint_use_unicode, pprint_try_use_unicode, latex, print_latex,
multiline_latex, mathml, print_mathml, python, print_python, pycode,
ccode, print_ccode, glsl_code, print_glsl, cxxcode, fcode,
print_fcode, rcode, print_rcode, jscode, print_jscode, julia_code,
mathematica_code, octave_code, rust_code, print_gtk, preview, srepr,
print_tree, StrPrinter, sstr, sstrrepr, TableForm, dotprint,
maple_code, print_maple_code)
from .testing import test, doctest
# This module causes conflicts with other modules:
# from .stats import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit, plot_parametric
from .interactive import init_session, init_printing, interactive_traversal
evalf._create_evalf_table()
__all__ = [
# sympy.core
'sympify', 'SympifyError', 'cacheit', 'Basic', 'Atom',
'preorder_traversal', 'S', 'Expr', 'AtomicExpr', 'UnevaluatedExpr',
'Symbol', 'Wild', 'Dummy', 'symbols', 'var', 'Number', 'Float',
'Rational', 'Integer', 'NumberSymbol', 'RealNumber', 'igcd', 'ilcm',
'seterr', 'E', 'I', 'nan', 'oo', 'pi', 'zoo', 'AlgebraicNumber', 'comp',
'mod_inverse', 'Pow', 'integer_nthroot', 'integer_log', 'Mul', 'prod',
'Add', 'Mod', 'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge', 'Equality',
'GreaterThan', 'LessThan', 'Unequality', 'StrictGreaterThan',
'StrictLessThan', 'vectorize', 'Lambda', 'WildFunction', 'Derivative',
'diff', 'FunctionClass', 'Function', 'Subs', 'expand', 'PoleError',
'count_ops', 'expand_mul', 'expand_log', 'expand_func', 'expand_trig',
'expand_complex', 'expand_multinomial', 'nfloat', 'expand_power_base',
'expand_power_exp', 'arity', 'PrecisionExhausted', 'N', 'evalf', 'Tuple',
'Dict', 'gcd_terms', 'factor_terms', 'factor_nc', 'evaluate', 'Catalan',
'EulerGamma', 'GoldenRatio', 'TribonacciConstant', 'bottom_up', 'use',
'postorder_traversal', 'default_sort_key', 'ordered',
# sympy.logic
'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor',
'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic',
'bool_map', 'true', 'false', 'satisfiable',
# sympy.assumptions
'AppliedPredicate', 'Predicate', 'AssumptionsContext', 'assuming', 'Q',
'ask', 'register_handler', 'remove_handler', 'refine',
# sympy.polys
'Poly', 'PurePoly', 'poly_from_expr', 'parallel_poly_from_expr', 'degree',
'total_degree', 'degree_list', 'LC', 'LM', 'LT', 'pdiv', 'prem', 'pquo',
'pexquo', 'div', 'rem', 'quo', 'exquo', 'half_gcdex', 'gcdex', 'invert',
'subresultants', 'resultant', 'discriminant', 'cofactors', 'gcd_list',
'gcd', 'lcm_list', 'lcm', 'terms_gcd', 'trunc', 'monic', 'content',
'primitive', 'compose', 'decompose', 'sturm', 'gff_list', 'gff',
'sqf_norm', 'sqf_part', 'sqf_list', 'sqf', 'factor_list', 'factor',
'intervals', 'refine_root', 'count_roots', 'real_roots', 'nroots',
'ground_roots', 'nth_power_roots_poly', 'cancel', 'reduced', 'groebner',
'is_zero_dimensional', 'GroebnerBasis', 'poly', 'symmetrize', 'horner',
'interpolate', 'rational_interpolate', 'viete', 'together',
'BasePolynomialError', 'ExactQuotientFailed', 'PolynomialDivisionFailed',
'OperationNotSupported', 'HeuristicGCDFailed', 'HomomorphismFailed',
'IsomorphismFailed', 'ExtraneousFactors', 'EvaluationFailed',
'RefinementFailed', 'CoercionFailed', 'NotInvertible', 'NotReversible',
'NotAlgebraic', 'DomainError', 'PolynomialError', 'UnificationFailed',
'GeneratorsError', 'GeneratorsNeeded', 'ComputationFailed',
'UnivariatePolynomialError', 'MultivariatePolynomialError',
'PolificationFailed', 'OptionError', 'FlagError', 'minpoly',
'minimal_polynomial', 'primitive_element', 'field_isomorphism',
'to_number_field', 'isolate', 'round_two', 'prime_decomp',
'prime_valuation', 'itermonomials', 'Monomial', 'lex', 'grlex',
'grevlex', 'ilex', 'igrlex', 'igrevlex', 'CRootOf', 'rootof', 'RootOf',
'ComplexRootOf', 'RootSum', 'roots', 'Domain', 'FiniteField',
'IntegerRing', 'RationalField', 'RealField', 'ComplexField',
'PythonFiniteField', 'GMPYFiniteField', 'PythonIntegerRing',
'GMPYIntegerRing', 'PythonRational', 'GMPYRationalField',
'AlgebraicField', 'PolynomialRing', 'FractionField', 'ExpressionDomain',
'FF_python', 'FF_gmpy', 'ZZ_python', 'ZZ_gmpy', 'QQ_python', 'QQ_gmpy',
'GF', 'FF', 'ZZ', 'QQ', 'ZZ_I', 'QQ_I', 'RR', 'CC', 'EX', 'EXRAW',
'construct_domain', 'swinnerton_dyer_poly', 'cyclotomic_poly',
'symmetric_poly', 'random_poly', 'interpolating_poly', 'jacobi_poly',
'chebyshevt_poly', 'chebyshevu_poly', 'hermite_poly', 'legendre_poly',
'laguerre_poly', 'apart', 'apart_list', 'assemble_partfrac_list',
'Options', 'ring', 'xring', 'vring', 'sring', 'field', 'xfield', 'vfield',
'sfield',
# sympy.series
'Order', 'O', 'limit', 'Limit', 'gruntz', 'series', 'approximants',
'residue', 'EmptySequence', 'SeqPer', 'SeqFormula', 'sequence', 'SeqAdd',
'SeqMul', 'fourier_series', 'fps', 'difference_delta', 'limit_seq',
# sympy.functions
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial', 'carmichael', 'fibonacci', 'lucas',
'motzkin', 'tribonacci', 'harmonic', 'bernoulli', 'bell', 'euler', 'catalan',
'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'Rem',
'cbrt', 're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify', 'sin', 'cos', 'tan',
'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan', 'asec', 'acsc',
'acot', 'atan2', 'exp_polar', 'exp', 'ln', 'log', 'LambertW', 'sinh',
'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh',
'acoth', 'asech', 'acsch', 'floor', 'ceiling', 'frac', 'Piecewise',
'piecewise_fold', 'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv',
'erf2inv', 'Ei', 'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi',
'fresnels', 'fresnelc', 'gamma', 'lowergamma', 'uppergamma', 'polygamma',
'loggamma', 'digamma', 'trigamma', 'multigamma', 'dirichlet_eta', 'zeta',
'lerchphi', 'polylog', 'stieltjes', 'Eijk', 'LeviCivita',
'KroneckerDelta', 'SingularityFunction', 'DiracDelta', 'Heaviside',
'bspline_basis', 'bspline_basis_set', 'interpolating_spline', 'besselj',
'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn', 'yn',
'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime',
'airybiprime', 'marcumq', 'hyper', 'meijerg', 'appellf1', 'legendre',
'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu',
'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre',
'gegenbauer', 'jacobi', 'jacobi_normalized', 'Ynm', 'Ynm_c', 'Znm',
'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi', 'beta',
'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime', 'riemann_xi','betainc',
'betainc_regularized',
# sympy.ntheory
'nextprime', 'prevprime', 'prime', 'primepi', 'primerange', 'randprime',
'Sieve', 'sieve', 'primorial', 'cycle_length', 'composite', 'compositepi',
'isprime', 'divisors', 'proper_divisors', 'factorint', 'multiplicity',
'perfect_power', 'pollard_pm1', 'pollard_rho', 'primefactors', 'totient',
'trailing', 'divisor_count', 'proper_divisor_count', 'divisor_sigma',
'factorrat', 'reduced_totient', 'primenu', 'primeomega',
'mersenne_prime_exponent', 'is_perfect', 'is_mersenne_prime',
'is_abundant', 'is_deficient', 'is_amicable', 'abundance', 'npartitions',
'is_primitive_root', 'is_quad_residue', 'legendre_symbol',
'jacobi_symbol', 'n_order', 'sqrt_mod', 'quadratic_residues',
'primitive_root', 'nthroot_mod', 'is_nthpow_residue', 'sqrt_mod_iter',
'mobius', 'discrete_log', 'quadratic_congruence', 'binomial_coefficients',
'binomial_coefficients_list', 'multinomial_coefficients',
'continued_fraction_periodic', 'continued_fraction_iterator',
'continued_fraction_reduce', 'continued_fraction_convergents',
'continued_fraction', 'egyptian_fraction',
# sympy.concrete
'product', 'Product', 'summation', 'Sum',
# sympy.discrete
'fft', 'ifft', 'ntt', 'intt', 'fwht', 'ifwht', 'mobius_transform',
'inverse_mobius_transform', 'convolution', 'covering_product',
'intersecting_product',
# sympy.simplify
'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars',
'posify', 'besselsimp', 'kroneckersimp', 'signsimp',
'nsimplify', 'FU', 'fu', 'sqrtdenest', 'cse', 'epath', 'EPath',
'hyperexpand', 'collect', 'rcollect', 'radsimp', 'collect_const',
'fraction', 'numer', 'denom', 'trigsimp', 'exptrigsimp', 'powsimp',
'powdenest', 'combsimp', 'gammasimp', 'ratsimp', 'ratsimpmodprime',
# sympy.sets
'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet',
'Intersection', 'imageset', 'DisjointUnion', 'Complement', 'SymmetricDifference',
'ImageSet', 'Range', 'ComplexRegion', 'Reals', 'Contains', 'ConditionSet',
'Ordinal', 'OmegaPower', 'ord0', 'PowerSet', 'Naturals',
'Naturals0', 'UniversalSet', 'Integers', 'Rationals', 'Complexes',
# sympy.solvers
'solve', 'solve_linear_system', 'solve_linear_system_LU',
'solve_undetermined_coeffs', 'nsolve', 'solve_linear', 'checksol',
'det_quick', 'inv_quick', 'check_assumptions', 'failing_assumptions',
'diophantine', 'rsolve', 'rsolve_poly', 'rsolve_ratio', 'rsolve_hyper',
'checkodesol', 'classify_ode', 'dsolve', 'homogeneous_order',
'solve_poly_system', 'solve_triangulated', 'pde_separate',
'pde_separate_add', 'pde_separate_mul', 'pdsolve', 'classify_pde',
'checkpdesol', 'ode_order', 'reduce_inequalities',
'reduce_abs_inequality', 'reduce_abs_inequalities',
'solve_poly_inequality', 'solve_rational_inequalities',
'solve_univariate_inequality', 'decompogen', 'solveset', 'linsolve',
'linear_eq_to_matrix', 'nonlinsolve', 'substitution',
# sympy.matrices
'ShapeError', 'NonSquareMatrixError', 'GramSchmidt', 'casoratian', 'diag',
'eye', 'hessian', 'jordan_cell', 'list2numpy', 'matrix2numpy',
'matrix_multiply_elementwise', 'ones', 'randMatrix', 'rot_axis1',
'rot_axis2', 'rot_axis3', 'symarray', 'wronskian', 'zeros',
'MutableDenseMatrix', 'DeferredVector', 'MatrixBase', 'Matrix',
'MutableMatrix', 'MutableSparseMatrix', 'banded', 'ImmutableDenseMatrix',
'ImmutableSparseMatrix', 'ImmutableMatrix', 'SparseMatrix', 'MatrixSlice',
'BlockDiagMatrix', 'BlockMatrix', 'FunctionMatrix', 'Identity', 'Inverse',
'MatAdd', 'MatMul', 'MatPow', 'MatrixExpr', 'MatrixSymbol', 'Trace',
'Transpose', 'ZeroMatrix', 'OneMatrix', 'blockcut', 'block_collapse',
'matrix_symbols', 'Adjoint', 'hadamard_product', 'HadamardProduct',
'HadamardPower', 'Determinant', 'det', 'diagonalize_vector', 'DiagMatrix',
'DiagonalMatrix', 'DiagonalOf', 'trace', 'DotProduct',
'kronecker_product', 'KroneckerProduct', 'PermutationMatrix',
'MatrixPermute', 'Permanent', 'per',
# sympy.geometry
'Point', 'Point2D', 'Point3D', 'Line', 'Ray', 'Segment', 'Line2D',
'Segment2D', 'Ray2D', 'Line3D', 'Segment3D', 'Ray3D', 'Plane', 'Ellipse',
'Circle', 'Polygon', 'RegularPolygon', 'Triangle', 'rad', 'deg',
'are_similar', 'centroid', 'convex_hull', 'idiff', 'intersection',
'closest_points', 'farthest_points', 'GeometryError', 'Curve', 'Parabola',
# sympy.utilities
'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols',
'cartes', 'capture', 'dict_merge', 'prefixes', 'postfixes', 'sift',
'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape',
'rotations', 'filldedent', 'lambdify', 'source', 'threaded', 'xthreaded',
'public', 'memoize_property', 'timed',
# sympy.integrals
'integrate', 'Integral', 'line_integrate', 'mellin_transform',
'inverse_mellin_transform', 'MellinTransform', 'InverseMellinTransform',
'laplace_transform', 'inverse_laplace_transform', 'LaplaceTransform',
'InverseLaplaceTransform', 'fourier_transform',
'inverse_fourier_transform', 'FourierTransform',
'InverseFourierTransform', 'sine_transform', 'inverse_sine_transform',
'SineTransform', 'InverseSineTransform', 'cosine_transform',
'inverse_cosine_transform', 'CosineTransform', 'InverseCosineTransform',
'hankel_transform', 'inverse_hankel_transform', 'HankelTransform',
'InverseHankelTransform', 'singularityintegrate',
# sympy.tensor
'IndexedBase', 'Idx', 'Indexed', 'get_contraction_structure',
'get_indices', 'shape', 'MutableDenseNDimArray', 'ImmutableDenseNDimArray',
'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray',
'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array',
'permutedims', 'Array', 'DenseNDimArray', 'SparseNDimArray',
# sympy.parsing
'parse_expr',
# sympy.calculus
'euler_equations', 'singularities', 'is_increasing',
'is_strictly_increasing', 'is_decreasing', 'is_strictly_decreasing',
'is_monotonic', 'finite_diff_weights', 'apply_finite_diff',
'differentiate_finite', 'periodicity', 'not_empty_in',
'AccumBounds', 'is_convex', 'stationary_points', 'minimum', 'maximum',
# sympy.algebras
'Quaternion',
# sympy.printing
'pager_print', 'pretty', 'pretty_print', 'pprint', 'pprint_use_unicode',
'pprint_try_use_unicode', 'latex', 'print_latex', 'multiline_latex',
'mathml', 'print_mathml', 'python', 'print_python', 'pycode', 'ccode',
'print_ccode', 'glsl_code', 'print_glsl', 'cxxcode', 'fcode',
'print_fcode', 'rcode', 'print_rcode', 'jscode', 'print_jscode',
'julia_code', 'mathematica_code', 'octave_code', 'rust_code', 'print_gtk',
'preview', 'srepr', 'print_tree', 'StrPrinter', 'sstr', 'sstrrepr',
'TableForm', 'dotprint', 'maple_code', 'print_maple_code',
# sympy.plotting
'plot', 'textplot', 'plot_backends', 'plot_implicit', 'plot_parametric',
# sympy.interactive
'init_session', 'init_printing', 'interactive_traversal',
# sympy.testing
'test', 'doctest',
]
#===========================================================================#
# #
# XXX: The names below were importable before SymPy 1.6 using #
# #
# from sympy import * #
# #
# This happened implicitly because there was no __all__ defined in this #
# __init__.py file. Not every package is imported. The list matches what #
# would have been imported before. It is possible that these packages will #
# not be imported by a star-import from sympy in future. #
# #
#===========================================================================#
__all__.extend((
'algebras',
'assumptions',
'calculus',
'concrete',
'discrete',
'external',
'functions',
'geometry',
'interactive',
'multipledispatch',
'ntheory',
'parsing',
'plotting',
'polys',
'printing',
'release',
'strategies',
'tensor',
'utilities',
))
| 54.879245 | 91 | 0.675789 |
f36915faba08a1ee5f120c5b1f0aa369c0ba0a0f
| 62 |
py
|
Python
|
.history/postImages/index_20201005142231.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | 2 |
2020-10-21T22:14:15.000Z
|
2020-10-21T22:14:16.000Z
|
.history/postImages/index_20201005142231.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | null | null | null |
.history/postImages/index_20201005142231.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | null | null | null |
import pandas as pd
df = pd.read_excel("bridgeData2.xlsx")
| 12.4 | 38 | 0.725806 |
0ffad7247f063b37292363f378b5bcb87dc48c33
| 1,352 |
py
|
Python
|
src/target.py
|
agoswam/Simulator
|
3507b697aab134022cfab61b6231979795bb9115
|
[
"MIT"
] | 2 |
2018-04-01T23:26:31.000Z
|
2020-04-24T17:12:00.000Z
|
src/target.py
|
agoswam/Simulator
|
3507b697aab134022cfab61b6231979795bb9115
|
[
"MIT"
] | null | null | null |
src/target.py
|
agoswam/Simulator
|
3507b697aab134022cfab61b6231979795bb9115
|
[
"MIT"
] | 2 |
2022-02-18T12:13:12.000Z
|
2022-03-13T13:07:19.000Z
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------\n
# Author: Angshuman Goswami, Graduate Student, Clemson University
# Date Created: 20/5/2016, Last Modified: 20/5/2016 \n
# --------------------------------------------------------------------------------------\n
import rospy
import time
import os
import math
from rospy import init_node, Subscriber, Publisher, get_param
from rospy import Rate, is_shutdown, ROSInterruptException, spin, on_shutdown
from rospy_tutorials.msg import Floats
from rospy.numpy_msg import numpy_msg
from numpy import pi
from geometry_msgs.msg import Vector3
Target_X = 22
Target_Y = 0.1
def target():
# initialize node
    global Target_X, Target_Y
rospy.init_node('target', anonymous=True)
state_pub = rospy.Publisher('target', Vector3, queue_size = 10)
target_XY = Vector3(Target_X, Target_Y,0)
# set node rate
loop_rate = 20
dt = 1.0 / loop_rate
rate = rospy.Rate(loop_rate)
t0 = time.time()
while not rospy.is_shutdown():
state_pub.publish(target_XY)
rate.sleep()
if __name__=='__main__':
try:
target()
except rospy.ROSInterruptException:
pass
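# Added behaviour sketch (not part of the original file): when run as a ROS node
# this script publishes Vector3(22, 0.1, 0) on the 'target' topic at 20 Hz until
# shutdown. Launching it requires a running roscore, e.g. via
# `rosrun <package> target.py`, where the package name is not given in this file.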
| 26.509804 | 90 | 0.553994 |
59fedc7421e9567d2bddf07c2afe51b2c3399aac
| 10,215 |
py
|
Python
|
venv/Lib/site-packages/sklearn/linear_model/tests/test_theil_sen.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | 2 |
2021-05-02T07:59:56.000Z
|
2021-12-14T19:53:13.000Z
|
venv/Lib/site-packages/sklearn/linear_model/tests/test_theil_sen.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | 7 |
2021-06-08T21:46:24.000Z
|
2022-03-12T00:35:31.000Z
|
my_env/Lib/site-packages/sklearn/linear_model/tests/test_theil_sen.py
|
obulrdy6881/Drowsinss
|
61cb9281d7dd22aee282b517e2fbf500f0ff9935
|
[
"MIT"
] | 1 |
2021-05-02T07:59:59.000Z
|
2021-05-02T07:59:59.000Z
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model._theil_sen import _modified_weiszfeld_step
from sklearn.utils._testing import assert_almost_equal, assert_raises
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
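# Added usage sketch (not part of the original source):
#
#   with no_stdout_stderr():
#       noisy_fit()   # anything printed here is redirected to os.devnull
#
# `noisy_fit` is a hypothetical placeholder for a call whose console output a
# test wants to suppress.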
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert np.abs(lstq.coef_ - w) > 0.9
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert np.abs(lstq.coef_ - w - c) > 0.5
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.e-6
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
assert_raises(ValueError, theil_sen.fit, X, y)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(n_subsamples=1, random_state=0)
assert_raises(ValueError, theil_sen.fit, X, y)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0)
assert_raises(ValueError, theil_sen.fit, X, y)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
assert_raises(ValueError, theil_sen.fit, X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=2,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| 36.223404 | 80 | 0.648556 |
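The tests above repeatedly contrast TheilSenRegressor with ordinary least squares on toy data containing outliers. A minimal standalone sketch of that comparison, assuming scikit-learn is installed (the data values below are illustrative, not taken from the test file):

# Sketch mirroring the 1-D toy setup used in the tests: a clean linear signal plus a few gross outliers.
import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor

rng = np.random.RandomState(0)
x = rng.normal(size=100)
y = 3.0 * x + 2.0 + 0.1 * rng.normal(size=100)
y[:5] = 50.0  # corrupt a few samples

X = x[:, np.newaxis]
print(LinearRegression().fit(X, y).coef_)                 # pulled away from 3 by the outliers
print(TheilSenRegressor(random_state=0).fit(X, y).coef_)  # stays close to the true slope of 3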
3bafd4ac8dce53a61bc5f969df68d84407e7f6df
| 22,480 |
py
|
Python
|
scipy/optimize/_linprog.py
|
gitter-badger/scipy
|
0d10fea581d5044bbecc8b4fbe8c11fc102f6592
|
[
"BSD-3-Clause"
] | 1 |
2019-07-14T23:22:16.000Z
|
2019-07-14T23:22:16.000Z
|
scipy/optimize/_linprog.py
|
gitter-badger/scipy
|
0d10fea581d5044bbecc8b4fbe8c11fc102f6592
|
[
"BSD-3-Clause"
] | 1 |
2021-06-25T15:36:38.000Z
|
2021-06-25T15:36:38.000Z
|
scipy/optimize/_linprog.py
|
gitter-badger/scipy
|
0d10fea581d5044bbecc8b4fbe8c11fc102f6592
|
[
"BSD-3-Clause"
] | 1 |
2019-07-21T15:59:29.000Z
|
2019-07-21T15:59:29.000Z
|
"""
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .optimize import OptimizeResult, OptimizeWarning
from warnings import warn
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_rs import _linprog_rs
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _postprocess
)
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
print('--------- Simplex Early Exit -------\n'.format(nit))
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='interior-point', callback=None,
options=None, x0=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Informally, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : {'interior-point', 'revised simplex', 'simplex'}, optional
The algorithm used to solve the standard form problem.
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are supported.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to ``True`` to print convergence messages.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
The values of the decision variables that minimizes the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
:ref:`'interior-point' <optimize.linprog-interior-point>` is the default
as it is typically the fastest and most robust method.
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
accurate for the problems it solves.
:ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
included for backwards compatibility and educational purposes.
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
    Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying any method, a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g. a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts this to a solution to the original problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
Journal in Numerische Mathematik 16.5 (1971): 414-434.
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
Note that the default method for `linprog` is 'interior-point', which is
approximate by nature.
>>> print(res)
con: array([], dtype=float64)
fun: -21.99999984082494 # may vary
message: 'Optimization terminated successfully.'
nit: 6 # may vary
      slack: array([3.89999997e+01, 8.46872439e-08]) # may vary
status: 0
success: True
x: array([ 9.99999989, -2.99999999]) # may vary
If you need greater accuracy, try 'revised simplex'.
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')
>>> print(res)
con: array([], dtype=float64)
fun: -22.0 # may vary
message: 'Optimization terminated successfully.'
nit: 1 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.]) # may vary
"""
meth = method.lower()
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options, x0 = _parse_linprog(
c, A_ub, b_ub, A_eq, b_eq, bounds, options, x0)
tol = solver_options.get('tol', 1e-9)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = c.copy(
), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy()
# Solve trivial problem, eliminate variables, tighten bounds, etc...
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, x0, undo, complete, status,
message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, rr, tol)
if not complete:
A, b, c, c0, x0 = _get_Abc(c, c0, A_ub, b_ub, A_eq,
b_eq, bounds, x0, undo)
T_o = (c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, undo)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback, _T_o=T_o, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc...
# need modified bounds here to translate variables appropriately
disp = solver_options.get('disp', False)
x, fun, slack, con, status, message = _postprocess(
x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds,
complete, undo, status, message, tol, iteration, disp)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
| 40.650995 | 143 | 0.634342 |
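The docstring above walks through converting a "greater than" constraint into the `A_ub @ x <= b_ub` form that `linprog` accepts by multiplying both sides by -1. The following short sketch reproduces that conversion with the docstring's own numbers, assuming a SciPy version that includes the revised simplex method (as this file does); it is an illustration, not part of the record:

# Sign-flip described in the docstring: -x0 - 2*x1 >= -4  becomes  x0 + 2*x1 <= 4.
from scipy.optimize import linprog

c = [-1, 4]
A_ub = [[-3, 1],   # -3*x0 +   x1 <= 6
        [1, 2]]    #    x0 + 2*x1 <= 4   (was -x0 - 2*x1 >= -4, multiplied by -1)
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='revised simplex')
print(res.x, res.fun)  # expected to be close to [10, -3] and -22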
b4dedc3acd601780663bee854729475c458d315c
| 818 |
py
|
Python
|
presto/migrations/0056_auto_20180816_0948.py
|
pwgbots/presto
|
49622bdfb6c597c3451d9f5c589d9a1797143d4f
|
[
"MIT"
] | null | null | null |
presto/migrations/0056_auto_20180816_0948.py
|
pwgbots/presto
|
49622bdfb6c597c3451d9f5c589d9a1797143d4f
|
[
"MIT"
] | 8 |
2020-07-28T07:59:09.000Z
|
2022-03-11T23:43:01.000Z
|
presto/migrations/0056_auto_20180816_0948.py
|
pwgbots/presto
|
49622bdfb6c597c3451d9f5c589d9a1797143d4f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-16 07:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('presto', '0055_auto_20180815_2208'),
]
operations = [
migrations.AddField(
model_name='queuepicture',
name='mail_body',
field=models.TextField(blank=True, default=''),
),
migrations.AddField(
model_name='queuepicture',
name='mail_subject',
field=models.CharField(blank=True, default='', max_length=256),
),
migrations.AddField(
model_name='queuepicture',
name='suppressed',
field=models.BooleanField(default=False),
),
]
| 26.387097 | 75 | 0.589242 |
3cf10124766b687e8713c783e217653c4f0301ba
| 7,564 |
py
|
Python
|
test/tensorflow_test.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
test/tensorflow_test.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
test/tensorflow_test.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy.testing import assert_almost_equal
import tensorflow as tf
import unittest
import ray
def make_linear_network(w_name=None, b_name=None):
# Define the inputs.
x_data = tf.placeholder(tf.float32, shape=[100])
y_data = tf.placeholder(tf.float32, shape=[100])
# Define the weights and computation.
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name=w_name)
b = tf.Variable(tf.zeros([1]), name=b_name)
y = w * x_data + b
# Return the loss and weight initializer.
return (tf.reduce_mean(tf.square(y - y_data)),
tf.global_variables_initializer(), x_data, y_data)
class NetActor(object):
def __init__(self):
# Uses a separate graph for each network.
with tf.Graph().as_default():
# Create the network.
loss, init, _, _ = make_linear_network()
sess = tf.Session()
# Additional code for setting and getting the weights.
variables = ray.experimental.TensorFlowVariables(loss, sess)
# Return all of the data needed to use the network.
self.values = [variables, init, sess]
sess.run(init)
def set_and_get_weights(self, weights):
self.values[0].set_weights(weights)
return self.values[0].get_weights()
def get_weights(self):
return self.values[0].get_weights()
class TrainActor(object):
def __init__(self):
# Almost the same as above, but now returns the placeholders and gradient.
with tf.Graph().as_default():
loss, init, x_data, y_data = make_linear_network()
sess = tf.Session()
variables = ray.experimental.TensorFlowVariables(loss, sess)
optimizer = tf.train.GradientDescentOptimizer(0.9)
grads = optimizer.compute_gradients(loss)
train = optimizer.apply_gradients(grads)
self.values = [loss, variables, init, sess, grads, train, [x_data, y_data]]
sess.run(init)
def training_step(self, weights):
_, variables, _, sess, grads, _, placeholders = self.values
variables.set_weights(weights)
return sess.run([grad[0] for grad in grads],
feed_dict=dict(zip(placeholders, [[1] * 100, [2] * 100])))
def get_weights(self):
return self.values[1].get_weights()
class TensorFlowTest(unittest.TestCase):
def testTensorFlowVariables(self):
ray.init(num_workers=2)
sess = tf.Session()
loss, init, _, _ = make_linear_network()
sess.run(init)
variables = ray.experimental.TensorFlowVariables(loss, sess)
weights = variables.get_weights()
for (name, val) in weights.items():
weights[name] += 1.0
variables.set_weights(weights)
self.assertEqual(weights, variables.get_weights())
loss2, init2, _, _ = make_linear_network("w", "b")
sess.run(init2)
variables2 = ray.experimental.TensorFlowVariables(loss2, sess)
weights2 = variables2.get_weights()
for (name, val) in weights2.items():
weights2[name] += 2.0
variables2.set_weights(weights2)
self.assertEqual(weights2, variables2.get_weights())
flat_weights = variables2.get_flat() + 2.0
variables2.set_flat(flat_weights)
assert_almost_equal(flat_weights, variables2.get_flat())
variables3 = ray.experimental.TensorFlowVariables(loss2)
self.assertEqual(variables3.sess, None)
sess = tf.Session()
variables3.set_session(sess)
self.assertEqual(variables3.sess, sess)
ray.worker.cleanup()
# Test that the variable names for the two different nets are not
# modified by TensorFlow to be unique (i.e. they should already
# be unique because of the variable prefix).
def testVariableNameCollision(self):
ray.init(num_workers=2)
net1 = NetActor()
net2 = NetActor()
# This is checking that the variable names of the two nets are the same,
# i.e. that the names in the weight dictionaries are the same
net1.values[0].set_weights(net2.values[0].get_weights())
ray.worker.cleanup()
# Test that different networks on the same worker are independent and
# we can get/set their weights without any interaction.
def testNetworksIndependent(self):
# Note we use only one worker to ensure that all of the remote functions
# run on the same worker.
ray.init(num_workers=1)
net1 = NetActor()
net2 = NetActor()
# Make sure the two networks have different weights. TODO(rkn): Note that
    # equality comparisons of numpy arrays normally do not work. This only
# works because at the moment they have size 1.
weights1 = net1.get_weights()
weights2 = net2.get_weights()
self.assertNotEqual(weights1, weights2)
# Set the weights and get the weights, and make sure they are unchanged.
new_weights1 = net1.set_and_get_weights(weights1)
new_weights2 = net2.set_and_get_weights(weights2)
self.assertEqual(weights1, new_weights1)
self.assertEqual(weights2, new_weights2)
# Swap the weights.
new_weights1 = net2.set_and_get_weights(weights1)
new_weights2 = net1.set_and_get_weights(weights2)
self.assertEqual(weights1, new_weights1)
self.assertEqual(weights2, new_weights2)
ray.worker.cleanup()
# This test creates an additional network on the driver so that the
# tensorflow variables on the driver and the worker differ.
def testNetworkDriverWorkerIndependent(self):
ray.init(num_workers=1)
# Create a network on the driver locally.
sess1 = tf.Session()
loss1, init1, _, _ = make_linear_network()
ray.experimental.TensorFlowVariables(loss1, sess1)
sess1.run(init1)
net2 = ray.actor(NetActor)()
weights2 = ray.get(net2.get_weights())
new_weights2 = ray.get(net2.set_and_get_weights(net2.get_weights()))
self.assertEqual(weights2, new_weights2)
ray.worker.cleanup()
def testVariablesControlDependencies(self):
ray.init(num_workers=1)
# Creates a network and appends a momentum optimizer.
sess = tf.Session()
loss, init, _, _ = make_linear_network()
minimizer = tf.train.MomentumOptimizer(0.9, 0.9).minimize(loss)
net_vars = ray.experimental.TensorFlowVariables(minimizer, sess)
sess.run(init)
# Tests if all variables are properly retrieved, 2 variables and 2 momentum
# variables.
self.assertEqual(len(net_vars.variables.items()), 4)
ray.worker.cleanup()
def testRemoteTrainingStep(self):
ray.init(num_workers=1)
net = ray.actor(TrainActor)()
ray.get(net.training_step(net.get_weights()))
ray.worker.cleanup()
def testRemoteTrainingLoss(self):
ray.init(num_workers=2)
net = ray.actor(TrainActor)()
loss, variables, _, sess, grads, train, placeholders = TrainActor().values
before_acc = sess.run(loss, feed_dict=dict(zip(placeholders,
[[2] * 100, [4] * 100])))
for _ in range(3):
gradients_list = ray.get([net.training_step(variables.get_weights())
for _ in range(2)])
mean_grads = [sum([gradients[i] for gradients in gradients_list]) /
len(gradients_list) for i in range(len(gradients_list[0]))]
feed_dict = {grad[0]: mean_grad for (grad, mean_grad)
in zip(grads, mean_grads)}
sess.run(train, feed_dict=feed_dict)
after_acc = sess.run(loss, feed_dict=dict(zip(placeholders,
[[2] * 100, [4] * 100])))
self.assertTrue(before_acc < after_acc)
ray.worker.cleanup()
if __name__ == "__main__":
unittest.main(verbosity=2)
| 33.469027 | 79 | 0.691433 |
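testRemoteTrainingLoss above gathers per-variable gradients from several remote actors and averages them before applying a training step. A pure-NumPy sketch of that averaging step, with made-up worker gradients (not values from the test):

# Each inner list holds one gradient array per variable, as returned by TrainActor.training_step.
import numpy as np

gradients_list = [
    [np.array([0.5]), np.array([1.0])],   # gradients from worker 1
    [np.array([0.7]), np.array([0.6])],   # gradients from worker 2
]
mean_grads = [sum(g[i] for g in gradients_list) / len(gradients_list)
              for i in range(len(gradients_list[0]))]
print(mean_grads)  # [array([0.6]), array([0.8])]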
df932a1d318345b8235882775e0cd92939917f5c
| 1,968 |
py
|
Python
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 10 |
2021-02-09T19:25:46.000Z
|
2022-03-29T13:49:23.000Z
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | null | null | null |
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 5 |
2020-12-23T15:24:40.000Z
|
2022-01-06T09:42:38.000Z
|
#! /usr/bin/env python
from .toy_problem_test import ToyProblemTest
from .reconciliation_problem_test import ReconciliationProblemTest
from .reconciliation_problem_2_test import ReconciliationProblem2Test
from .recon3_test import Recon3Test
from .optgapc1_test import OptGapC1Test
from .optgapc2_test import OptGapC2Test
from .optgapc3_test import OptGapC3Test
from .optgap4_test import OptGap4Test
from .single_edge_b import SingleEdgeBTest
from .feasibility_test import FeasibilityTest
from .flow_path_construction_test import FlowPathConstructionTest
from .we_need_to_fix_this_test import WeNeedToFixThisTest
from .abstract_test import bcolors
import argparse
ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(),
ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(),
OptGapC2Test(), OptGapC3Test(), FeasibilityTest(),
OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(),
SingleEdgeBTest()]
TEST_NAME_DICT = {test.name: test for test in ALL_TESTS}
def run_tests(tests_to_run):
tests_that_failed = []
for test in tests_to_run:
print('\n\n---{} TEST---\n\n'.format(test.name.upper()))
test.run()
if test.has_error:
tests_that_failed.append(test)
for test in tests_that_failed:
print()
print(bcolors.ERROR + '\n\n---{} TEST failed---\n\n'.format(test.name.upper()) + bcolors.ENDC)
if len(tests_that_failed) == 0:
print(bcolors.OKGREEN + 'All tests passed!' + bcolors.ENDC)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tests', nargs='+', required=False)
args = parser.parse_args()
if args.tests is not None:
tests_to_run = [TEST_NAME_DICT[name] for name in args.tests]
else:
tests_to_run = ALL_TESTS
print('RUNNING THE FOLLOWING TESTS: {}'.format(
[test.name for test in tests_to_run]))
run_tests(tests_to_run)
| 33.931034 | 102 | 0.723069 |
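The runner above selects tests through the `name` attribute of each test object, exposed in TEST_NAME_DICT. A hedged programmatic sketch of running a subset (the import path follows the repo path in the metadata and the 'toy' filter is a placeholder; the real names come from the individual test classes):

# Run only the tests whose name contains a given substring -- illustrative only.
from lib.tests.test_runner import TEST_NAME_DICT, run_tests

subset = [test for name, test in TEST_NAME_DICT.items() if 'toy' in name]
run_tests(subset)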
d1a50a3f9e7436229e04040cd893d47631ec5b01
| 5,033 |
py
|
Python
|
src/lib/detectors/base_detector.py
|
hieubkvn123/CenterNet
|
438c1e8d0424122ece353bb20e64ff51f9444b6f
|
[
"MIT"
] | null | null | null |
src/lib/detectors/base_detector.py
|
hieubkvn123/CenterNet
|
438c1e8d0424122ece353bb20e64ff51f9444b6f
|
[
"MIT"
] | null | null | null |
src/lib/detectors/base_detector.py
|
hieubkvn123/CenterNet
|
438c1e8d0424122ece353bb20e64ff51f9444b6f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import time
import torch
from models.model import create_model, load_model
from utils.image import get_affine_transform
from utils.debugger import Debugger
class BaseDetector(object):
def __init__(self, opt):
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
print('Creating model...')
self.model = create_model(opt.arch, opt.heads, opt.head_conv)
self.model = load_model(self.model, opt.load_model)
self.model = self.model.to(opt.device)
self.model.eval()
self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
self.max_per_image = 100
self.num_classes = opt.num_classes
self.scales = opt.test_scales
self.opt = opt
self.pause = True
def pre_process(self, image, scale, meta=None):
height, width = image.shape[0:2]
new_height = int(height * scale)
new_width = int(width * scale)
if self.opt.fix_res:
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
s = max(height, width) * 1.0
else:
inp_height = (new_height | self.opt.pad) + 1
inp_width = (new_width | self.opt.pad) + 1
c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
s = np.array([inp_width, inp_height], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = cv2.resize(image, (new_width, new_height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
if self.opt.flip_test:
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
return images, meta
def process(self, images, return_time=False):
raise NotImplementedError
def post_process(self, dets, meta, scale=1):
raise NotImplementedError
def merge_outputs(self, detections):
raise NotImplementedError
def debug(self, debugger, images, dets, output, scale=1):
raise NotImplementedError
def show_results(self, debugger, image, results):
raise NotImplementedError
def run(self, image_or_path_or_tensor, meta=None):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
theme=self.opt.debugger_theme)
start_time = time.time()
pre_processed = False
if isinstance(image_or_path_or_tensor, np.ndarray):
image = image_or_path_or_tensor
    elif type(image_or_path_or_tensor) == type(''):
image = cv2.imread(image_or_path_or_tensor)
else:
image = image_or_path_or_tensor['image'][0].numpy()
pre_processed_images = image_or_path_or_tensor
pre_processed = True
loaded_time = time.time()
load_time += (loaded_time - start_time)
detections = []
for scale in self.scales:
scale_start_time = time.time()
if not pre_processed:
images, meta = self.pre_process(image, scale, meta)
else:
# import pdb; pdb.set_trace()
images = pre_processed_images['images'][scale][0]
meta = pre_processed_images['meta'][scale]
meta = {k: v.numpy()[0] for k, v in meta.items()}
images = images.to(self.opt.device)
torch.cuda.synchronize()
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images, return_time=True)
torch.cuda.synchronize()
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
torch.cuda.synchronize()
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
torch.cuda.synchronize()
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {'results': results, 'tot': tot_time, 'load': load_time,
'pre': pre_time, 'net': net_time, 'dec': dec_time,
'post': post_time, 'merge': merge_time}
| 34.951389 | 78 | 0.66362 |
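BaseDetector above leaves process, post_process, merge_outputs, debug and show_results as NotImplementedError stubs, so a concrete detector is expected to override them. A skeleton sketch of that override contract only (the import path and method bodies are assumptions, not the real CenterNet heads, and the constructor still needs a fully populated opt object):

# Skeleton only -- shows which methods a subclass must provide, not how CenterNet implements them.
from lib.detectors.base_detector import BaseDetector

class DummyDetector(BaseDetector):
  def process(self, images, return_time=False):
    output, dets, forward_time = None, [], 0.0  # run the network and decode detections here
    return (output, dets, forward_time) if return_time else (output, dets)

  def post_process(self, dets, meta, scale=1):
    return dets  # map detections back into original-image coordinates using meta['c'], meta['s']

  def merge_outputs(self, detections):
    return detections  # fuse per-scale detections into the final results dict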
9f5208b51e8e6a67eae1cac19789cc03133b7f42
| 20,406 |
py
|
Python
|
sdk/python/pulumi_azure_native/datalakeanalytics/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datalakeanalytics/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datalakeanalytics/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
@pulumi.output_type
class GetAccountResult:
"""
A Data Lake Analytics account object, containing all information associated with the named Data Lake Analytics account.
"""
def __init__(__self__, account_id=None, compute_policies=None, creation_time=None, current_tier=None, data_lake_store_accounts=None, debug_data_access_level=None, default_data_lake_store_account=None, endpoint=None, firewall_allow_azure_ips=None, firewall_rules=None, firewall_state=None, hive_metastores=None, id=None, last_modified_time=None, location=None, max_degree_of_parallelism=None, max_degree_of_parallelism_per_job=None, max_job_count=None, min_priority_per_job=None, name=None, new_tier=None, provisioning_state=None, public_data_lake_store_accounts=None, query_store_retention=None, state=None, storage_accounts=None, system_max_degree_of_parallelism=None, system_max_job_count=None, tags=None, type=None, virtual_network_rules=None):
if account_id and not isinstance(account_id, str):
raise TypeError("Expected argument 'account_id' to be a str")
pulumi.set(__self__, "account_id", account_id)
if compute_policies and not isinstance(compute_policies, list):
raise TypeError("Expected argument 'compute_policies' to be a list")
pulumi.set(__self__, "compute_policies", compute_policies)
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if current_tier and not isinstance(current_tier, str):
raise TypeError("Expected argument 'current_tier' to be a str")
pulumi.set(__self__, "current_tier", current_tier)
if data_lake_store_accounts and not isinstance(data_lake_store_accounts, list):
raise TypeError("Expected argument 'data_lake_store_accounts' to be a list")
pulumi.set(__self__, "data_lake_store_accounts", data_lake_store_accounts)
if debug_data_access_level and not isinstance(debug_data_access_level, str):
raise TypeError("Expected argument 'debug_data_access_level' to be a str")
pulumi.set(__self__, "debug_data_access_level", debug_data_access_level)
if default_data_lake_store_account and not isinstance(default_data_lake_store_account, str):
raise TypeError("Expected argument 'default_data_lake_store_account' to be a str")
pulumi.set(__self__, "default_data_lake_store_account", default_data_lake_store_account)
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if firewall_allow_azure_ips and not isinstance(firewall_allow_azure_ips, str):
raise TypeError("Expected argument 'firewall_allow_azure_ips' to be a str")
pulumi.set(__self__, "firewall_allow_azure_ips", firewall_allow_azure_ips)
if firewall_rules and not isinstance(firewall_rules, list):
raise TypeError("Expected argument 'firewall_rules' to be a list")
pulumi.set(__self__, "firewall_rules", firewall_rules)
if firewall_state and not isinstance(firewall_state, str):
raise TypeError("Expected argument 'firewall_state' to be a str")
pulumi.set(__self__, "firewall_state", firewall_state)
if hive_metastores and not isinstance(hive_metastores, list):
raise TypeError("Expected argument 'hive_metastores' to be a list")
pulumi.set(__self__, "hive_metastores", hive_metastores)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if max_degree_of_parallelism and not isinstance(max_degree_of_parallelism, int):
raise TypeError("Expected argument 'max_degree_of_parallelism' to be a int")
pulumi.set(__self__, "max_degree_of_parallelism", max_degree_of_parallelism)
if max_degree_of_parallelism_per_job and not isinstance(max_degree_of_parallelism_per_job, int):
raise TypeError("Expected argument 'max_degree_of_parallelism_per_job' to be a int")
pulumi.set(__self__, "max_degree_of_parallelism_per_job", max_degree_of_parallelism_per_job)
if max_job_count and not isinstance(max_job_count, int):
raise TypeError("Expected argument 'max_job_count' to be a int")
pulumi.set(__self__, "max_job_count", max_job_count)
if min_priority_per_job and not isinstance(min_priority_per_job, int):
raise TypeError("Expected argument 'min_priority_per_job' to be a int")
pulumi.set(__self__, "min_priority_per_job", min_priority_per_job)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if new_tier and not isinstance(new_tier, str):
raise TypeError("Expected argument 'new_tier' to be a str")
pulumi.set(__self__, "new_tier", new_tier)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_data_lake_store_accounts and not isinstance(public_data_lake_store_accounts, list):
raise TypeError("Expected argument 'public_data_lake_store_accounts' to be a list")
pulumi.set(__self__, "public_data_lake_store_accounts", public_data_lake_store_accounts)
if query_store_retention and not isinstance(query_store_retention, int):
raise TypeError("Expected argument 'query_store_retention' to be a int")
pulumi.set(__self__, "query_store_retention", query_store_retention)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if storage_accounts and not isinstance(storage_accounts, list):
raise TypeError("Expected argument 'storage_accounts' to be a list")
pulumi.set(__self__, "storage_accounts", storage_accounts)
if system_max_degree_of_parallelism and not isinstance(system_max_degree_of_parallelism, int):
raise TypeError("Expected argument 'system_max_degree_of_parallelism' to be a int")
pulumi.set(__self__, "system_max_degree_of_parallelism", system_max_degree_of_parallelism)
if system_max_job_count and not isinstance(system_max_job_count, int):
raise TypeError("Expected argument 'system_max_job_count' to be a int")
pulumi.set(__self__, "system_max_job_count", system_max_job_count)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_rules and not isinstance(virtual_network_rules, list):
raise TypeError("Expected argument 'virtual_network_rules' to be a list")
pulumi.set(__self__, "virtual_network_rules", virtual_network_rules)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> str:
"""
The unique identifier associated with this Data Lake Analytics account.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="computePolicies")
def compute_policies(self) -> Sequence['outputs.ComputePolicyResponse']:
"""
The list of compute policies associated with this account.
"""
return pulumi.get(self, "compute_policies")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
The account creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter(name="currentTier")
def current_tier(self) -> str:
"""
The commitment tier in use for the current month.
"""
return pulumi.get(self, "current_tier")
@property
@pulumi.getter(name="dataLakeStoreAccounts")
def data_lake_store_accounts(self) -> Sequence['outputs.DataLakeStoreAccountInformationResponse']:
"""
The list of Data Lake Store accounts associated with this account.
"""
return pulumi.get(self, "data_lake_store_accounts")
@property
@pulumi.getter(name="debugDataAccessLevel")
def debug_data_access_level(self) -> str:
"""
The current state of the DebugDataAccessLevel for this account.
"""
return pulumi.get(self, "debug_data_access_level")
@property
@pulumi.getter(name="defaultDataLakeStoreAccount")
def default_data_lake_store_account(self) -> str:
"""
The default Data Lake Store account associated with this account.
"""
return pulumi.get(self, "default_data_lake_store_account")
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The full CName endpoint for this account.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="firewallAllowAzureIps")
def firewall_allow_azure_ips(self) -> Optional[str]:
"""
The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced.
"""
return pulumi.get(self, "firewall_allow_azure_ips")
@property
@pulumi.getter(name="firewallRules")
def firewall_rules(self) -> Sequence['outputs.FirewallRuleResponse']:
"""
The list of firewall rules associated with this account.
"""
return pulumi.get(self, "firewall_rules")
@property
@pulumi.getter(name="firewallState")
def firewall_state(self) -> Optional[str]:
"""
The current state of the IP address firewall for this account.
"""
return pulumi.get(self, "firewall_state")
@property
@pulumi.getter(name="hiveMetastores")
def hive_metastores(self) -> Sequence['outputs.HiveMetastoreResponse']:
"""
The list of hiveMetastores associated with this account.
"""
return pulumi.get(self, "hive_metastores")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> str:
"""
The account last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def location(self) -> str:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxDegreeOfParallelism")
def max_degree_of_parallelism(self) -> Optional[int]:
"""
The maximum supported degree of parallelism for this account.
"""
return pulumi.get(self, "max_degree_of_parallelism")
@property
@pulumi.getter(name="maxDegreeOfParallelismPerJob")
def max_degree_of_parallelism_per_job(self) -> Optional[int]:
"""
The maximum supported degree of parallelism per job for this account.
"""
return pulumi.get(self, "max_degree_of_parallelism_per_job")
@property
@pulumi.getter(name="maxJobCount")
def max_job_count(self) -> Optional[int]:
"""
The maximum supported jobs running under the account at the same time.
"""
return pulumi.get(self, "max_job_count")
@property
@pulumi.getter(name="minPriorityPerJob")
def min_priority_per_job(self) -> int:
"""
The minimum supported priority per job for this account.
"""
return pulumi.get(self, "min_priority_per_job")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="newTier")
def new_tier(self) -> Optional[str]:
"""
The commitment tier for the next month.
"""
return pulumi.get(self, "new_tier")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning status of the Data Lake Analytics account.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicDataLakeStoreAccounts")
def public_data_lake_store_accounts(self) -> Optional[Sequence['outputs.DataLakeStoreAccountInformationResponse']]:
"""
The list of Data Lake Store accounts associated with this account.
"""
return pulumi.get(self, "public_data_lake_store_accounts")
@property
@pulumi.getter(name="queryStoreRetention")
def query_store_retention(self) -> Optional[int]:
"""
The number of days that job metadata is retained.
"""
return pulumi.get(self, "query_store_retention")
@property
@pulumi.getter
def state(self) -> str:
"""
The state of the Data Lake Analytics account.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageAccounts")
def storage_accounts(self) -> Sequence['outputs.StorageAccountInformationResponse']:
"""
The list of Azure Blob Storage accounts associated with this account.
"""
return pulumi.get(self, "storage_accounts")
@property
@pulumi.getter(name="systemMaxDegreeOfParallelism")
def system_max_degree_of_parallelism(self) -> int:
"""
The system defined maximum supported degree of parallelism for this account, which restricts the maximum value of parallelism the user can set for the account.
"""
return pulumi.get(self, "system_max_degree_of_parallelism")
@property
@pulumi.getter(name="systemMaxJobCount")
def system_max_job_count(self) -> int:
"""
The system defined maximum supported jobs running under the account at the same time, which restricts the maximum number of running jobs the user can set for the account.
"""
return pulumi.get(self, "system_max_job_count")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkRules")
def virtual_network_rules(self) -> Sequence['outputs.VirtualNetworkRuleResponse']:
"""
The list of virtualNetwork rules associated with this account.
"""
return pulumi.get(self, "virtual_network_rules")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
account_id=self.account_id,
compute_policies=self.compute_policies,
creation_time=self.creation_time,
current_tier=self.current_tier,
data_lake_store_accounts=self.data_lake_store_accounts,
debug_data_access_level=self.debug_data_access_level,
default_data_lake_store_account=self.default_data_lake_store_account,
endpoint=self.endpoint,
firewall_allow_azure_ips=self.firewall_allow_azure_ips,
firewall_rules=self.firewall_rules,
firewall_state=self.firewall_state,
hive_metastores=self.hive_metastores,
id=self.id,
last_modified_time=self.last_modified_time,
location=self.location,
max_degree_of_parallelism=self.max_degree_of_parallelism,
max_degree_of_parallelism_per_job=self.max_degree_of_parallelism_per_job,
max_job_count=self.max_job_count,
min_priority_per_job=self.min_priority_per_job,
name=self.name,
new_tier=self.new_tier,
provisioning_state=self.provisioning_state,
public_data_lake_store_accounts=self.public_data_lake_store_accounts,
query_store_retention=self.query_store_retention,
state=self.state,
storage_accounts=self.storage_accounts,
system_max_degree_of_parallelism=self.system_max_degree_of_parallelism,
system_max_job_count=self.system_max_job_count,
tags=self.tags,
type=self.type,
virtual_network_rules=self.virtual_network_rules)
def get_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
A Data Lake Analytics account object, containing all information associated with the named Data Lake Analytics account.
API Version: 2016-11-01.
:param str account_name: The name of the Data Lake Analytics account.
:param str resource_group_name: The name of the Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datalakeanalytics:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
account_id=__ret__.account_id,
compute_policies=__ret__.compute_policies,
creation_time=__ret__.creation_time,
current_tier=__ret__.current_tier,
data_lake_store_accounts=__ret__.data_lake_store_accounts,
debug_data_access_level=__ret__.debug_data_access_level,
default_data_lake_store_account=__ret__.default_data_lake_store_account,
endpoint=__ret__.endpoint,
firewall_allow_azure_ips=__ret__.firewall_allow_azure_ips,
firewall_rules=__ret__.firewall_rules,
firewall_state=__ret__.firewall_state,
hive_metastores=__ret__.hive_metastores,
id=__ret__.id,
last_modified_time=__ret__.last_modified_time,
location=__ret__.location,
max_degree_of_parallelism=__ret__.max_degree_of_parallelism,
max_degree_of_parallelism_per_job=__ret__.max_degree_of_parallelism_per_job,
max_job_count=__ret__.max_job_count,
min_priority_per_job=__ret__.min_priority_per_job,
name=__ret__.name,
new_tier=__ret__.new_tier,
provisioning_state=__ret__.provisioning_state,
public_data_lake_store_accounts=__ret__.public_data_lake_store_accounts,
query_store_retention=__ret__.query_store_retention,
state=__ret__.state,
storage_accounts=__ret__.storage_accounts,
system_max_degree_of_parallelism=__ret__.system_max_degree_of_parallelism,
system_max_job_count=__ret__.system_max_job_count,
tags=__ret__.tags,
type=__ret__.type,
virtual_network_rules=__ret__.virtual_network_rules)
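# Usage sketch (illustrative only): resolving an existing account from inside a
# Pulumi program. The account and resource group names below are assumptions,
# not values taken from this module.
#
#     result = get_account(account_name='myadlaaccount',
#                          resource_group_name='my-resource-group')
#     pulumi.export('adlaEndpoint', result.endpoint)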
| 44.554585 | 751 | 0.693276 |
86f6642f596353769ef97edecbd241ea64b055e4
| 7,211 |
py
|
Python
|
trax/rl_trainer.py
|
yakovkeselman/trax
|
615432bbc58ffb5bdf83a771e8f8b470995456db
|
[
"Apache-2.0"
] | 1 |
2020-05-30T15:19:39.000Z
|
2020-05-30T15:19:39.000Z
|
trax/rl_trainer.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
trax/rl_trainer.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Trainer for RL environments.
For now we only support PPO as RL algorithm.
Sample invocation:
TRAIN_BATCH_SIZE=32
python trax/rl_trainer.py \
--config_file=trax/rl/configs/ppo_acrobot.gin \
--train_batch_size=${TRAIN_BATCH_SIZE} \
--output_dir=${HOME}/ppo_acrobot \
--alsologtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import faulthandler
import gin
import jax
from jax.config import config
from tensor2tensor import envs # pylint: disable=unused-import
from tensor2tensor.envs import env_problem_utils
from trax import math
from trax import rl # pylint: disable=unused-import
from trax import trainer_flags # pylint: disable=unused-import
from trax.rl import envs as rl_envs # pylint: disable=unused-import
from trax.rl import task as rl_task
from trax.rl import trainers as rl_trainers
from trax.rl import training as light_trainers
from trax.tf_numpy import numpy as tf_np
FLAGS = flags.FLAGS
# Not just 'train' to avoid a conflict with trax.train in GIN files.
@gin.configurable(blacklist=[
'output_dir', 'train_batch_size', 'eval_batch_size', 'trajectory_dump_dir'
])
def train_rl(
output_dir,
train_batch_size,
eval_batch_size,
env_name='Acrobot-v1',
max_timestep=None,
clip_rewards=False,
rendered_env=False,
resize=False,
resize_dims=(105, 80),
trainer_class=rl_trainers.PPO,
n_epochs=10000,
trajectory_dump_dir=None,
num_actions=None,
light_rl=False,
light_rl_trainer=light_trainers.RLTrainer,
):
"""Train the RL agent.
Args:
output_dir: Output directory.
train_batch_size: Number of parallel environments to use for training.
eval_batch_size: Number of parallel environments to use for evaluation.
env_name: Name of the environment.
max_timestep: Int or None, the maximum number of timesteps in a trajectory.
The environment is wrapped in a TimeLimit wrapper.
clip_rewards: Whether to clip and discretize the rewards.
rendered_env: Whether the environment has visual input. If so, a
RenderedEnvProblem will be used.
    resize: Whether to resize the visual observations.
resize_dims: Pair (height, width), dimensions to resize the visual
observations to.
trainer_class: RLTrainer class to use.
    n_epochs: Number of epochs to run the training for.
trajectory_dump_dir: Directory to dump trajectories to.
num_actions: None unless one wants to use the discretization wrapper. Then
num_actions specifies the number of discrete actions.
    light_rl: Whether to use the light RL setting (experimental).
    light_rl_trainer: Which light RL trainer to use (experimental).
"""
tf_np.set_allow_float64(FLAGS.tf_allow_float64)
if light_rl:
task = rl_task.RLTask()
env_name = task.env_name
if FLAGS.jax_debug_nans:
config.update('jax_debug_nans', True)
if FLAGS.use_tpu:
config.update('jax_platform_name', 'tpu')
else:
config.update('jax_platform_name', '')
if light_rl:
trainer = light_rl_trainer(task=task, output_dir=output_dir)
    def light_training_loop():
      """Run the trainer for n_epochs and call close on it."""
      try:
        logging.info('Starting RL training for %d epochs.', n_epochs)
        trainer.run(n_epochs, n_epochs_is_total_epochs=True)
        logging.info('Completed RL training for %d epochs.', n_epochs)
      finally:
        # Close the trainer whether training finished normally or raised.
        trainer.close()
        logging.info('Trainer is now closed.')
if FLAGS.jax_debug_nans or FLAGS.disable_jit:
math.disable_jit()
with jax.disable_jit():
light_training_loop()
else:
light_training_loop()
return
# TODO(pkozakowski): Find a better way to determine this.
train_env_kwargs = {}
eval_env_kwargs = {}
if 'OnlineTuneEnv' in env_name:
envs_output_dir = FLAGS.envs_output_dir or os.path.join(output_dir, 'envs')
train_env_output_dir = os.path.join(envs_output_dir, 'train')
eval_env_output_dir = os.path.join(envs_output_dir, 'eval')
train_env_kwargs = {'output_dir': train_env_output_dir}
eval_env_kwargs = {'output_dir': eval_env_output_dir}
parallelism = multiprocessing.cpu_count() if FLAGS.parallelize_envs else 1
logging.info('Num discretized actions %s', num_actions)
logging.info('Resize %d', resize)
train_env = env_problem_utils.make_env(
batch_size=train_batch_size,
env_problem_name=env_name,
rendered_env=rendered_env,
resize=resize,
resize_dims=resize_dims,
max_timestep=max_timestep,
clip_rewards=clip_rewards,
parallelism=parallelism,
use_tpu=FLAGS.use_tpu,
num_actions=num_actions,
**train_env_kwargs)
assert train_env
eval_env = env_problem_utils.make_env(
batch_size=eval_batch_size,
env_problem_name=env_name,
rendered_env=rendered_env,
resize=resize,
resize_dims=resize_dims,
max_timestep=max_timestep,
clip_rewards=clip_rewards,
parallelism=parallelism,
use_tpu=FLAGS.use_tpu,
num_actions=num_actions,
**eval_env_kwargs)
assert eval_env
def run_training_loop():
"""Runs the training loop."""
logging.info('Starting the training loop.')
trainer = trainer_class(
output_dir=output_dir,
train_env=train_env,
eval_env=eval_env,
trajectory_dump_dir=trajectory_dump_dir,
async_mode=FLAGS.async_mode,
)
trainer.training_loop(n_epochs=n_epochs)
if FLAGS.jax_debug_nans or FLAGS.disable_jit:
math.disable_jit()
with jax.disable_jit():
run_training_loop()
else:
run_training_loop()
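# Example gin bindings for this entry point (a minimal sketch; the real configs
# live under trax/rl/configs and their exact binding names may differ -- the
# values below are illustrative assumptions only):
#
#     # train_rl.env_name = 'CartPole-v0'
#     # train_rl.n_epochs = 100
#     # train_rl.trainer_class = @trax.rl.trainers.PPO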
def main(argv):
del argv
logging.info('Starting RL training.')
gin_configs = FLAGS.config or []
gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)
  logging.info('Gin config:')
logging.info(gin_configs)
train_rl(
output_dir=FLAGS.output_dir,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
trajectory_dump_dir=(FLAGS.trajectory_dump_dir or None),
)
# TODO(afrozm): This is for debugging.
logging.info('Dumping stack traces of all stacks.')
faulthandler.dump_traceback(all_threads=True)
logging.info('Training is done, should exit.')
if __name__ == '__main__':
app.run(main)
| 30.816239 | 79 | 0.731383 |
663e40c4d11b49d3253c606f44057700f25209a0
| 6,419 |
py
|
Python
|
apps/core/backends/auth.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 18 |
2015-07-06T06:20:14.000Z
|
2022-03-20T23:45:40.000Z
|
apps/core/backends/auth.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 170 |
2015-07-07T08:42:03.000Z
|
2022-03-24T17:31:17.000Z
|
apps/core/backends/auth.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 11 |
2015-07-07T20:42:19.000Z
|
2022-01-12T22:39:59.000Z
|
import json
import logging
import re
import uuid
from urllib.parse import parse_qsl, urlencode
import ldap3
import oauth2 as oauth
import requests
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from apps.core.models import UserProfile
logger = logging.getLogger('sso.auth')
# log any inactive user access
def check_active_user(request, user):
if user and not user.is_active:
logger.warning('login.reject', {
'r': request,
'uid': user.username,
'hide': True,
})
return user.is_active if user else True
# login backend that uses email and password
class EmailLoginBackend(ModelBackend):
def authenticate(self, request=None, email=None, password=None):
user = User.objects.filter(email=email).first()
if not check_active_user(request, user):
return
username = user.username if user else f'unknown:{email}'
return super().authenticate(
request=request, username=username, password=password,
)
# login backend that uses LDAP id and password
class LDAPLoginBackend(ModelBackend):
def authenticate(self, request=None, ldap_id=None, ldap_pw=None):
user = User.objects.filter(profile__sparcs_id=ldap_id).first()
if not check_active_user(request, user):
return
# prevent LDAP injection attack
# the regex is taken from NAME_REGEX in adduser
if not re.match(r'^[a-z][-a-z0-9]*$', ldap_id):
return
ldap_server = ldap3.Server(
'ldap://sparcs.org', use_ssl=True, get_info=ldap3.ALL,
)
ldap_connection = ldap3.Connection(
ldap_server,
user=f'uid={ldap_id},ou=People,dc=sparcs,dc=org',
password=ldap_pw,
)
if not ldap_connection.bind():
return
return user
# login backend that uses the user object itself
class PasswordlessLoginBackend(ModelBackend):
def authenticate(self, request=None, user=None):
if not check_active_user(request, user):
return
# deny password-less staff login
if user and user.is_staff:
logger.error('login.error', {
'r': request,
'uid': user.username,
})
return None
return user
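# Wiring sketch (assumption: these backends are enabled through Django settings;
# the dotted paths below are illustrative and must match the real module path):
#
#     # AUTHENTICATION_BACKENDS = [
#     #     'apps.core.backends.auth.EmailLoginBackend',
#     #     'apps.core.backends.auth.LDAPLoginBackend',
#     #     'apps.core.backends.auth.PasswordlessLoginBackend',
#     # ]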
# Facebook Init & Auth
def auth_fb_init(callback_url):
args = {
'client_id': settings.FACEBOOK_APP_ID,
'auth_type': 'rerequest',
'scope': 'email',
'redirect_uri': callback_url,
}
return f'https://www.facebook.com/dialog/oauth?{urlencode(args)}'
def auth_fb_callback(code, callback_url):
# get access token
args = {
'client_id': settings.FACEBOOK_APP_ID,
'client_secret': settings.FACEBOOK_APP_SECRET,
'redirect_uri': callback_url,
'code': code,
}
token_info = requests.get(
'https://graph.facebook.com/v10.0/oauth/access_token?',
params=args, verify=True).json()
if 'access_token' not in token_info:
return None, None
# get grant info
access_token = token_info['access_token']
args = {
'access_token': access_token,
}
grant_info = requests.get('https://graph.facebook.com/v10.0/me/permissions',
params=args, verify=True).json()
for data in grant_info['data']:
if data['status'] == 'declined':
return None, None
# get facebook profile
args = {
'fields': 'email,first_name,last_name',
'access_token': access_token,
}
fb_info = requests.get('https://graph.facebook.com/v10.0/me',
params=args, verify=True).json()
info = {
'userid': fb_info['id'],
'email': fb_info.get('email'),
'first_name': fb_info.get('first_name'),
'last_name': fb_info.get('last_name'),
}
fb_profile = UserProfile.objects.filter(facebook_id=info['userid'],
test_only=False).first()
return fb_profile, info
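# Flow sketch (illustrative): a login view redirects the user to the URL from
# auth_fb_init(callback_url); the callback view then resolves the profile with
# auth_fb_callback(code, callback_url), where `code` comes from request.GET.
# The callback URL below is a hypothetical example.
#
#     # url = auth_fb_init('https://sso.example.org/account/callback/facebook/')
#     # fb_profile, info = auth_fb_callback(
#     #     request.GET['code'], 'https://sso.example.org/account/callback/facebook/')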
# Twitter Init & Auth
tw_consumer = oauth.Consumer(settings.TWITTER_APP_ID,
settings.TWITTER_APP_SECRET)
tw_client = oauth.Client(tw_consumer)
def auth_tw_init(callback_url):
body = f'oauth_callback={callback_url}'
resp, content = tw_client.request(
'https://twitter.com/oauth/request_token', 'POST', body)
tokens = dict(parse_qsl(content.decode('utf-8')))
oauth_token = tokens['oauth_token']
url = f'https://twitter.com/oauth/authenticate?oauth_token={oauth_token}'
return url, tokens
def auth_tw_callback(tokens, verifier):
token = oauth.Token(tokens['oauth_token'], tokens['oauth_token_secret'])
token.set_verifier(verifier)
client = oauth.Client(tw_consumer, token)
resp, content = client.request(
'https://twitter.com/oauth/access_token', 'POST')
tw_info = dict(parse_qsl(content.decode('utf-8')))
# access denied
if 'user_id' not in tw_info:
return None, None
info = {
'userid': tw_info['user_id'],
'first_name': tw_info['screen_name'],
'gender': '*H',
}
tw_profile = UserProfile.objects.filter(twitter_id=info['userid'],
test_only=False).first()
return tw_profile, info
# KAIST Auth
def auth_kaist_init(callback_url):
state = str(uuid.uuid4())
args = {
'client_id': 'SPARCS',
'state': state,
'redirect_url': callback_url,
}
return f'https://iam2.kaist.ac.kr/api/sso/commonLogin?{urlencode(args)}', state
def auth_kaist_callback(token, iam_info_raw):
iam_info = json.loads(iam_info_raw)['dataMap']
state = iam_info['state']
if state != token:
return None, None, False
k_info = iam_info['USER_INFO']
info = {
'userid': k_info['kaist_uid'],
'email': k_info.get('mail'),
'first_name': k_info.get('givenname'),
'last_name': k_info.get('sn'),
'gender': f'*{k_info.get("ku_sex")}',
'birthday': k_info.get('ku_born_date').replace('/', '-'),
'kaist_info': k_info,
}
kaist_profile = UserProfile.objects.filter(kaist_id=info['userid'],
test_only=False).first()
return kaist_profile, info, True
| 29.995327 | 83 | 0.618476 |
aa96ac5f62cb897c38639d4e68f348c7fcf1d769
| 1,249 |
py
|
Python
|
tracker/models.py
|
RacherinTest/pricetracker
|
4b9fc5c12301ae35fba7a78b18cf6bfd5b21a481
|
[
"Apache-2.0"
] | 3 |
2020-10-03T14:37:40.000Z
|
2021-03-28T17:21:44.000Z
|
tracker/models.py
|
RacherinTest/pricetracker
|
4b9fc5c12301ae35fba7a78b18cf6bfd5b21a481
|
[
"Apache-2.0"
] | 8 |
2021-02-08T20:41:55.000Z
|
2021-09-22T18:36:38.000Z
|
tracker/models.py
|
RacherinTest/pricetracker
|
4b9fc5c12301ae35fba7a78b18cf6bfd5b21a481
|
[
"Apache-2.0"
] | 1 |
2020-10-03T14:37:41.000Z
|
2020-10-03T14:37:41.000Z
|
from django.db import models
from django.contrib.auth.models import AbstractUser, User
from django.utils.crypto import get_random_string
from pricetracker.settings import AUTH_USER_MODEL
# Create your models here.
class MyUser(AbstractUser):
telegram_id = models.CharField(max_length=200)
telegram_status = models.BooleanField(default=False)
telegram_auth_string = models.CharField(max_length=20)
user_product_count = models.CharField(max_length=5)
id = models.AutoField(primary_key=True)
def __str__(self):
return self.username
class Product(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
status = models.BooleanField(default=True)
store = models.CharField(max_length=50)
requested_price = models.TextField(default=10)
last_price = models.IntegerField(null=True, blank=True)
discount_price = models.CharField(max_length=100, null=True, blank=True)
product_url = models.CharField(max_length=600)
alert = models.BooleanField(default=True)
alert_price = models.TextField(max_length=10)
created_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
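# Usage sketch (illustrative values; assumes an existing MyUser instance `user`):
#
#     # Product.objects.create(
#     #     user=user,
#     #     title='Sample product',
#     #     store='examplestore',
#     #     requested_price='100',
#     #     product_url='https://www.example.com/item/123',
#     #     alert_price='90',
#     # )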
| 34.694444 | 76 | 0.763811 |
72d2e7b85ee578fda758a101760900da45965cac
| 3,123 |
py
|
Python
|
mtp_api/apps/core/tests/test_admin.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 5 |
2016-01-05T12:21:35.000Z
|
2020-10-28T17:06:02.000Z
|
mtp_api/apps/core/tests/test_admin.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 209 |
2015-06-12T09:39:41.000Z
|
2022-03-21T16:01:19.000Z
|
mtp_api/apps/core/tests/test_admin.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 1 |
2021-04-11T06:19:23.000Z
|
2021-04-11T06:19:23.000Z
|
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.urls import reverse
from django.test import TestCase
from mtp_common.test_utils import silence_logger
class RecreateTestDataViewTestCase(TestCase):
@property
def url(self):
return reverse('admin:recreate_test_data')
def make_superuser(self, log_into_client=False):
superuser = get_user_model().objects.create(
username='superuser',
is_staff=True,
is_superuser=True,
)
superuser.set_password('superuser')
superuser.save()
if log_into_client:
self.assertTrue(self.client.login(
username='superuser',
password='superuser',
))
return superuser
def test_anonymous_access_denied(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_unauthorised_access_denied(self):
call_command('load_test_data', verbosity=0)
self.assertTrue(self.client.login(
username='test-prison-1',
password='test-prison-1',
))
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_superadmin_access_allowed(self):
self.make_superuser(log_into_client=True)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<h1>Recreate test data</h1>', html=True)
def test_data_management_command_runs(self):
from core.management.commands.load_test_data import Command
self.make_superuser(log_into_client=True)
with mock.patch.object(Command, 'handle') as method:
method.return_value = ''
with silence_logger():
response = self.client.post(self.url, data={
'scenario': 'random',
'number_of_transactions': '50',
'number_of_disbursements': '50',
'number_of_payments': '50',
'number_of_prisoners': '50',
'days_of_history': '7',
})
self.assertEqual(response.status_code, 200)
self.assertEqual(method.call_count, 1)
expected_options_subset = {
'no_protect_superusers': False,
'protect_usernames': ['transaction-uploader'],
'protect_credits': False,
'no_color': True,
'credits': 'random',
'prisons': ['sample'],
'number_of_transactions': 50,
'number_of_payments': 50,
'number_of_prisoners': 50,
'days_of_history': 7
}
options = method.call_args[1]
options_subset = {
k: v
for k, v in options.items()
if k in expected_options_subset.keys()
}
self.assertDictEqual(options_subset, expected_options_subset)
| 36.741176 | 79 | 0.5943 |
dba0946cd4f8f0080c5a74ac81c9f5a350bd5084
| 1,045 |
py
|
Python
|
Labs/lab5/l5e2.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab5/l5e2.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab5/l5e2.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
###############################################################################
# File Name : l5e2.py
# Created By : Félix Chiasson (7138723)
# Creation Date : [2015-10-13 11:44]
# Last Modified : [2015-10-13 12:45]
# Description    : Returns the average, min and max of the grades.
###############################################################################
def notes(l):
    # Compute the average
somme = 0
c = 0
maximum = l[0]
minimum = l[0]
while c < len(l):
somme = l[c] + somme
c = c + 1
for val in l:
if val > maximum:
maximum = val
    for val in l:
        if val < minimum:
            minimum = val
avg = somme / len(l)
resultats = [avg, minimum, maximum]
return resultats
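# Example (sanity check): notes([10, 8, 6]) returns [8.0, 6, 10]
# (average 8.0, minimum 6, maximum 10).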
inp = input("Veuillez entrer vos notes: ")
liste = list(eval(inp))
results = notes(liste)
print("La moyenne est",results[0],"le minimum est",results[1],"le maximum est",
results[2])
| 30.735294 | 79 | 0.44689 |
69f1e438400cfe7d94491a057c076b49baf9e256
| 3,814 |
py
|
Python
|
utils/augmentation.py
|
flytocc/RAPiD
|
92e6a44b8a0107def055e93c971d78fd548562f8
|
[
"MIT"
] | 142 |
2020-05-26T03:07:00.000Z
|
2022-03-31T08:12:43.000Z
|
utils/augmentation.py
|
flytocc/RAPiD
|
92e6a44b8a0107def055e93c971d78fd548562f8
|
[
"MIT"
] | 36 |
2020-06-03T08:07:37.000Z
|
2022-03-25T04:12:14.000Z
|
utils/augmentation.py
|
flytocc/RAPiD
|
92e6a44b8a0107def055e93c971d78fd548562f8
|
[
"MIT"
] | 46 |
2020-05-26T03:08:17.000Z
|
2022-03-11T02:47:05.000Z
|
import random
import numpy as np
import scipy.ndimage
import torch
import torchvision.transforms.functional as tvf
import torch.nn.functional as tnf
def hflip(image, labels):
'''
left-right flip
Args:
image: PIL.Image
labels: tensor, shape(N,5), absolute x,y,w,h, angle in degree
'''
image = tvf.hflip(image)
labels[:,0] = image.width - labels[:,0] # x,y,w,h,(angle)
labels[:,4] = -labels[:,4]
return image, labels
def vflip(image, labels):
'''
up-down flip
Args:
image: PIL.Image
labels: tensor, shape(N,5), absolute x,y,w,h, angle in degree
'''
image = tvf.vflip(image)
labels[:,1] = image.height - labels[:,1] # x,y,w,h,(angle)
labels[:,4] = -labels[:,4]
return image, labels
def rotate(image, degrees, labels, expand=False):
'''
image: PIL.Image
labels: tensor, shape(N,5), absolute x,y,w,h, angle in degree
'''
img_w, img_h = image.width, image.height
image = tvf.rotate(image, angle=-degrees, expand=expand)
new_w, new_h = image.width, image.height
# image coordinate to cartesian coordinate
x = labels[:,0] - 0.5*img_w
y = -(labels[:,1] - 0.5*img_h)
# cartesian to polar
r = (x.pow(2) + y.pow(2)).sqrt()
theta = torch.empty_like(r)
theta[x>=0] = torch.atan(y[x>=0]/x[x>=0])
theta[x<0] = torch.atan(y[x<0]/x[x<0]) + np.pi
theta[torch.isnan(theta)] = 0
# modify theta
theta -= (degrees*np.pi/180)
# polar to cartesian
x = r * torch.cos(theta)
y = r * torch.sin(theta)
labels[:,0] = x + 0.5*new_w
labels[:,1] = -y + 0.5*new_h
labels[:,4] += degrees
labels[:,4] = torch.remainder(labels[:,4], 180)
labels[:,4][labels[:,4]>=90] -= 180
return image, labels
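# Usage sketch (illustrative): rotating an image together with its rotated-box
# labels. The file path and label values are assumptions for the example only.
#
#     # img = Image.open('example.jpg')  # requires `from PIL import Image`
#     # labels = torch.Tensor([[100., 80., 40., 20., 0.]])  # abs. cx, cy, w, h, angle(deg)
#     # img, labels = rotate(img, degrees=30, labels=labels, expand=True)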
def add_gaussian(imgs, max_var=0.1):
'''
imgs: tensor, (batch),C,H,W
    max_var: variance is uniformly distributed between 0~max_var
'''
var = torch.rand(1) * max_var
imgs = imgs + torch.randn_like(imgs) * var
return imgs
def add_saltpepper(imgs, max_p=0.06):
'''
imgs: tensor, (batch),C,H,W
    p: probability to add salt and pepper
'''
c,h,w = imgs.shape[-3:]
p = torch.rand(1) * max_p
total = int(c*h*w * p)
idxC = torch.randint(0,c,size=(total,))
idxH = torch.randint(0,h,size=(total,))
idxW = torch.randint(0,w,size=(total,))
value = torch.randint(0,2,size=(total,),dtype=torch.float)
imgs[...,idxC,idxH,idxW] = value
return imgs
def random_avg_filter(img):
assert img.dim() == 3
img = img.unsqueeze(0)
ks = random.choice([3,5])
pad_size = ks // 2
img = tnf.avg_pool2d(img, kernel_size=ks, stride=1, padding=pad_size)
return img.squeeze(0)
def max_filter(img):
assert img.dim() == 3
img = img.unsqueeze(0)
img = tnf.max_pool2d(img, kernel_size=3, stride=1, padding=1)
return img.squeeze(0)
def get_gaussian_kernels():
gaussian_kernels = []
for ks in [3,5]:
delta = np.zeros((ks,ks))
delta[ks//2,ks//2] = 1
kernel = scipy.ndimage.gaussian_filter(delta, sigma=3)
kernel = torch.from_numpy(kernel).float().view(1,1,ks,ks)
gaussian_kernels.append(kernel)
return gaussian_kernels
gaussian_kernels = get_gaussian_kernels()
def random_gaussian_filter(img):
assert img.dim() == 3
img = img.unsqueeze(1)
kernel = random.choice(gaussian_kernels)
assert torch.isclose(kernel.sum(), torch.Tensor([1]))
pad_size = kernel.shape[2] // 2
img = tnf.conv2d(img, weight=kernel, stride=1, padding=pad_size)
return img.squeeze(1)
if __name__ == "__main__":
from PIL import Image
img_path = 'C:/Projects/MW18Mar/train_no19/Mar10_000291.jpg'
img = Image.open(img_path)
img.show()
new_img = tvf.rotate(img, -45)
new_img.show()
| 26.486111 | 73 | 0.61484 |
2df4edc1521bc842558c4436eaa33a2e1d052fd4
| 184 |
py
|
Python
|
keras-onnx/onnx_keras/__init__.py
|
jwj04ok/ONNX_Convertor
|
067a17e16dfc8aa80e36f44c4523959daf7359f5
|
[
"MIT"
] | 33 |
2020-06-09T21:05:35.000Z
|
2022-02-24T01:48:45.000Z
|
keras-onnx/onnx_keras/__init__.py
|
jwj04ok/ONNX_Convertor
|
067a17e16dfc8aa80e36f44c4523959daf7359f5
|
[
"MIT"
] | 17 |
2020-07-14T19:44:09.000Z
|
2022-02-10T10:03:01.000Z
|
keras-onnx/onnx_keras/__init__.py
|
jwj04ok/ONNX_Convertor
|
067a17e16dfc8aa80e36f44c4523959daf7359f5
|
[
"MIT"
] | 16 |
2020-06-17T22:56:11.000Z
|
2021-12-21T05:44:32.000Z
|
# This is an empty file for the subdirectory
from . import frontend
from .helper import set_compatibility
from .helper import set_custom_layer
from .helper import set_duplicate_weights
| 36.8 | 44 | 0.842391 |
47b062d3d099046df08948eb5a978abf1fb29284
| 2,104 |
py
|
Python
|
LSTMModel/PrepareDataset.py
|
Mozartuss/DEAP_EmotionRecognition_V2
|
39bd50f327d465114ee0f798ac7bcee13b457b17
|
[
"Apache-2.0"
] | 5 |
2022-01-25T19:46:19.000Z
|
2022-01-28T18:07:53.000Z
|
LSTMModel/PrepareDataset.py
|
Mozartuss/DEAP_EmotionRecognition_V2
|
39bd50f327d465114ee0f798ac7bcee13b457b17
|
[
"Apache-2.0"
] | null | null | null |
LSTMModel/PrepareDataset.py
|
Mozartuss/DEAP_EmotionRecognition_V2
|
39bd50f327d465114ee0f798ac7bcee13b457b17
|
[
"Apache-2.0"
] | 1 |
2022-02-23T09:52:11.000Z
|
2022-02-23T09:52:11.000Z
|
from pathlib import Path
import numpy as np
from sklearn.preprocessing import normalize, StandardScaler
from tensorflow.keras.utils import to_categorical
from Utils.Constants import FINAL_DATASET_PATH, FINAL_DATASET_PATH_PCA, FINAL_DATASET_PATH_MRMR, FINAL_DATASET_PATH_PSO, \
FINAL_DATASET_PATH_GWO, FINAL_DATASET_PATH_CS
def prepare_dataset(label_type: str = "Arousal", pca: bool = False, mrmr: bool = False, pso: bool = False,
gwo: bool = False, cs: bool = False):
if pca:
data_path = FINAL_DATASET_PATH_PCA
elif mrmr:
data_path = FINAL_DATASET_PATH_MRMR
elif pso:
data_path = FINAL_DATASET_PATH_PSO
elif gwo:
data_path = FINAL_DATASET_PATH_GWO
elif cs:
data_path = FINAL_DATASET_PATH_CS
else:
data_path = FINAL_DATASET_PATH
x_train = np.load(str(Path(data_path, "data_training.npy")))
y_train = np.load(str(Path(data_path, "label_training.npy")))
x_train = normalize(x_train)
y_train_arousal = np.ravel(y_train[:, [0]])
y_train_valence = np.ravel(y_train[:, [1]])
y_train_arousal = to_categorical(y_train_arousal)
y_train_valence = to_categorical(y_train_valence)
x_train = np.array(x_train[:])
x_test = np.load(str(Path(data_path, "data_testing.npy")))
y_test = np.load(str(Path(data_path, "label_testing.npy")))
x_test = normalize(x_test)
y_test_arousal = np.ravel(y_test[:, [0]])
y_test_valence = np.ravel(y_test[:, [1]])
y_test_arousal = to_categorical(y_test_arousal)
y_test_valence = to_categorical(y_test_valence)
x_test = np.array(x_test[:])
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.fit_transform(x_test)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
if label_type.lower() == "arousal":
return x_train, y_train_arousal, x_test, y_test_arousal
else:
return x_train, y_train_valence, x_test, y_test_valence
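# Usage sketch (illustrative): load the arousal split built from the
# PCA-reduced features.
#
#     # x_train, y_train, x_test, y_test = prepare_dataset(label_type="Arousal", pca=True)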
if __name__ == '__main__':
prepare_dataset()
| 34.491803 | 122 | 0.705798 |
5a37912dceacf56d07b3e8bacd322a8ad053c95b
| 411 |
py
|
Python
|
examples/download.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
examples/download.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
examples/download.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | 1 |
2022-03-16T11:10:43.000Z
|
2022-03-16T11:10:43.000Z
|
'''
This script shows how to download the data
'''
import os
os.chdir('..')
from minder_utils.download import Downloader
Downloader().export(since='2021-10-10', until='2021-10-12', reload=True,
save_path='./data/activity/', categories=['raw_activity_pir'])
Downloader().refresh(until='2021-12-10',
save_path='./data/activity/', categories=['raw_activity_pir'])
| 25.6875 | 82 | 0.659367 |
a43ec0a8d461b0643e27ed88ad4fab502bf081ad
| 68,124 |
py
|
Python
|
lib/lib/mle_classes.py
|
jkaessens/gwas-assoc
|
1053c94222701f108362e33c99155cfc148f4ca2
|
[
"MIT"
] | null | null | null |
lib/lib/mle_classes.py
|
jkaessens/gwas-assoc
|
1053c94222701f108362e33c99155cfc148f4ca2
|
[
"MIT"
] | null | null | null |
lib/lib/mle_classes.py
|
jkaessens/gwas-assoc
|
1053c94222701f108362e33c99155cfc148f4ca2
|
[
"MIT"
] | null | null | null |
import os
import os.path
import sys
import re
import gzip
import decimal
## -- several mlinfo and mldose files can be put into an object of Mlinfo -- #
## -- or Mldose -- #
class Mlinfo:
""" class Mlinfo implements tasks on Mlinfo files from MACH software """
def __init__(self, mlinfo_file_cases, write_file=None, rsq=0.0, post_prob=0.0):
""" init with cases """
self._mlinfo_files_cases = [] # names of mlinfo files from cases
self._mlinfo_files_controls = [] # names of mlinfo files from controls
self._mlinfo_files = [] # names of mlinfo files from cases and controls
self._mlinfo_files_cases.append(mlinfo_file_cases)
self._mlinfo_files.append(mlinfo_file_cases)
self._write_file = write_file # name of output file
self._write_file_filtered_snps = None
self._rsq = float(rsq) # chosen rsquare threshold after imputation
self._post_prob = float(post_prob) # chosen post_prob threshold after imputation
self._map_snps = []
def map_snps(self, file_number=0):
""" map mlinfo file into memory """
mlinfo_files_numof = len(self._mlinfo_files)
if mlinfo_files_numof <= file_number:
return
try:
fh = gzip.open(self._mlinfo_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
comment_pattern = re.compile("^#.*$")
header_pattern = re.compile("^SNP.*$")
# skip comment lines that start with "#" and header line
line = fh.readline()
while comment_pattern.search(line):
line = fh.readline()
if not header_pattern.search(line):
print >> sys.stderr, "error: no header in mlinfo file."
sys.exit(1)
line = fh.readline() #skip header
while line:
self._map_snps.append(re.split("\s+",line)[0])
line = fh.readline()
fh.close()
def snps_get(self):
""" get mapped snps from mlinfo file in order they appeared in file """
return self._map_snps
def snps_get_hash(self):
""" get mapped snps from mlinfo file im hash """
snp_hash = {}
for snp in self._map_snps:
snp_hash[snp] = 1
return snp_hash
def free_snps(self):
""" free mapped snps from mlinfo file """
self._map_snps = []
def rsq_set(self, rsq):
""" set rsq for filtering """
self._rsq = float(rsq)
def post_prob_set(self, post_prob):
""" set rsq for filtering """
self._post_prob = float(post_prob)
def write_file_set(self, write_file):
""" set name of output file """
self._write_file = write_file
def write_file_set_filtered_SNPs(self, write_file_filtered_snps):
""" set name of output file for filtered SNPs """
self._write_file_filtered_snps = write_file_filtered_snps
def mlinfo_file_add_controls(self, mlinfo_file_controls):
""" add another mlinfo file from controls to mlinfo file list """
self._mlinfo_files_controls.append(mlinfo_file_controls)
self._mlinfo_files.append(mlinfo_file_controls)
def mlinfo_file_add_cases(self, mlinfo_file_cases):
""" add another mlinfo file from cases to mlinfo file list """
self._mlinfo_files_cases.append(mlinfo_file_cases)
self._mlinfo_files.append(mlinfo_file_cases)
def check_same_snps_same_order_same_alleles(self):
""" check for same snps and same snp order and for same minor alleles in
            all mlinfo files """
files_numof = len(self._mlinfo_files)
if files_numof < 1:
return
# first mlinfo file
rs_previous = [] # rs numbers
a1_previous = [] # allele 1
a2_previous = [] # allele 2
try:
fh = gzip.open(self._mlinfo_files[0], "rb")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
rs_previous.append(re.split("\s+",line)[0])
a1_previous.append(re.split("\s+",line)[1])
a2_previous.append(re.split("\s+",line)[2])
line = fh.readline()
fh.close()
# next mlinfo files
for i in xrange(1,files_numof):
rs_next = []
a1_next = []
a2_next = []
try:
fh = gzip.open(self._mlinfo_files[i], "rb")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
rs_next.append(re.split("\s+",line)[0])
a1_next.append(re.split("\s+",line)[1])
a2_next.append(re.split("\s+",line)[2])
line = fh.readline()
fh.close()
            if rs_previous != rs_next or a1_previous != a1_next \
                    or a2_previous != a2_next:
print >> sys.stderr, "error: mlinfo files differ."
sys.exit(1)
rs_previous = rs_next
a1_previous = a1_next
a2_previous = a2_next
def paste_mlinfo_files(self):
""" paste all mlinfo files into one file """
files_numof = len(self._mlinfo_files)
if files_numof < 1:
return
fh = [] # list of mlinfo filehandles
for i in xrange(files_numof):
fh.append(gzip.open(self._mlinfo_files[i], "rb"))
out = gzip.open(self._write_file, "w")
line = fh[0].readline().replace("\n","")
while line:
out.writelines(line + " ")
for i in xrange(1,files_numof-1):
line = fh[i].readline().replace("\n","")
out.writelines(line + " ")
line = fh[files_numof-1].readline().replace("\n","")
out.writelines(line)
out.writelines("\n")
line = fh[0].readline().replace("\n","")
for i in xrange(files_numof):
fh[i].close()
out.close()
def select_filtered_snps_rsq_postprob(self, file_number=0):
""" filter snps by rsq and average posterior probability for cases mlinfo file """
mlinfo_files_numof = len(self._mlinfo_files)
if mlinfo_files_numof <= file_number:
return
try:
fh = gzip.open(self._mlinfo_files[file_number], "rb")
out = gzip.open(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
#### skip comment lines that start with "#" and header line
#### write comment lines
###comment_pattern = re.compile("^#.*$")
###while comment_pattern.search(line):
### out.writelines(line)
### line = fh.readline()
#### filter snps for r2 and postprob
###out.writelines("# filtered Rsq >=%s\n" %(str(self._rsq)))
###out.writelines("# filtered Quality >=%s\n" %(str(self._post_prob)))
# read from header how many mlinfo have been merge
# and which columns to check for r2 and postprob
# write header
header_pattern = re.compile("^SNP.*$")
line = fh.readline()
if not header_pattern.search(line):
print >> sys.stderr, "error: no header in merged mlinfo file."
sys.exit(1)
mlinfo_files_numof = len(re.findall('SNP', line))
post_prob_columns = []
r2_columns = []
for i in xrange(1,len(re.findall("SNP", line))+1):
post_prob_columns.append(i*7-2)
r2_columns.append(i*7-1)
assert(mlinfo_files_numof == len(r2_columns))
assert(len(post_prob_columns) == len(r2_columns))
out.writelines(line)
# write to out file
line = fh.readline()
while line:
list = re.split("\s+",line)
keep_snp = True
for i in xrange(mlinfo_files_numof):
if float(list[post_prob_columns[i]]) < self._post_prob:
keep_snp = False
break
elif float(list[r2_columns[i]]) < self._rsq:
keep_snp = False
break
if keep_snp:
out.writelines(line)
line = fh.readline()
fh.close()
out.close()
def snp_list_first_allele_get(self, new_snp_list=[]):
""" get list of first alleles from new_snp_list """
# copy list
new_snp_list_first_allele = new_snp_list[:]
for i in xrange(len(new_snp_list)):
# check allele order
a1 = ""
#if cmp(new_snp_list[i][4],new_snp_list[i][5]) == 0:
# print >> sys.stderr, "error: monoallelic snps not allowed, rs=" +\
# str(new_snp_list[i][1])
# sys.exit(1)
#elif cmp(new_snp_list[i][4],new_snp_list[i][5]) > 0:
if cmp(new_snp_list[i][4],new_snp_list[i][5]) > 0:
# switch alleles
a1 = new_snp_list[i][5]
new_snp_list_first_allele[i] = a1
else:
# do not switch alleles
a1 = new_snp_list[i][4]
new_snp_list_first_allele[i] = a1
return new_snp_list_first_allele
def write_new_mlinfo(self, freq1_only_typed_snps=[], new_snp_list=[]):
""" write new mlinfo file from new_snp_list """
try:
out = gzip.open(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
assert(len(new_snp_list) == len(freq1_only_typed_snps))
header_line = "SNP\tAl1\tAl2\tFreq1\tMAF\tQuality\tRsq\n"
out.writelines(header_line)
for i in xrange(len(new_snp_list)):
# check allele order
a1 = ""
a2 = ""
if cmp(new_snp_list[i][4],new_snp_list[i][5]) == 0:
print >> sys.stderr, "error: monoallelic snps not allowed, rs=" +\
str(new_snp_list[i][1])
sys.exit(1)
elif cmp(new_snp_list[i][4],new_snp_list[i][5]) > 0:
# switch alleles
a1 = new_snp_list[i][5]
a2 = new_snp_list[i][4]
else:
# do not switch alleles
a1 = new_snp_list[i][4]
a2 = new_snp_list[i][5]
freq1 = float('%.4f' %freq1_only_typed_snps[i])
if freq1_only_typed_snps[i] > 0.5:
maf = 1.0 - freq1
else:
maf = freq1_only_typed_snps[i]
out.writelines(new_snp_list[i][1] + "\t" + a1 + "\t" + a2 +\
"\t%.4f\t%.4f\t1.0\t1.0\n" %(freq1, maf))
out.close()
def snps_al1_get(self, file_number=0):
""" return list: (snp, allele1) """
snps_allele1_list = []
mlinfo_files_numof = len(self._mlinfo_files)
if mlinfo_files_numof <= file_number:
return
try:
fh = gzip.open(self._mlinfo_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
comment_pattern = re.compile("^#.*$")
header_pattern = re.compile("^SNP.*$")
# skip comment lines that start with "#" and header line
line = fh.readline()
while comment_pattern.search(line):
line = fh.readline()
# scan header line
if not header_pattern.search(line):
print >> sys.stderr, "error: no header in mlinfo file."
sys.exit(1)
line = fh.readline() #skip header
# scan whole mlinfo file line by line
while line:
list = re.split("\s+",line)
snp = list[0]
al1 = list[1]
snps_allele1_list.append( (snp, al1) )
line = fh.readline()
fh.close()
return snps_allele1_list
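# Usage sketch (illustrative file names; kept in the Python 2 style of this module):
#
#     # mlinfo = Mlinfo(mlinfo_file_cases="cases.mlinfo.gz", write_file="merged.mlinfo.gz")
#     # mlinfo.mlinfo_file_add_controls("controls.mlinfo.gz")
#     # mlinfo.check_same_snps_same_order_same_alleles()
#     # mlinfo.paste_mlinfo_files()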
class Mldose:
""" class Mldose implements tasks on Mldose files from MACH software """
def __init__(self, mldose_file_cases, write_file=None, write_file_ids=None):
""" init with mldose file from cases"""
self._mldose_files_cases = [] # names of mldose files from cases
self._mldose_files_controls = [] # names of mldose files from controls
self._mldose_files = [] # names of mldose files from cases and controls
self._mldose_files_cases.append(mldose_file_cases)
self._mldose_files.append(mldose_file_cases)
self._write_file = write_file # name of output file
self._write_file_ids = write_file_ids # name of output file for ids
self._samples_controls = [] # control sample list
self._samples_cases = [] # cases sample list
def write_file_set(self, write_file):
""" set name of output file """
self._write_file = write_file
def write_file_set_ids(self, write_file_ids):
""" set name of output file """
self._write_file_ids = write_file_ids
def mldose_file_add_cases(self, mldose_file_cases):
""" add another mldose file from cases to mldose file list """
self._mldose_files_cases.append(mldose_file_cases)
self._mldose_files.append(mldose_file_cases)
def mldose_file_add_controls(self, mldose_file_controls):
""" add another mldose file from controls to mldose file list """
self._mldose_files_controls.append(mldose_file_controls)
self._mldose_files.append(mldose_file_controls)
def mldose_files_cases_get(self):
""" get name of cases mldose file """
return self._mldose_files_cases
def mldose_files_get(self):
""" get name of mldose file """
return self._mldose_files
def ids_all_get(self, file_number=0):
""" select all ids from mldose file and return ids as list """
mldose_files_numof = len(self._mldose_files)
if mldose_files_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
ids_list = []
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
ids_list.append(id)
line = fh.readline()
fh.close()
return ids_list
def select_cases_from_mldose(self, cases=[], file_number=0):
""" select cases from mldose file(s) and write to output file """
mldose_files_cases_numof = len(self._mldose_files_cases)
if mldose_files_cases_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files_cases[file_number], "rb")
out = gzip.open(self._write_file, "w")
if self._write_file_ids:
out2 = gzip.open(self._write_file_ids, "w")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
try:
index = cases.index(id)
except ValueError:
# skip sample in mldose file
line = fh.readline()
continue
out.writelines(line)
if self._write_file_ids:
out2.writelines(id + "\n")
del cases[index]
line = fh.readline()
fh.close()
out.close()
if self._write_file_ids:
out2.close()
def select_cases_from_mldose_and_append(self, cases=[], file_number=0):
""" select cases from mldose file(s) and write to output file
file_number starts with 0 """
mldose_files_cases_numof = len(self._mldose_files_cases)
if mldose_files_cases_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files_cases[file_number], "rb")
out = gzip.open(self._write_file, "a")
if self._write_file_ids:
out2 = gzip.open(self._write_file_ids, "a")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
try:
index = cases.index(id)
except ValueError:
# skip sample in mldose file
line = fh.readline()
continue
out.writelines(line)
if self._write_file_ids:
out2.writelines(id + "\n")
del cases[index]
line = fh.readline()
fh.close()
out.close()
if self._write_file_ids:
out2.close()
def select_controls_from_mldose(self, controls=[], file_number=0):
""" select controls from mldose file(s) and write to output file """
mldose_files_controls_numof = len(self._mldose_files_controls)
if mldose_files_controls_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files_controls[file_number], "rb")
out = gzip.open(self._write_file, "w")
if self._write_file_ids:
out2 = gzip.open(self._write_file_ids, "w")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
try:
index = controls.index(id)
except ValueError:
# skip sample in mldose file
line = fh.readline()
continue
out.writelines(line)
if self._write_file_ids:
out2.writelines(id + "\n")
del controls[index]
line = fh.readline()
fh.close()
out.close()
if self._write_file_ids:
out2.close()
def select_controls_from_mldose_and_append(self, controls=[], file_number=0):
""" select controls from mldose file(s) and write to output file
file_number starts with 0 """
mldose_files_controls_numof = len(self._mldose_files_controls)
if mldose_files_controls_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files_controls[file_number], "rb")
out = gzip.open(self._write_file, "a")
if self._write_file_ids:
out2 = gzip.open(self._write_file_ids, "a")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
try:
index = controls.index(id)
except ValueError:
# skip sample in mldose file
line = fh.readline()
continue
out.writelines(line)
if self._write_file_ids:
out2.writelines(id + "\n")
del controls[index]
line = fh.readline()
fh.close()
out.close()
if self._write_file_ids:
out2.close()
def select_ids_cases_controls_from_mldose(self, cases_controls=[], file_number=0):
""" select cases from mldose file(s) and write to output file """
mldose_files_cases_numof = len(self._mldose_files_cases)
if mldose_files_cases_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files_cases[file_number], "rb")
out2 = gzip.open(self._write_file_ids, "w")
except IOError, e:
print e
sys.exit(1)
line = fh.readline()
while line:
id = re.split("\s+",line)[0]
try:
index = cases_controls.index(id)
except ValueError:
# skip sample in mldose file
line = fh.readline()
continue
if self._write_file_ids:
out2.writelines(id + "\n")
del cases_controls[index]
line = fh.readline()
fh.close()
out2.close()
def select_freq1_from_mldose(self, file_number=0):
""" select freq1 in order of mldose file and return
note that control ids are utilized for extracting cases
select freq1 from controls only in order of mldose file and return """
mldose_files_numof = len(self._mldose_files)
if mldose_files_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
freq1_snps = []
sample_counter = 0
# first line, generate list
line = fh.readline().replace("\n","")
dosages = re.split("\s+",line)[2:]
# if last element is empty then remove it
if dosages[-1] == "":
dosages.pop()
dosages_numof = len(dosages)
for i in xrange(dosages_numof):
# generate list of dosages
freq1_snps.append(float(dosages[i]))
sample_counter = sample_counter + 1
line = fh.readline().replace("\n","")
# sum all dosages for allele 1
while line:
dosages = re.split("\s+",line)[2:]
# if last element is empty then remove it
if dosages[-1] == "":
dosages.pop()
if len(dosages) != dosages_numof:
print >> sys.stderr, "error: different num of columns in mldose file."
sys.exit(1)
for i in xrange(dosages_numof):
freq1_snps[i] = freq1_snps[i] + float(dosages[i])
sample_counter = sample_counter + 1
line = fh.readline().replace("\n","")
fh.close()
# calc freq1 for allele 1
for i in xrange(dosages_numof):
freq1_snps[i] = freq1_snps[i] / (2*float(sample_counter))
return freq1_snps
def select_freq1_from_mldose_all_cases_controls(self, ids_controls_list, file_number=0):
""" select freq1 in order of mldose file and return
note that control ids are utilized for extracting cases
select freq1 from controls only in order of mldose file and return """
mldose_files_numof = len(self._mldose_files)
if mldose_files_numof <= file_number:
return
try:
fh = gzip.open(self._mldose_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
ids_controls_hash = {}
for id in ids_controls_list:
ids_controls_hash[id] = True
freq1_snps_all = []
freq1_snps_cases = []
freq1_snps_controls = []
sample_counter_all = 0
sample_counter_ca = 0
sample_counter_co = 0
dosages_numof = 0
line = fh.readline().replace("\n","")
# sum all dosages for allele 1
while line:
list = re.split("\s+",line)
id = list[0]
dosages = list[2:]
# if last element is empty then remove it
if dosages[-1] == "":
dosages.pop()
# find first case
if sample_counter_all == 0:
# count only if case
if not ids_controls_hash.has_key(id):
dosages_numof = len(dosages)
for i in xrange(dosages_numof):
# generate list of dosages
freq1_snps_cases.append(float(dosages[i]))
freq1_snps_controls.append(float(0.0))
freq1_snps_all.append(float(dosages[i]))
sample_counter_ca = sample_counter_ca + 1
# count only if control
elif ids_controls_hash.has_key(id):
dosages_numof = len(dosages)
for i in xrange(dosages_numof):
# generate list of dosages
freq1_snps_cases.append(float(0.0))
freq1_snps_controls.append(float(dosages[i]))
freq1_snps_all.append(float(dosages[i]))
sample_counter_co = sample_counter_co + 1
# error: neither case nor control
else:
print >> sys.stderr, "error: sample id neither case nor control."
sys.exit(1)
sample_counter_all = sample_counter_all + 1
line = fh.readline().replace("\n","")
# parse next cases/controls
else:
if len(dosages) != dosages_numof:
print >> sys.stderr, "error: different num of columns in mldose file."
sys.exit(1)
# count only if case
if not ids_controls_hash.has_key(id):
for i in xrange(dosages_numof):
freq1_snps_cases[i] = freq1_snps_cases[i] + float(dosages[i])
# count all samples
freq1_snps_all[i] = freq1_snps_all[i] + float(dosages[i])
sample_counter_ca = sample_counter_ca + 1
sample_counter_all = sample_counter_all + 1
# count only if controls
elif ids_controls_hash.has_key(id):
for i in xrange(dosages_numof):
freq1_snps_controls[i] = freq1_snps_controls[i] + float(dosages[i])
# count all samples
freq1_snps_all[i] = freq1_snps_all[i] + float(dosages[i])
sample_counter_co = sample_counter_co + 1
sample_counter_all = sample_counter_all + 1
# error: neither case nor control
else:
print >> sys.stderr, "error: sample id neither case nor control."
sys.exit(1)
line = fh.readline().replace("\n","")
fh.close()
# calc freq1 for allele 1
for i in xrange(dosages_numof):
freq1_snps_cases[i] = freq1_snps_cases[i] / (2*float(sample_counter_ca))
freq1_snps_controls[i] = freq1_snps_controls[i] / (2*float(sample_counter_co))
freq1_snps_all[i] = freq1_snps_all[i] / (2*float(sample_counter_all))
return freq1_snps_all, freq1_snps_cases, freq1_snps_controls
def compare(a,b):
""" compare decimals """
if float(a) < float(b):
return -1
elif a == b:
return 0
else:
return 1
def col_ten(t):
""" sort 10th column """
return t[9]
class Mlids:
""" class Mlids implements tasks on Mlids files from MACH software """
def __init__(self, mlids_file_cases, write_file=None):
""" init with cases """
self._mlids_files_cases = [] # names of mlids files from cases
self._mlids_files_controls = [] # names of mlids files from controls
self._mlids_files = [] # names of mlids files from cases and controls
self._mlids_files_cases.append(mlids_file_cases)
self._mlids_files.append(mlids_file_cases)
self._write_file = write_file # name of output file
self._write_file_filtered_snps = None
self._map_indivs = [] # sample names
def map_indivs(self, file_number=0):
""" map mlids file into memory """
mlids_files_numof = len(self._mlids_files)
if mlids_files_numof <= file_number:
return
try:
fh = gzip.open(self._mlids_files[file_number], "rb")
except IOError, e:
print e
sys.exit(1)
# skip comment lines that start with "#" and header line
comment_pattern = re.compile("^#.*$")
line = fh.readline()
while comment_pattern.search(line):
line = fh.readline()
while line:
self._map_indivs.append(re.split("\s+",line)[0])
line = fh.readline()
fh.close()
def indivs_numof_get(self):
""" get number of mapped indivs from mlids file """
return len(self._map_indivs)
def free_indivs(self):
""" free mapped indivs from mlids file """
self._map_indivs = []
class Mach2dat:
""" class Mach2dat implements tasks on results files from MACH2DAT software """
def __init__(self, mach2dat_file, write_file=None, mlinfo_filter=None):
""" init """
self._mach2dat_files = [] # names of mach2dat files
self._mach2dat_files.append(mach2dat_file)
self._write_file = write_file # name of output file
self._mlinfo_filter = mlinfo_filter # name of mlinfo file of filtered snps
self._map_snps_header = []
self._map_snps = {}
def write_file_set(self, write_file):
""" set name of output file """
self._write_file = write_file
def mlinfo_filter_set(self, mlinfo_filter):
""" set name of mlinfo file of filtered snps """
self._mlinfo_filter = mlinfo_filter
def map_snps(self, file_number=0):
""" map mach2dat file into memory """
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number:
return
try:
fh = file(self._mach2dat_files[file_number], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline()
while not header_pattern.search(line):
line = fh.readline()
# write new header
header_columns = re.split("\s+",line)[0:3] + re.split("\s+",line)[5:12]
self._map_snps_header = header_columns[:]
# read all snps
line = fh.readline().replace("\n","")
while line:
list = re.split("\s+",line)
# delete empty elements
if list[-1] == "":
del list[-1]
# workaround because of MACH2DAT bug:
            # If affy ids are in the output, then col 2 and 3 are accidentally
            # merged (one less column)
# no workaround
if len(list) == 12:
if list[5] != "NA":
self._map_snps[list[1]] = (list[0], list[1], list[2],\
decimal.Decimal(list[5]),\
decimal.Decimal(list[6]),\
decimal.Decimal(list[7]),\
decimal.Decimal(list[8]),\
decimal.Decimal(list[9]),\
decimal.Decimal(list[10]),\
decimal.Decimal(list[11]))
# workaround, col 2 and col 3 merged and has to be splitted
elif len(list) == 11:
if list[4] != "NA" and list[5] != "NA":
split_list = list[1].split(",")
marker = split_list[0][:-1]
alleles = split_list[0][-1:] + "," + split_list[1]
self._map_snps[marker] = (list[0], marker, alleles,\
decimal.Decimal(list[4]),\
decimal.Decimal(list[5]),\
decimal.Decimal(list[6]),\
decimal.Decimal(list[7]),\
decimal.Decimal(list[8]),\
decimal.Decimal(list[9]),\
decimal.Decimal(list[10]))
line = fh.readline().replace("\n","")
fh.close()
def free_snps(self):
""" free mapping of snps """
self._map_snps_header = []
self._map_snps = {}
def write_snps_filtered_sorted(self, snps_rs_al1_al1freq, maf_threshold, file_number=0):
""" write snps filtered by mlinfo_filter file and snps
sorted by p-value with additionally al1 and al1_freq
and al1_freq for cases and controls separately """
if self._mlinfo_filter == None:
print >> sys.stderr, "error: no mlinfo_filter file specified."
sys.exit(1)
try:
fh = file(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
mlinfo = Mlinfo(mlinfo_file_cases=self._mlinfo_filter)
mlinfo.map_snps()
snps_filtered = mlinfo.snps_get()
mlinfo.free_snps() ; del mlinfo
mach2dat_filtered = []
for snp in snps_filtered:
if self._map_snps.has_key(snp):
mach2dat_filtered.append(self._map_snps[snp])
# sort by pvalues
mach2dat_filtered.sort(cmp=compare, key=col_ten) # sort 10th column
# print new header
for i in xrange(len(self._map_snps_header)):
if i == 0:
fh.writelines("%s" %(self._map_snps_header[i]))
else:
fh.writelines(" %s" %(self._map_snps_header[i]))
fh.writelines(" AL1 FREQ1 MAF FREQ1_CA FREQ1_CO\n")
# print the rest
for line in mach2dat_filtered:
# round 4 decimals
al1_freq = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][1], 4)))
al1_freq_ca = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][2], 4)))
al1_freq_co = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][3], 4)))
# determine MAF for SNP
maf = decimal.Decimal(str(0.0))
if al1_freq >= decimal.Decimal(str(0.5)):
maf = 1 - al1_freq
else:
maf = al1_freq
# filter for maf frequency
if ( al1_freq >= decimal.Decimal(str(maf_threshold)) and \
al1_freq <= decimal.Decimal(str(1-maf_threshold)) ):
for i in xrange(len(line)):
if i == 0:
fh.writelines("%s" %(line[i]))
else:
fh.writelines(" %s" %(line[i]))
al1 = snps_rs_al1_al1freq[line[1]][0]
fh.writelines( " %s %s %s %s %s\n" %(al1, al1_freq, maf, \
al1_freq_ca, al1_freq_co) )
fh.close()
def write_snps_sorted(self, snps_rs_al1_al1freq, maf_threshold, file_number=0):
""" write snps filtered by mlinfo_filter file and snps
sorted by p-value with additionally al1 and al1_freq"""
try:
fh = file(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
mach2dat = []
keys = self._map_snps.iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[snp])
# sort by pvalues
mach2dat.sort(cmp=compare, key=col_ten) # sort 10th column
# print new header
for i in xrange(len(self._map_snps_header)):
if i == 0:
fh.writelines("%s" %(self._map_snps_header[i]))
else:
fh.writelines(" %s" %(self._map_snps_header[i]))
fh.writelines(" AL1 FREQ1 MAF FREQ1_CA FREQ1_CO\n")
# print the rest
for line in mach2dat:
# round 4 decimals
al1_freq = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][1], 4)))
al1_freq_ca = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][2], 4)))
al1_freq_co = decimal.Decimal(str(round(snps_rs_al1_al1freq[line[1]][3], 4)))
# determine MAF for SNP
maf = decimal.Decimal(str(0.0))
if al1_freq >= decimal.Decimal(str(0.5)):
maf = 1 - al1_freq
else:
maf = al1_freq
# filter for maf frequency
if (al1_freq >= decimal.Decimal(str(maf_threshold)) and \
al1_freq <= decimal.Decimal(str(1-maf_threshold)) ):
for i in xrange(len(line)):
if i == 0:
fh.writelines("%s" %(line[i]))
else:
fh.writelines(" %s" %(line[i]))
al1 = snps_rs_al1_al1freq[line[1]][0]
fh.writelines( " %s %s %s %s %s\n" %(al1, al1_freq, maf, \
al1_freq_ca, al1_freq_co) )
fh.close()
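# A minimal usage sketch (an assumption, not part of the original script): the
# Mach2dat constructor and SNP loader are defined above this excerpt, so an
# already-configured, already-mapped instance is taken as input here; only
# methods shown above are called, and the default MAF threshold is illustrative.
def _example_write_filtered_snps(mach2dat, snps_rs_al1_al1freq, maf_threshold=0.01):
    """ demo only: write the mlinfo-filtered, p-value-sorted result file """
    # writes to the output file configured on the instance (self._write_file)
    mach2dat.write_snps_filtered_sorted(snps_rs_al1_al1freq, maf_threshold)
    # use write_snps_sorted() instead to skip the mlinfo filtering step
    mach2dat.free_snps()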
class Mach2dat_merge:
""" class Mach2dat_merge implements tasks on filtered/sorted
results files from MACH2DAT software """
def __init__(self, write_file=None, write_file_assoc=None):
""" init """
self._mach2dat_files = [] # names of mach2dat files
self._write_file = write_file # name of output file
self._write_file_assoc = write_file_assoc # name of assoc output file
self._write_file_combined_short = None # name of combined short output file
self._map_snps_header = []
self._map_snps = [] # list of dictionaries
self._map_snps_lists = [] # list of lists
def write_file_set(self, write_file):
""" set name of output file """
self._write_file = write_file
def write_file_assoc_set(self, write_file_assoc):
""" set name of output assoc file """
self._write_file_assoc = write_file_assoc
def write_file_combined_short_set(self, write_file_combined_short):
""" set name of combined short output file """
self._write_file_combined_short = write_file_combined_short
def mach2dat_file_add(self, mach2dat_file):
""" add another mach2dat file from cases to mach2dat file list """
self._mach2dat_files.append(mach2dat_file)
def map_snps_filter_snps(self, snp_filter_dict, file_number=0):
""" map mach2dat file into memory and filter by using snp_filter_dict """
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number:
return
try:
fh = file(self._mach2dat_files[file_number], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# write new header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps = {}
while line:
list = re.split("\s+",line)
if list[3] != "NA" and (not snp_filter_dict.has_key(list[1])):
map_snps[list[1]] = (list[0], list[1], list[2],\
decimal.Decimal(list[3]),\
decimal.Decimal(list[4]),\
decimal.Decimal(list[5]),\
decimal.Decimal(list[6]),\
decimal.Decimal(list[7]),\
decimal.Decimal(list[8]),\
decimal.Decimal(list[9]),\
list[10],\
decimal.Decimal(list[11]),\
decimal.Decimal(list[12]),\
decimal.Decimal(list[13]),\
decimal.Decimal(list[14]))
line = fh.readline()
self._map_snps.append(map_snps.copy())
fh.close()
def map_snps(self, file_number=0):
""" map mach2dat file into memory """
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number:
return
try:
fh = file(self._mach2dat_files[file_number], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# write new header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps = {}
while line:
list = re.split("\s+",line)
if list[3] != "NA":
map_snps[list[1]] = (list[0], list[1], list[2],\
decimal.Decimal(list[3]),\
decimal.Decimal(list[4]),\
decimal.Decimal(list[5]),\
decimal.Decimal(list[6]),\
decimal.Decimal(list[7]),\
decimal.Decimal(list[8]),\
decimal.Decimal(list[9]),\
list[10],\
decimal.Decimal(list[11]),\
decimal.Decimal(list[12]),\
decimal.Decimal(list[13]),\
decimal.Decimal(list[14]))
line = fh.readline()
self._map_snps.append(map_snps.copy())
fh.close()
def map_snps_list(self, file_number=0):
""" map mach2dat file into memory but into list instead
of hash """
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number:
return
try:
fh = file(self._mach2dat_files[file_number], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# write new header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps = []
while line:
list = re.split("\s+",line)
if list[3] != "NA":
map_snps.append( (list[0], list[1], list[2],\
decimal.Decimal(list[3]),\
decimal.Decimal(list[4]),\
decimal.Decimal(list[5]),\
decimal.Decimal(list[6]),\
decimal.Decimal(list[7]),\
decimal.Decimal(list[8]),\
decimal.Decimal(list[9]),\
list[10],\
decimal.Decimal(list[11]),\
decimal.Decimal(list[12]),\
decimal.Decimal(list[13]),\
decimal.Decimal(list[14])) )
line = fh.readline()
# clone list
self._map_snps_lists.append(map_snps[:])
fh.close()
def free_snps(self):
""" free mapping of snps """
self._map_snps_header = []
self._map_snps = []
self._map_snps_lists = []
def write_mach2dat_merged_sorted(self, file_number=0):
""" write all snps to merged file, sorted """
try:
fh = file(self._write_file, "w")
fh2 = file(self._write_file_assoc, "w")
except IOError, e:
print e
sys.exit(1)
mach2dat = []
for j in xrange(len(self._map_snps)):
keys = self._map_snps[j].iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[j][snp])
# sort by pvalues
mach2dat.sort(cmp=compare, key=col_ten) # sort 10th column
# print new header from first mach2dat result file
for i in xrange(len(self._map_snps_header[0])):
if i == 0:
fh.writelines("%s" %(self._map_snps_header[0][i]))
else:
fh.writelines(" %s" %(self._map_snps_header[0][i]))
fh.writelines("\n")
# print new header for new assoc file with pvalues from MACH2DAT
fh2.writelines(" SNP P\n")
# print the rest
for line in mach2dat:
for i in xrange(len(line)):
if i == 0:
fh.writelines("%s" %(line[i]))
else:
fh.writelines(" %s" %(line[i]))
fh.writelines("\n")
fh2.writelines(" %s %s\n" %(line[1], line[9]))
fh.close()
fh2.close()
def write_mach2dat_merged_sorted_no_assoc_file(self, file_number=0):
""" write all snps to merged file, sorted, do not write assoc file """
try:
fh = file(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
mach2dat = []
for j in xrange(len(self._map_snps)):
keys = self._map_snps[j].iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[j][snp])
# sort by pvalues
mach2dat.sort(cmp=compare, key=col_ten) # sort 10th column
# print new header from first mach2dat result file
for i in xrange(len(self._map_snps_header[0])):
if i == 0:
fh.writelines("%s" %(self._map_snps_header[0][i]))
else:
fh.writelines(" %s" %(self._map_snps_header[0][i]))
fh.writelines("\n")
# print the rest
for line in mach2dat:
for i in xrange(len(line)):
if i == 0:
fh.writelines("%s" %(line[i]))
else:
fh.writelines(" %s" %(line[i]))
fh.writelines("\n")
fh.close()
def write_mach2dat_typed_imputed_info(self, typed_snps, file_number=0):
""" write all snps with imputed or typed info """
try:
fh = file(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
mach2dat = []
for j in xrange(len(self._map_snps)):
keys = self._map_snps[j].iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[j][snp])
# sort by pvalues
mach2dat.sort(cmp=compare, key=col_ten) # sort 10th column
# print new header from first mach2dat result file
for i in xrange(len(self._map_snps_header[0])):
if i == 0:
fh.writelines("%s" %(self._map_snps_header[0][i]))
else:
fh.writelines(" %s" %(self._map_snps_header[0][i]))
fh.writelines("\n")
# print the remaining lines (sorted by p-value above)
for line in mach2dat:
for i in xrange(len(line)):
if i == 0:
fh.writelines("%s" %(line[i]))
elif i == 1:
if typed_snps.has_key(line[i]):
fh.writelines(" %s_typed" %(line[i]))
else:
fh.writelines(" %s_imputed" %(line[i]))
else:
fh.writelines(" %s" %(line[i]))
fh.writelines("\n")
fh.close()
def typed_imputed_info2pos(self, ref_file, pos_write_file):
""" add chr and pos to typed imputed info """
try:
fh = file(self._write_file, "r")
fh_ref = file(ref_file, "r")
fh_pos = file(pos_write_file, "w")
except IOError, e:
print e
sys.exit(1)
dict_SNPs = {} # store SNPs with line
comment_pattern = re.compile("^#.*$")
blankline_pattern = re.compile("^\s*$")
# typed imputed info file
# print header
line = fh.readline().replace("\n", "")
fh_pos.writelines("CHR POS " + line + "\n")
line = fh.readline().replace("\n", "")
while line:
# skip comment lines that start with "#"
if(comment_pattern.search(line)):
line = fh.readline().replace("\n", "")
continue
# skip blank lines
if(blankline_pattern.search(line)):
line = fh.readline().replace("\n", "")
continue
list = re.split("\s+",line)
snp = list[1].split("_")[0]
dict_SNPs[snp] = line
line = fh.readline().replace("\n", "")
fh.close()
# hapmap reference bim file
sep = " "
line = fh_ref.readline().replace("\n", "")
while line:
# skip comment lines that start with "#"
if(comment_pattern.search(line)):
line = fh_ref.readline().replace("\n", "")
continue
# skip blank lines
if(blankline_pattern.search(line)):
line = fh_ref.readline().replace("\n", "")
continue
list = re.split("\s+",line)
snp = list[1]
if dict_SNPs.has_key(snp):
fh_pos.writelines(list[0] +sep+\
list[3] +sep+\
dict_SNPs[snp] +"\n")
line = fh_ref.readline().replace("\n", "")
fh_ref.close()
fh_pos.close()
def get_rs_pvalue_type_hash(self, file_number=0):
""" get rs, pvalue, type in hash """
rs_pvalue_type_hash = {}
mach2dat = []
for j in xrange(len(self._map_snps)):
keys = self._map_snps[j].iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[j][snp])
#for snp in mach2dat:
#rs, type = mach2dat[snp][1].split("_")
#rs_pvalue_type_hash[rs] = (mach2dat[snp][9], type)
for i in xrange(len(mach2dat)):
rs, type = mach2dat[i][1].split("_")
rs_pvalue_type_hash[rs] = (mach2dat[i][9], type)
return rs_pvalue_type_hash
def get_best_pvalue(self, file_number=0):
""" get best_pvalue as Decimal.decimal object """
best_pvalue = 1.0
mach2dat = []
for j in xrange(len(self._map_snps)):
keys = self._map_snps[j].iterkeys()
for snp in keys:
mach2dat.append(self._map_snps[j][snp])
for i in xrange(len(mach2dat)):
if i == 0:
best_pvalue = mach2dat[i][9]
elif mach2dat[i][9] < best_pvalue:
best_pvalue = mach2dat[i][9]
return best_pvalue
def write_metal_format_from_map_snps_list(self,\
numof_cases_controls, hapmap_rs_hash, file_number=0):
""" write metal format """
try:
fh = file(self._write_file, "w")
except IOError, e:
print e
sys.exit(1)
mach2dat = []
for j in xrange(len(self._map_snps_lists)):
for snp_line in self._map_snps_lists[j]:
mach2dat.append(snp_line)
# print new header from first mach2dat result file
fh.writelines("TRAIT MARKER CHR POS AL1 AL2 FREQ1 MAF SAMPLE_SIZE EFFECT1 OR STDERR LRCHISQ LRPVAL\n")
# print the remaining lines in the order in which they were mapped
for tuple in mach2dat:
trait = tuple[0]
snp = tuple[1]
chr = str(hapmap_rs_hash[snp][0])
pos = str(hapmap_rs_hash[snp][2])
a1, a2 = tuple[2].split(",")
freq1 = tuple[11]
maf = tuple[12]
sample_size = str(numof_cases_controls)
effect1 = tuple[3]
odds_ratio = tuple[4]
stderr = tuple[5]
lrchisq = tuple[8]
lrpval = tuple[9]
assert(a1 == tuple[10])
fh.writelines( "%s %s %s %s %s %s %s %s %s %s %s %s %s %s\n" \
%(trait, snp, chr, pos, a1, a2, freq1, maf, \
sample_size, effect1, odds_ratio, stderr, \
lrchisq, lrpval) )
fh.close()
def write_mach2dat_typed_imputed_info_combined_without_map(self, disease_name,\
disease_name_comparison_list,\
rs_tophits_hash, file_number_disease=0):
""" write combined outfile without mapping files into memory,
that means files were not mapped previous to this function call
only print combined output for rs_tophits_hash
DO NOT MAP snps previously into memory """
# check if valid file_number_disease
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number_disease:
return
# ----------------------------------------------------------- #
# -- (1) read rs_tophits snps from MACH2dat merged outfile -- #
# ----------------------------------------------------------- #
try:
fh = file(self._mach2dat_files[file_number_disease], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# store header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps_rs_tophits_disease = []
while line:
list = re.split("\s+",line)
rs = list[1].split("_")[0]
if rs_tophits_hash.has_key(rs) and list[3] != "NA":
map_snps_rs_tophits_disease.append( (list[0], list[1], list[2],\
list[3],\
list[4],\
list[5],\
list[6],\
list[7],\
list[8],\
list[9],\
list[10],\
list[11],\
list[12],\
list[13],\
list[14]) )
line = fh.readline()
fh.close()
# ------------------------------------------------------------------ #
# -- (2) read rs_tophits snps (if available) from MACH2DAT merged -- #
# -- outfile(s) for comparison -- #
# ------------------------------------------------------------------ #
map_snps_rs_tophits_compare_list = [] # list of hashes for each
# comparison file
# for all comparison file(s) (without disease file)
for i in xrange(mach2dat_files_numof):
# skip disease file
if i == file_number_disease:
continue
# try to open disease file for comparison
try:
fh = file(self._mach2dat_files[i], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# store additional header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps_rs_tophits_hash = {}
while line:
list = re.split("\s+",line)
rs = list[1].split("_")[0]
if rs_tophits_hash.has_key(rs) and list[3] != "NA":
map_snps_rs_tophits_hash[rs] = ( list[0], list[1], list[2],\
list[3],\
list[4],\
list[5],\
list[6],\
list[7],\
list[8],\
list[9],\
list[10],\
list[11],\
list[12],\
list[13],\
list[14] )
line = fh.readline()
# copy hash
map_snps_rs_tophits_compare_list.append(map_snps_rs_tophits_hash.copy())
fh.close()
# ---------------------------- #
# -- print comparison table -- #
# ---------------------------- #
try:
fh = file(self._write_file, "w")
fh2 = file(self._write_file_combined_short, "w")
except IOError, e:
print e
sys.exit(1)
short_columns_disease = [1,2,4,9,10,11,13,14] # count from 0
short_columns_compare_disease = [4,9,10,11,13,14]
# -- print new header -- #
# print columns from disease mach2dat result file
for i in xrange(len(self._map_snps_header[0])):
# short version
if i in short_columns_disease:
if i == short_columns_disease[0]:
fh2.writelines("%s_%s" %(self._map_snps_header[0][i],\
disease_name))
else:
fh2.writelines("\t%s_%s" %(self._map_snps_header[0][i],\
disease_name))
# long version
if i == 0:
fh.writelines("%s_%s" %(self._map_snps_header[0][i], disease_name))
else:
fh.writelines("\t%s_%s" %(self._map_snps_header[0][i], disease_name))
# print additional columns from additional comparison file(s)
# for each comparison file
for j in xrange(1, len(self._map_snps_header)):
for i in xrange(len(self._map_snps_header[j])):
# short version
if i in short_columns_compare_disease:
fh2.writelines("\t%s_%s" %(self._map_snps_header[j][i],\
disease_name_comparison_list[j-1]))
# long version
fh.writelines("\t%s_%s" %(self._map_snps_header[j][i],\
disease_name_comparison_list[j-1]))
fh.writelines("\n")
fh2.writelines("\n")
# -- print rs_tophits lines -- #
for line in map_snps_rs_tophits_disease:
# rs of line
rs = line[1].split("_")[0]
# print columns from disease file
for j in xrange(len(line)):
# short version
if j in short_columns_disease:
if j == short_columns_disease[0]:
fh2.writelines(line[j])
else:
fh2.writelines("\t" + line[j])
# long version
if j == 0:
fh.writelines(line[j])
else:
fh.writelines("\t" + line[j])
# print columns for each comparison file
for map_snps_rs_tophits_hash in map_snps_rs_tophits_compare_list:
# if snp is also in comparison file
if map_snps_rs_tophits_hash.has_key(rs):
assert(len(line) == len(map_snps_rs_tophits_hash[rs]))
for j in xrange(len(map_snps_rs_tophits_hash[rs])):
# short version
if j in short_columns_compare_disease:
fh2.writelines("\t%s" %((map_snps_rs_tophits_hash[rs][j])))
# long version
fh.writelines("\t%s" %((map_snps_rs_tophits_hash[rs][j])))
# if snp is not in comparison file
else:
for j in xrange(len(line)):
# short version
if j in short_columns_compare_disease:
fh2.writelines("\t---")
# long version
fh.writelines("\t---")
fh.writelines("\n")
fh2.writelines("\n")
fh.close()
fh2.close()
def write_mach2dat_typed_imputed_info_combined_without_map_columns_grouped(self, disease_name,\
disease_name_comparison_list,\
rs_tophits_hash, file_number_disease=0):
""" write combined outfile without mapping files into memory,
that means files were not mapped previous to this function call
only print combined output for rs_tophits_hash
DO NOT MAP snps previously into memory """
# check if valid file_number_disease
mach2dat_files_numof = len(self._mach2dat_files)
if mach2dat_files_numof <= file_number_disease:
return
# ----------------------------------------------------------- #
# -- (1) read rs_tophits snps from MACH2dat merged outfile -- #
# ----------------------------------------------------------- #
try:
fh = file(self._mach2dat_files[file_number_disease], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# store header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps_rs_tophits_disease = []
while line:
list = re.split("\s+",line)
rs = list[1].split("_")[0]
if rs_tophits_hash.has_key(rs) and list[3] != "NA":
map_snps_rs_tophits_disease.append( (list[0], list[1], list[2],\
list[3],\
list[4],\
list[5],\
list[6],\
list[7],\
list[8],\
list[9],\
list[10],\
list[11],\
list[12],\
list[13],\
list[14]) )
line = fh.readline()
fh.close()
# ------------------------------------------------------------------ #
# -- (2) read rs_tophits snps (if available) from MACH2DAT merged -- #
# -- outfile(s) for comparison -- #
# ------------------------------------------------------------------ #
map_snps_rs_tophits_compare_list = [] # list of hashes for each
# comparison file
# for all comparison file(s) (without disease file)
for i in xrange(mach2dat_files_numof):
# skip disease file
if i == file_number_disease:
continue
# try to open disease file for comparison
try:
fh = file(self._mach2dat_files[i], "r")
except IOError, e:
print e
sys.exit(1)
header_pattern = re.compile("^TRAIT.*$")
# skip lines until header line
line = fh.readline().replace("\n","")
while not header_pattern.search(line):
line = fh.readline().replace("\n","")
# store additional header
self._map_snps_header.append(re.split("\s+",line)[:])
# read all snps
line = fh.readline()
map_snps_rs_tophits_hash = {}
while line:
list = re.split("\s+",line)
rs = list[1].split("_")[0]
if rs_tophits_hash.has_key(rs) and list[3] != "NA":
map_snps_rs_tophits_hash[rs] = ( list[0], list[1], list[2],\
list[3],\
list[4],\
list[5],\
list[6],\
list[7],\
list[8],\
list[9],\
list[10],\
list[11],\
list[12],\
list[13],\
list[14] )
line = fh.readline()
# copy hash
map_snps_rs_tophits_compare_list.append(map_snps_rs_tophits_hash.copy())
fh.close()
# ---------------------------- #
# -- print comparison table -- #
# ---------------------------- #
try:
fh = file(self._write_file, "w")
fh2 = file(self._write_file_combined_short, "w")
except IOError, e:
print e
sys.exit(1)
long_columns = range(len(self._map_snps_header[0])) # count from 0
short_columns = [1,2,4,9,10,11,13,14] # count from 0
short_columns_compare = [4,9,10,11,13,14] # count from 0
for i in long_columns:
# -- print new header -- #
# print columns from disease mach2dat result file
# short version
if i in short_columns:
if i == short_columns[0]:
fh2.writelines("%s_%s" %(self._map_snps_header[0][i],\
disease_name))
else:
fh2.writelines("\t%s_%s" %(self._map_snps_header[0][i],\
disease_name))
# long version
if i == 0:
fh.writelines("%s_%s" %(self._map_snps_header[0][i], disease_name))
else:
fh.writelines("\t%s_%s" %(self._map_snps_header[0][i], disease_name))
# print additional columns from additional comparison file(s)
# for each comparison file
for j in xrange(1, len(self._map_snps_header)):
# short version
if i in short_columns_compare:
fh2.writelines("\t%s_%s" %(self._map_snps_header[j][i],\
disease_name_comparison_list[j-1]))
# long version
fh.writelines("\t%s_%s" %(self._map_snps_header[j][i],\
disease_name_comparison_list[j-1]))
fh.writelines("\n")
fh2.writelines("\n")
# -- print rs_tophits lines -- #
for line in map_snps_rs_tophits_disease:
# rs of line
rs = line[1].split("_")[0]
for i in long_columns:
# short version
if i in short_columns:
if i == short_columns[0]:
fh2.writelines(line[i])
else:
fh2.writelines("\t" + line[i])
# long version
if i == 0:
fh.writelines(line[i])
else:
fh.writelines("\t" + line[i])
# print columns for each comparison file
for map_snps_rs_tophits_hash in map_snps_rs_tophits_compare_list:
# if snp is also in comparison file
if map_snps_rs_tophits_hash.has_key(rs):
assert(len(line) == len(map_snps_rs_tophits_hash[rs]))
# short version
if i in short_columns_compare:
fh2.writelines("\t%s" %((map_snps_rs_tophits_hash[rs][i])))
# long version
fh.writelines("\t%s" %((map_snps_rs_tophits_hash[rs][i])))
# if snp is not in comparison file
else:
# short version
if i in short_columns_compare:
fh2.writelines("\t---")
# long version
fh.writelines("\t---")
fh.writelines("\n")
fh2.writelines("\n")
fh.close()
fh2.close()
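# A minimal end-to-end sketch (an assumption; the file names are illustrative):
# merge several per-chunk MACH2DAT result files, sort them by p-value and write
# the merged table plus the small assoc file, using only methods of this class.
def _example_merge_results(result_files):
    """ demo only: result_files is a list of MACH2DAT output file names """
    merger = Mach2dat_merge(write_file="mach2dat_merged.txt",
                            write_file_assoc="mach2dat_merged.assoc")
    for result_file in result_files:
        merger.mach2dat_file_add(result_file)
    for file_number in xrange(len(result_files)):
        merger.map_snps(file_number=file_number)
    merger.write_mach2dat_merged_sorted()
    merger.free_snps()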
| 35.685699 | 110 | 0.508529 |
c60bb420ee439965704054f35a930536ccc79dd5
| 1,752 |
py
|
Python
|
prs2/middleware.py
|
ropable/prs
|
0654ca7fce5a6639f5df67905dd2e5c09a1c1f17
|
[
"Apache-2.0"
] | 1 |
2016-08-21T06:45:50.000Z
|
2016-08-21T06:45:50.000Z
|
prs2/middleware.py
|
dbca-wa/prs
|
0654ca7fce5a6639f5df67905dd2e5c09a1c1f17
|
[
"Apache-2.0"
] | 12 |
2019-02-11T00:01:01.000Z
|
2022-02-01T02:07:49.000Z
|
prs2/middleware.py
|
ropable/prs
|
0654ca7fce5a6639f5df67905dd2e5c09a1c1f17
|
[
"Apache-2.0"
] | 4 |
2018-08-09T06:56:43.000Z
|
2019-11-13T08:05:46.000Z
|
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
import logging
LOGGER = logging.getLogger("healthcheck")
class HealthCheckMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.method == "GET":
if request.path == "/readiness":
return self.readiness(request)
elif request.path == "/liveness":
return self.liveness(request)
return self.get_response(request)
def liveness(self, request):
"""Returns that the server is alive.
"""
return HttpResponse("OK")
def readiness(self, request):
"""Connect to each database and do a generic standard SQL query
that doesn't write any data and doesn't depend on any tables
being present.
"""
try:
from django.db import connections
for name in connections:
cursor = connections[name].cursor()
cursor.execute("SELECT 1;")
row = cursor.fetchone()
if row is None:
return HttpResponseServerError("db: invalid response")
except Exception as e:
LOGGER.exception(e)
return HttpResponseServerError("db: cannot connect to database.")
return HttpResponse("OK")
class PrsMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
# Reference: http://www.gnuterrypratchett.com/
response['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
return response
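# A hedged illustration (not part of this module): middleware classes like the
# two above are typically enabled in the project's settings.py, e.g.
#
#   MIDDLEWARE = [
#       "prs2.middleware.HealthCheckMiddleware",
#       # ... Django's default middleware ...
#       "prs2.middleware.PrsMiddleware",
#   ]
#
# after which GET /liveness and GET /readiness return the probe responses
# implemented in HealthCheckMiddleware above.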
| 30.736842 | 77 | 0.619863 |
30036277781490055c42f7ee2cf4328a736114ba
| 3,692 |
py
|
Python
|
test/functional/mining_getblocktemplate_longpoll.py
|
yvettep321/bitcoin
|
33707a2a8828c68e3c0586bdadea52c84873d386
|
[
"MIT"
] | 7 |
2020-11-09T15:10:26.000Z
|
2022-03-04T21:55:39.000Z
|
test/functional/mining_getblocktemplate_longpoll.py
|
yvettep321/bitcoin
|
33707a2a8828c68e3c0586bdadea52c84873d386
|
[
"MIT"
] | 2 |
2021-03-29T01:09:59.000Z
|
2021-07-02T04:34:25.000Z
|
test/functional/mining_getblocktemplate_longpoll.py
|
yvettep321/bitcoin
|
33707a2a8828c68e3c0586bdadea52c84873d386
|
[
"MIT"
] | 2 |
2021-09-05T22:45:02.000Z
|
2021-09-08T16:16:40.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
import random
import threading
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_rpc_proxy
from test_framework.wallet import MiniWallet
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
template = node.getblocktemplate({'rules': ['segwit']})
self.longpollid = template['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
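# A hedged sketch (comments only, for illustration): the long poll itself is a
# plain RPC call that blocks until the block template changes, e.g.
#
#   template = node.getblocktemplate({'rules': ['segwit']})
#   node.getblocktemplate({'longpollid': template['longpollid'],
#                          'rules': ['segwit']})  # returns on new block/tx
#
# which is exactly what LongpollThread.run() does on its own RPC connection.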
class GetBlockTemplateLPTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
longpollid = template['longpollid']
template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template2['longpollid'] == longpollid
self.log.info("Test that longpoll waits if we do nothing")
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
miniwallets = [MiniWallet(node) for node in self.nodes]
self.log.info("Test that longpoll will terminate if another node generates a block")
self.generate(miniwallets[1], 1) # generate a block on another node
# check that the thread exits now that a new block was generated on the other node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
self.log.info("Test that longpoll will terminate if we generate a block ourselves")
thr = LongpollThread(self.nodes[0])
thr.start()
self.generate(miniwallets[0], 1) # generate a block on own node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
# Add enough mature utxos to the wallets, so that all txs spend confirmed coins
self.nodes[0].generate(COINBASE_MATURITY)
self.sync_blocks()
self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll")
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20)
miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes),
fee_rate=fee_rate)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert not thr.is_alive()
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| 44.481928 | 134 | 0.685536 |
b3c31a12c8c7dd11900921555205a2cb2b093ea7
| 405 |
py
|
Python
|
accounts/migrations/0002_customuser_phone_number.py
|
abdallah-alabed/DjangoX
|
f8d8f4d3f60699bf3b6fd29cca59a6644a591a42
|
[
"MIT"
] | null | null | null |
accounts/migrations/0002_customuser_phone_number.py
|
abdallah-alabed/DjangoX
|
f8d8f4d3f60699bf3b6fd29cca59a6644a591a42
|
[
"MIT"
] | null | null | null |
accounts/migrations/0002_customuser_phone_number.py
|
abdallah-alabed/DjangoX
|
f8d8f4d3f60699bf3b6fd29cca59a6644a591a42
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2021-12-08 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='phone_number',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
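# A hedged usage note (not part of the generated migration): migrations like
# this one are applied with Django's management command, e.g.
#
#   python manage.py migrate accounts
#
# which adds the nullable phone_number column to the custom user table.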
| 21.315789 | 73 | 0.604938 |
69ab5c11be2ea7dbd3119a2d416c95f288b41681
| 2,203 |
py
|
Python
|
docs/conf.py
|
meakio/python-proc
|
0302fe62fa7990e4a8f8020fca83e7566a35e315
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
meakio/python-proc
|
0302fe62fa7990e4a8f8020fca83e7566a35e315
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
meakio/python-proc
|
0302fe62fa7990e4a8f8020fca83e7566a35e315
|
[
"MIT"
] | null | null | null |
"""
Documentation build configuration file for the `proc` package.
"""
# Author: Peter Odding <[email protected]>
# Last Change: June 21, 2018
# URL: https://proc.readthedocs.io
import os
import sys
# Add the 'proc' source distribution's root directory to the module search path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
'property_manager.sphinx',
]
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'proc'
copyright = '2018, Peter Odding'
# Find the package version and make it the release.
from proc import __version__ as proc_version
# The short X.Y version (replacement for |version|).
version = '.'.join(proc_version.split('.')[:2])
# The full version, including alpha/beta/rc tags (replacement for |release|).
release = proc_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# References to external documentation.
intersphinx_mapping = dict(
python2=('https://docs.python.org/2', None),
python3=('https://docs.python.org/3', None),
executor=('https://executor.readthedocs.io/en/latest', None),
propertymanager=('https://property-manager.readthedocs.io/en/latest', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
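# A hedged usage note (the output directory below is an assumption): with this
# file in place as docs/conf.py, the HTML documentation is typically built with
#
#   sphinx-build -b html docs docs/_build/html
#
# and the 'nature' theme plus the intersphinx mappings above are picked up
# automatically.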
| 29.373333 | 80 | 0.695869 |
ac7a36fd698d79fb6a541fd13c8007927aef9250
| 56 |
py
|
Python
|
src/ThaiPersonalCardExtract/ThaiGovernmentLottery/__init__.py
|
ggafiled/ThaiPersonalCardExtract
|
7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4
|
[
"Apache-2.0"
] | 12 |
2021-08-11T08:48:48.000Z
|
2022-02-10T03:06:34.000Z
|
src/ThaiPersonalCardExtract/ThaiGovernmentLottery/__init__.py
|
ggafiled/ThaiPersonalCardExtract
|
7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4
|
[
"Apache-2.0"
] | 2 |
2021-08-21T16:14:39.000Z
|
2022-02-17T15:31:07.000Z
|
src/ThaiPersonalCardExtract/ThaiGovernmentLottery/__init__.py
|
ggafiled/ThaiPersonalCardExtract
|
7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4
|
[
"Apache-2.0"
] | 7 |
2021-08-11T09:32:02.000Z
|
2022-03-26T13:56:46.000Z
|
from .ThaiGovernmentLottery import ThaiGovernmentLottery
| 56 | 56 | 0.928571 |
a64f3b9bb298a2099c68d2a32b4f6ee7111d6548
| 795 |
bzl
|
Python
|
google/cloud/bigtable/examples/bigtable_examples_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 299 |
2019-01-31T12:17:56.000Z
|
2022-03-30T15:46:15.000Z
|
google/cloud/bigtable/examples/bigtable_examples_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 6,560 |
2019-01-29T03:15:15.000Z
|
2022-03-31T23:58:48.000Z
|
google/cloud/bigtable/examples/bigtable_examples_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 253 |
2019-02-07T01:18:13.000Z
|
2022-03-30T17:21:10.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
bigtable_examples_unit_tests = [
"bigtable_examples_common_test.cc",
]
| 36.136364 | 79 | 0.755975 |
f19feea1875d1b945c8c45fbd873163cd146cbda
| 16,187 |
py
|
Python
|
mne/tests/test_bem.py
|
enricovara/mne-python
|
f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc
|
[
"BSD-3-Clause"
] | 1 |
2021-03-13T04:41:45.000Z
|
2021-03-13T04:41:45.000Z
|
mne/tests/test_bem.py
|
enricovara/mne-python
|
f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc
|
[
"BSD-3-Clause"
] | 28 |
2020-05-07T00:58:34.000Z
|
2020-08-29T23:02:17.000Z
|
mne/tests/test_bem.py
|
hichamjanati/mne-python
|
b8f5e5ce0da8acfeb7298c8eb1d26a75d5526eac
|
[
"BSD-3-Clause"
] | 1 |
2020-07-29T15:48:58.000Z
|
2020-07-29T15:48:58.000Z
|
# Authors: Marijn van Vliet <[email protected]>
#
# License: BSD 3 clause
from copy import deepcopy
from os import makedirs
import os.path as op
import re
from shutil import copy
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces,
make_bem_solution, read_bem_solution, write_bem_solution,
make_sphere_model, Transform, Info, write_surface)
from mne.preprocessing.maxfilter import fit_sphere_to_headshape
from mne.io.constants import FIFF
from mne.transforms import translation
from mne.datasets import testing
from mne.utils import (run_tests_if_main, catch_logging, requires_h5py)
from mne.bem import (_ico_downsample, _get_ico_map, _order_surfaces,
_assert_complete_surface, _assert_inside,
_check_surface_size, _bem_find_surface)
from mne.surface import read_surface
from mne.io import read_info
fname_raw = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test_raw.fif')
subjects_dir = op.join(testing.data_path(download=False), 'subjects')
fname_bem_3 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-320-320-bem.fif')
fname_bem_1 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-bem.fif')
fname_bem_sol_3 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-320-320-bem-sol.fif')
fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem',
'sample-320-bem-sol.fif')
def _compare_bem_surfaces(surfs_1, surfs_2):
"""Compare BEM surfaces."""
names = ['id', 'nn', 'rr', 'coord_frame', 'tris', 'sigma', 'ntri', 'np']
ignores = ['tri_cent', 'tri_nn', 'tri_area', 'neighbor_tri']
for s0, s1 in zip(surfs_1, surfs_2):
assert_equal(set(names), set(s0.keys()) - set(ignores))
assert_equal(set(names), set(s1.keys()) - set(ignores))
for name in names:
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-6,
err_msg='Mismatch: "%s"' % name)
def _compare_bem_solutions(sol_a, sol_b):
"""Compare BEM solutions."""
# compare the surfaces we used
_compare_bem_surfaces(sol_a['surfs'], sol_b['surfs'])
# compare the actual solutions
names = ['bem_method', 'field_mult', 'gamma', 'is_sphere',
'nsol', 'sigma', 'source_mult', 'solution']
assert_equal(set(sol_a.keys()), set(sol_b.keys()))
assert_equal(set(names + ['surfs']), set(sol_b.keys()))
for key in names:
assert_allclose(sol_a[key], sol_b[key], rtol=1e-3, atol=1e-5,
err_msg='Mismatch: %s' % key)
@testing.requires_testing_data
@requires_h5py
@pytest.mark.parametrize('ext', ('fif', 'h5'))
def test_io_bem(tmpdir, ext):
"""Test reading and writing of bem surfaces and solutions."""
import h5py
temp_bem = op.join(str(tmpdir), f'temp-bem.{ext}')
# model
with pytest.raises(ValueError, match='BEM data not found'):
read_bem_surfaces(fname_raw)
with pytest.raises(ValueError, match='surface with id 10'):
read_bem_surfaces(fname_bem_3, s_id=10)
surf = read_bem_surfaces(fname_bem_3, patch_stats=True)
surf = read_bem_surfaces(fname_bem_3, patch_stats=False)
write_bem_surfaces(temp_bem, surf[0])
with pytest.raises(IOError, match='exists'):
write_bem_surfaces(temp_bem, surf[0])
write_bem_surfaces(temp_bem, surf[0], overwrite=True)
if ext == 'h5':
with h5py.File(temp_bem, 'r'): # make sure it's valid
pass
surf_read = read_bem_surfaces(temp_bem, patch_stats=False)
_compare_bem_surfaces(surf, surf_read)
# solution
with pytest.raises(RuntimeError, match='No BEM solution found'):
read_bem_solution(fname_bem_3)
temp_sol = op.join(str(tmpdir), f'temp-sol.{ext}')
sol = read_bem_solution(fname_bem_sol_3)
assert 'BEM' in repr(sol)
write_bem_solution(temp_sol, sol)
sol_read = read_bem_solution(temp_sol)
_compare_bem_solutions(sol, sol_read)
sol = read_bem_solution(fname_bem_sol_1)
with pytest.raises(RuntimeError, match='BEM model does not have'):
_bem_find_surface(sol, 3)
def test_make_sphere_model():
"""Test making a sphere model."""
info = read_info(fname_raw)
pytest.raises(ValueError, make_sphere_model, 'foo', 'auto', info)
pytest.raises(ValueError, make_sphere_model, 'auto', 'auto', None)
pytest.raises(ValueError, make_sphere_model, 'auto', 'auto', info,
relative_radii=(), sigmas=())
with pytest.raises(ValueError, match='relative_radii.*must match.*sigmas'):
make_sphere_model('auto', 'auto', info, relative_radii=(1,))
# here we just make sure it works -- the functionality is actually
# tested more extensively e.g. in the forward and dipole code
with catch_logging() as log:
bem = make_sphere_model('auto', 'auto', info, verbose=True)
log = log.getvalue()
assert ' RV = ' in log
for line in log.split('\n'):
if ' RV = ' in line:
val = float(line.split()[-2])
assert val < 0.01 # actually decent fitting
break
assert '3 layers' in repr(bem)
assert 'Sphere ' in repr(bem)
assert ' mm' in repr(bem)
bem = make_sphere_model('auto', None, info)
assert 'no layers' in repr(bem)
assert 'Sphere ' in repr(bem)
with pytest.raises(ValueError, match='at least 2 sigmas.*head_radius'):
make_sphere_model(sigmas=(0.33,), relative_radii=(1.0,))
@testing.requires_testing_data
@pytest.mark.parametrize('kwargs, fname', [
[dict(), fname_bem_3],
[dict(conductivity=[0.3]), fname_bem_1],
])
def test_make_bem_model(tmpdir, kwargs, fname):
"""Test BEM model creation from Python with I/O."""
fname_temp = tmpdir.join('temp-bem.fif')
with catch_logging() as log:
model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir,
verbose=True, **kwargs)
log = log.getvalue()
if len(kwargs.get('conductivity', (0, 0, 0))) == 1:
assert 'distance' not in log
else:
assert re.search(r'urfaces is approximately *3\.4 mm', log) is not None
assert re.search(r'inner skull CM is *0\.65 *-9\.62 *43\.85 mm',
log) is not None
model_c = read_bem_surfaces(fname)
_compare_bem_surfaces(model, model_c)
write_bem_surfaces(fname_temp, model)
model_read = read_bem_surfaces(fname_temp)
_compare_bem_surfaces(model, model_c)
_compare_bem_surfaces(model_read, model_c)
# bad conductivity
with pytest.raises(ValueError, match='conductivity must be'):
make_bem_model('sample', 4, [0.3, 0.006], subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_bem_model_topology(tmpdir):
"""Test BEM model topological checks."""
# bad topology (not enough neighboring tris)
makedirs(tmpdir.join('foo', 'bem'))
for fname in ('inner_skull', 'outer_skull', 'outer_skin'):
fname += '.surf'
copy(op.join(subjects_dir, 'sample', 'bem', fname),
str(tmpdir.join('foo', 'bem', fname)))
outer_fname = tmpdir.join('foo', 'bem', 'outer_skull.surf')
rr, tris = read_surface(outer_fname)
tris = tris[:-1]
write_surface(outer_fname, rr, tris[:-1], overwrite=True)
with pytest.raises(RuntimeError, match='Surface outer skull is not compl'):
make_bem_model('foo', None, subjects_dir=tmpdir)
# Now get past this error to reach gh-6127 (not enough neighbor tris)
rr_bad = np.concatenate([rr, np.mean(rr, axis=0, keepdims=True)], axis=0)
write_surface(outer_fname, rr_bad, tris, overwrite=True)
with pytest.raises(RuntimeError, match='Surface outer skull.*triangles'):
make_bem_model('foo', None, subjects_dir=tmpdir)
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.parametrize('cond, fname', [
[(0.3,), fname_bem_sol_1],
[(0.3, 0.006, 0.3), fname_bem_sol_3],
])
def test_bem_solution(tmpdir, cond, fname):
"""Test making a BEM solution from Python with I/O."""
# test degenerate conditions
surf = read_bem_surfaces(fname_bem_1)[0]
pytest.raises(RuntimeError, _ico_downsample, surf, 10) # bad dec grade
s_bad = dict(tris=surf['tris'][1:], ntri=surf['ntri'] - 1, rr=surf['rr'])
pytest.raises(RuntimeError, _ico_downsample, s_bad, 1) # not isomorphic
s_bad = dict(tris=surf['tris'].copy(), ntri=surf['ntri'],
rr=surf['rr']) # bad triangulation
s_bad['tris'][0] = [0, 0, 0]
pytest.raises(RuntimeError, _ico_downsample, s_bad, 1)
s_bad['id'] = 1
pytest.raises(RuntimeError, _assert_complete_surface, s_bad)
s_bad = dict(tris=surf['tris'], ntri=surf['ntri'], rr=surf['rr'].copy())
s_bad['rr'][0] = 0.
pytest.raises(RuntimeError, _get_ico_map, surf, s_bad)
surfs = read_bem_surfaces(fname_bem_3)
pytest.raises(RuntimeError, _assert_inside, surfs[0], surfs[1]) # outside
surfs[0]['id'] = 100 # bad surfs
pytest.raises(RuntimeError, _order_surfaces, surfs)
surfs[1]['rr'] /= 1000.
pytest.raises(RuntimeError, _check_surface_size, surfs[1])
# actually test functionality
fname_temp = op.join(str(tmpdir), 'temp-bem-sol.fif')
# use a model and solution made in Python
for model_type in ('python', 'c'):
if model_type == 'python':
model = make_bem_model('sample', conductivity=cond, ico=2,
subjects_dir=subjects_dir)
else:
model = fname_bem_1 if len(cond) == 1 else fname_bem_3
solution = make_bem_solution(model, verbose=True)
solution_c = read_bem_solution(fname)
_compare_bem_solutions(solution, solution_c)
write_bem_solution(fname_temp, solution)
solution_read = read_bem_solution(fname_temp)
_compare_bem_solutions(solution, solution_c)
_compare_bem_solutions(solution_read, solution_c)
def test_fit_sphere_to_headshape():
"""Test fitting a sphere to digitization points."""
# Create points of various kinds
rad = 0.09
big_rad = 0.12
center = np.array([0.0005, -0.01, 0.04])
dev_trans = np.array([0., -0.005, -0.01])
dev_center = center - dev_trans
dig = [
# Left auricular
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([-1.0, 0.0, 0.0])},
# Nasion
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([0.0, 1.0, 0.0])},
# Right auricular
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'r': np.array([1.0, 0.0, 0.0])},
# Top of the head (extra point)
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EXTRA,
'ident': 0,
'r': np.array([0.0, 0.0, 1.0])},
# EEG points
# Fz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'ident': 0,
'r': np.array([0, .72, .69])},
# F3
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'ident': 1,
'r': np.array([-.55, .67, .50])},
# F4
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'ident': 2,
'r': np.array([.55, .67, .50])},
# Cz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'ident': 3,
'r': np.array([0.0, 0.0, 1.0])},
# Pz
{'coord_frame': FIFF.FIFFV_COORD_HEAD,
'kind': FIFF.FIFFV_POINT_EEG,
'ident': 4,
'r': np.array([0, -.72, .69])},
]
for d in dig:
d['r'] *= rad
d['r'] += center
# Device to head transformation (rotate .2 rad over X-axis)
dev_head_t = Transform('meg', 'head', translation(*(dev_trans)))
info = Info(dig=dig, dev_head_t=dev_head_t)
# Degenerate conditions
pytest.raises(ValueError, fit_sphere_to_headshape, info,
dig_kinds=(FIFF.FIFFV_POINT_HPI,))
pytest.raises(ValueError, fit_sphere_to_headshape, info,
dig_kinds='foo', units='m')
info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
pytest.raises(RuntimeError, fit_sphere_to_headshape, info, units='m')
info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Test with 4 points that match a perfect sphere
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL, FIFF.FIFFV_POINT_EXTRA)
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
kwargs = dict(rtol=1e-3, atol=1e-5)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
# Test with all points
dig_kinds = ('cardinal', FIFF.FIFFV_POINT_EXTRA, 'eeg')
kwargs = dict(rtol=1e-3, atol=1e-3)
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
# Test with some noisy EEG points only.
dig_kinds = 'eeg'
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds,
units='m')
kwargs = dict(rtol=1e-3, atol=1e-2)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, center, **kwargs)
# Test big size
dig_kinds = ('cardinal', 'extra')
info_big = deepcopy(info)
for d in info_big['dig']:
d['r'] -= center
d['r'] *= big_rad / rad
d['r'] += center
with pytest.warns(RuntimeWarning, match='Estimated head size'):
r, oh, od = fit_sphere_to_headshape(info_big, dig_kinds=dig_kinds,
units='mm')
assert_allclose(oh, center * 1000, atol=1e-3)
assert_allclose(r, big_rad * 1000, atol=1e-3)
del info_big
# Test offcenter
dig_kinds = ('cardinal', 'extra')
info_shift = deepcopy(info)
shift_center = np.array([0., -0.03, 0.])
for d in info_shift['dig']:
d['r'] -= center
d['r'] += shift_center
with pytest.warns(RuntimeWarning, match='from head frame origin'):
r, oh, od = fit_sphere_to_headshape(
info_shift, dig_kinds=dig_kinds, units='m')
assert_allclose(oh, shift_center, atol=1e-6)
assert_allclose(r, rad, atol=1e-6)
# Test "auto" mode (default)
# Should try "extra", fail, and go on to EEG
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r, oh, od = fit_sphere_to_headshape(info, units='m')
kwargs = dict(rtol=1e-3, atol=1e-3)
assert_allclose(r, rad, **kwargs)
assert_allclose(oh, center, **kwargs)
assert_allclose(od, dev_center, **kwargs)
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r2, oh2, od2 = fit_sphere_to_headshape(info, units='m')
assert_allclose(r, r2, atol=1e-7)
assert_allclose(oh, oh2, atol=1e-7)
assert_allclose(od, od2, atol=1e-7)
# this one should pass, 1 EXTRA point and 3 EEG (but the fit is terrible)
info = Info(dig=dig[:7], dev_head_t=dev_head_t)
with pytest.warns(RuntimeWarning, match='Only .* head digitization'):
r, oh, od = fit_sphere_to_headshape(info, units='m')
# this one should fail, 1 EXTRA point and 3 EEG (but the fit is terrible)
info = Info(dig=dig[:6], dev_head_t=dev_head_t)
pytest.raises(ValueError, fit_sphere_to_headshape, info, units='m')
pytest.raises(TypeError, fit_sphere_to_headshape, 1, units='m')
run_tests_if_main()
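# A hedged usage note (not part of the test module): these tests are normally
# run through pytest, e.g.
#
#   pytest mne/tests/test_bem.py -k "sphere or bem_solution"
#
# with the data-dependent cases gated behind @testing.requires_testing_data.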
| 41.398977 | 79 | 0.641317 |
7606af1b9446ca05e1481ccb188e83c054d738db
| 111,783 |
py
|
Python
|
statsmodels/regression/linear_model.py
|
anntzer/statsmodels
|
1ccd5cdba4f9949c5c27ac4d44718893e17d7184
|
[
"BSD-3-Clause"
] | 2 |
2020-04-13T15:45:38.000Z
|
2020-06-01T14:41:04.000Z
|
statsmodels/regression/linear_model.py
|
giuliobeseghi/statsmodels
|
1ccd5cdba4f9949c5c27ac4d44718893e17d7184
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/regression/linear_model.py
|
giuliobeseghi/statsmodels
|
1ccd5cdba4f9949c5c27ac4d44718893e17d7184
|
[
"BSD-3-Clause"
] | 1 |
2020-04-13T17:21:27.000Z
|
2020-04-13T17:21:27.000Z
|
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from statsmodels.compat.python import lrange, lzip
from statsmodels.compat.pandas import Appender
import numpy as np
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.tools.tools import pinv_extended
from statsmodels.tools.decorators import (cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from statsmodels.regression._prediction import PredictionResults
from . import _prediction as pred
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR', 'PredictionResults',
'RegressionResultsWrapper']
_fit_regularized_doc =\
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : str
Either 'elastic_net' or 'sqrt_lasso'.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is a
ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
**kwargs
Additional keyword arguments that contain information used when
constructing a model using the formula interface.
Returns
-------
statsmodels.base.elastic_net.RegularizedResults
The regularized results.
Notes
-----
The elastic net uses a combination of L1 and L2 penalties.
The implementation closely follows the glmnet package in R.
The function that is minimized is:
.. math::
0.5*RSS/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
For WLS and GLS, the RSS is calculated using the whitened endog and
exog data.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
The square root lasso approach is a variation of the Lasso
that is largely self-tuning (the optimal tuning parameter
does not depend on the standard deviation of the regression
errors). If the errors are Gaussian, the tuning parameter
can be taken to be
alpha = 1.1 * np.sqrt(n) * norm.ppf(1 - 0.05 / (2 * p))
where n is the sample size and p is the number of predictors.
The square root lasso uses the following keyword arguments:
zero_tol : float
Coefficients below this threshold are treated as zero.
The cvxopt module is required to estimate model using the square root
lasso.
References
----------
.. [*] Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
.. [*] A Belloni, V Chernozhukov, L Wang (2011). Square-root Lasso:
pivotal recovery of sparse signals via conic programming.
Biometrika 98(4), 791-806. https://arxiv.org/pdf/1009.5689.pdf
"""
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.inv(sigma)).T
return sigma, cholsigmainv
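# Illustration (comments only) of the three `sigma` shapes accepted above:
#   _get_sigma(0.5, nobs)            -> scalar, expanded to a length-nobs vector
#   _get_sigma(np.ones(nobs), nobs)  -> per-observation variances, cholsigmainv = 1/sqrt(sigma)
#   _get_sigma(np.eye(nobs), nobs)   -> full covariance, transposed Cholesky of its inverse
# Any other shape raises the ValueError above.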
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
"""Initialize model components."""
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom.
The dof is defined as the rank of the regressor matrix minus 1 if a
constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom.
The dof is defined as the number of observations minus the rank of
the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, x):
"""
Whiten method that must be overwritten by individual models.
Parameters
----------
x : array_like
Data to be whitened.
"""
raise NotImplementedError("Subclasses must implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators.
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance
estimators.
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
**kwargs
Additional keyword arguments that contain information used when
constructing a model using the formula interface.
Returns
-------
RegressionResults
The model estimation results.
See Also
--------
RegressionResults
The results container.
RegressionResults.get_robustcov_results
A method to change the covariance estimator used when fitting the
model.
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if not (hasattr(self, 'pinv_wexog') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(
self.pinv_wexog, np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np.linalg.matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if not (hasattr(self, 'exog_Q') and
hasattr(self, 'exog_R') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np.linalg.matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
else:
raise ValueError('method has to be "pinv" or "qr"')
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit)
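    # A small self-contained check (plain numpy, hypothetical data) of the two
    # solution paths used above: the pseudoinverse ("pinv") and QR ("qr")
    # routes give the same coefficients for a full-rank design.
    #
    #   >>> import numpy as np
    #   >>> rng = np.random.default_rng(0)
    #   >>> X = rng.standard_normal((10, 2)); y = rng.standard_normal(10)
    #   >>> b_pinv = np.linalg.pinv(X) @ y
    #   >>> Q, R = np.linalg.qr(X)
    #   >>> b_qr = np.linalg.solve(R, Q.T @ y)
    #   >>> bool(np.allclose(b_pinv, b_qr))
    #   # -> True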
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model.
exog : array_like, optional
Design / exogenous data. Model exog is used if None.
Returns
-------
array_like
An array of fitted values.
Notes
-----
If the model has not yet been fit, params is not optional.
"""
# JP: this does not look correct for GLMAR
# SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Construct a random number generator for the predictive distribution.
Parameters
----------
params : array_like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array_like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and 'scale'
as arguments and return a random number generator implementing
an ``rvs`` method for simulating random values. Defaults to normal.
Returns
-------
gen
Frozen random number generator object with mean and variance
determined by the fitted linear model. Use the ``rvs`` method
to generate random values.
Notes
-----
        Due to the behavior of ``scipy.stats.distributions`` objects,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen
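    # A hedged usage sketch (scipy only, hypothetical fitted values): the
    # frozen generator returned above has one location per observation, so
    # ``rvs`` must be called with n equal to the number of observations.
    #
    #   >>> import numpy as np
    #   >>> from scipy.stats import norm
    #   >>> fitted = np.array([1.0, 2.0, 3.0])
    #   >>> gen = norm(loc=fitted, scale=np.sqrt(0.25))
    #   >>> gen.rvs(3).shape
    #   # -> (3,)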
class GLS(RegressionModel):
__doc__ = r"""
Generalized Least Squares
%(params)s
sigma : scalar or array
The array or scalar `sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
Attributes
----------
pinv_wexog : ndarray
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
    cholsigmainv : ndarray
        The transpose of the Cholesky decomposition of the inverse of `sigma`.
    df_model : float
        The model degrees of freedom: p - 1, where p is the number of
        regressors including the intercept.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : ndarray
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : ndarray
`sigma` is the n x n covariance structure of the error terms.
wexog : ndarray
Design matrix whitened by `cholsigmainv`
wendog : ndarray
Response variable whitened by `cholsigmainv`
See Also
--------
WLS : Fit a linear model using Weighted Least Squares.
OLS : Fit a linear model using Ordinary Least Squares.
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load(as_pandas=False)
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
>>> print(gls_results.summary())
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
# TODO: add options igls, for iterative fgls if sigma is None
# TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
# store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, x):
"""
GLS whiten method.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
ndarray
The value np.dot(cholsigmainv,X).
See Also
--------
GLS : Fit a linear model using Generalized Least Squares.
"""
x = np.asarray(x)
if self.sigma is None or self.sigma.shape == ():
return x
elif self.sigma.ndim == 1:
if x.ndim == 1:
return x * self.cholsigmainv
else:
return x * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, x)
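    # A minimal sketch (plain numpy, hypothetical numbers) of the diagonal
    # case handled above: whitening scales each row by 1/sqrt(sigma_i), so
    # observations with larger error variance are down-weighted, exactly as
    # in WLS.
    #
    #   >>> import numpy as np
    #   >>> sigma = np.array([1.0, 4.0])
    #   >>> x = np.array([[1.0, 2.0], [3.0, 4.0]])
    #   >>> (1 / np.sqrt(sigma))[:, None] * x
    #   # -> [[1.0, 2.0], [1.5, 2.0]]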
def loglike(self, params):
r"""
Compute the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array_like
The model parameters.
Returns
-------
float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\frac{n}{2}\log\left(\left(Y-\hat{Y}\right)^{\prime}
\left(Y-\hat{Y}\right)\right)
-\frac{n}{2}\left(1+\log\left(\frac{2\pi}{n}\right)\right)
-\frac{1}{2}\log\left(\left|\Sigma\right|\right)
Y and Y-hat are whitened.
"""
# TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
# FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim == 2:
det = np.linalg.slogdet(self.sigma)
llf -= .5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""
Compute weights for calculating Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
if self.sigma is None or self.sigma.shape == ():
return np.ones(self.exog.shape[0])
elif self.sigma.ndim == 1:
return self.cholsigmainv
else:
return np.diag(self.cholsigmainv)
@Appender(_fit_regularized_doc)
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
if not np.isscalar(alpha):
alpha = np.asarray(alpha)
# Need to adjust since RSS/n term in elastic net uses nominal
# n in denominator
if self.sigma is not None:
alpha = alpha * np.sum(1 / np.diag(self.sigma)) / len(self.endog)
rslt = OLS(self.wendog, self.wexog).fit_regularized(
method=method, alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
profile_scale=profile_scale,
refit=refit, **kwargs)
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper)
rrslt = RegularizedResults(self, rslt.params)
return RegularizedResultsWrapper(rrslt)
class WLS(RegressionModel):
__doc__ = """
Weighted Least Squares
The weights are presumed to be (proportional to) the inverse of
the variance of the observations. That is, if the variables are
to be transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array_like, optional
A 1d array of weights. If you supply 1/W then the variables are
        pre-multiplied by 1/sqrt(W). If no weights are supplied the
default value is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : ndarray
The stored weights supplied as an argument.
See Also
--------
GLS : Fit a linear model using Generalized Least Squares.
OLS : Fit a linear model using Ordinary Least Squares.
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
Examples
--------
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652 , 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size != nobs and weights.shape[0] != nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, x):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights).
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The whitened values sqrt(weights)*X.
"""
x = np.asarray(x)
if x.ndim == 1:
return x * np.sqrt(self.weights)
elif x.ndim == 2:
return np.sqrt(self.weights)[:, None] * x
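    # A minimal sketch (plain numpy, hypothetical numbers): multiplying by
    # sqrt(weights) means an observation with weight 4 contributes to the
    # least-squares objective like two identical unit-weight observations.
    #
    #   >>> import numpy as np
    #   >>> np.sqrt(np.array([1.0, 4.0])) * np.array([2.0, 3.0])
    #   # -> [2.0, 6.0]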
def loglike(self, params):
r"""
        Compute the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array_like
The parameter estimates.
Returns
-------
float
The value of the log-likelihood function for a WLS Model.
Notes
        -----
.. math:: -\frac{n}{2}\log SSR
-\frac{n}{2}\left(1+\log\left(\frac{2\pi}{n}\right)\right)
-\frac{1}{2}\log\left(\left|W\right|\right)
        where :math:`W` is a diagonal weight matrix and
:math:`SSR=\left(Y-\hat{Y}\right)^\prime W \left(Y-\hat{Y}\right)` is
the sum of the squared weighted residuals.
"""
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""
Compute the weights for calculating the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
return self.weights
@Appender(_fit_regularized_doc)
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
# Docstring attached below
if not np.isscalar(alpha):
alpha = np.asarray(alpha)
# Need to adjust since RSS/n in elastic net uses nominal n in
# denominator
alpha = alpha * np.sum(self.weights) / len(self.weights)
rslt = OLS(self.wendog, self.wexog).fit_regularized(
method=method, alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
profile_scale=profile_scale,
refit=refit, **kwargs)
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper)
rrslt = RegularizedResults(self, rslt.params)
return RegularizedResultsWrapper(rrslt)
class OLS(WLS):
__doc__ = """
Ordinary Least Squares
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
WLS : Fit a linear model using Weighted Least Squares.
GLS : Fit a linear model using Generalized Least Squares.
Notes
-----
No constant is added by the model unless you are using formulas.
Examples
--------
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
# TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array_like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
float
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf
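    # A quick numeric check (plain numpy, hypothetical numbers) of the two
    # branches above: the profile log-likelihood equals the fixed-scale
    # log-likelihood evaluated at the MLE of the scale, scale = ssr / nobs.
    #
    #   >>> import numpy as np
    #   >>> nobs, ssr = 10.0, 2.5
    #   >>> profile = -nobs/2*np.log(2*np.pi) - nobs/2*np.log(ssr/nobs) - nobs/2
    #   >>> scale = ssr / nobs
    #   >>> fixed = -nobs/2*np.log(2*np.pi*scale) - ssr/(2*scale)
    #   >>> bool(np.allclose(profile, fixed))
    #   # -> True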
def whiten(self, x):
"""
OLS model whitener does nothing.
Parameters
----------
x : array_like
Data to be whitened.
Returns
-------
array_like
The input array unmodified.
See Also
--------
OLS : Fit a linear model using Ordinary Least Squares.
"""
return x
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array_like
The parameter vector at which the score function is
computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale
def _setup_score_hess(self):
y = self.wendog
if hasattr(self, 'offset'):
y = y - self.offset
self._wendog_xprod = np.sum(y * y)
self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
self._wexog_x_wendog = np.dot(self.wexog.T, y)
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array_like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
ndarray
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale
def hessian_factor(self, params, scale=None, observed=True):
"""
Calculate the weights for the Hessian.
Parameters
----------
params : ndarray
The parameter at which Hessian is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
ndarray
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
"""
return np.ones(self.exog.shape[0])
@Appender(_fit_regularized_doc)
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
# In the future we could add support for other penalties, e.g. SCAD.
if method not in ("elastic_net", "sqrt_lasso"):
msg = "Unknown method '%s' for fit_regularized" % method
raise ValueError(msg)
# Set default parameters.
defaults = {"maxiter": 50, "cnvrg_tol": 1e-10,
"zero_tol": 1e-8}
defaults.update(kwargs)
if method == "sqrt_lasso":
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper
)
params = self._sqrt_lasso(alpha, refit, defaults["zero_tol"])
results = RegularizedResults(self, params)
return RegularizedResultsWrapper(results)
from statsmodels.base.elastic_net import fit_elasticnet
if L1_wt == 0:
return self._fit_ridge(alpha)
# If a scale parameter is passed in, the non-profile
# likelihood (residual sum of squares divided by -2) is used,
# otherwise the profile likelihood is used.
if profile_scale:
loglike_kwds = {}
score_kwds = {}
hess_kwds = {}
else:
loglike_kwds = {"scale": 1}
score_kwds = {"scale": 1}
hess_kwds = {"scale": 1}
return fit_elasticnet(self, method=method,
alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
loglike_kwds=loglike_kwds,
score_kwds=score_kwds,
hess_kwds=hess_kwds,
refit=refit,
check_step=False,
**defaults)
def _sqrt_lasso(self, alpha, refit, zero_tol):
try:
import cvxopt
except ImportError:
msg = 'sqrt_lasso fitting requires the cvxopt module'
raise ValueError(msg)
n = len(self.endog)
p = self.exog.shape[1]
h0 = cvxopt.matrix(0., (2*p+1, 1))
h1 = cvxopt.matrix(0., (n+1, 1))
h1[1:, 0] = cvxopt.matrix(self.endog, (n, 1))
G0 = cvxopt.spmatrix([], [], [], (2*p+1, 2*p+1))
for i in range(1, 2*p+1):
G0[i, i] = -1
G1 = cvxopt.matrix(0., (n+1, 2*p+1))
G1[0, 0] = -1
G1[1:, 1:p+1] = self.exog
G1[1:, p+1:] = -self.exog
c = cvxopt.matrix(alpha / n, (2*p + 1, 1))
c[0] = 1 / np.sqrt(n)
from cvxopt import solvers
solvers.options["show_progress"] = False
rslt = solvers.socp(c, Gl=G0, hl=h0, Gq=[G1], hq=[h1])
x = np.asarray(rslt['x']).flat
bp = x[1:p+1]
bn = x[p+1:]
params = bp - bn
if not refit:
return params
ii = np.flatnonzero(np.abs(params) > zero_tol)
rfr = OLS(self.endog, self.exog[:, ii]).fit()
params *= 0
params[ii] = rfr.params
return params
def _fit_ridge(self, alpha):
"""
Fit a linear model using ridge regression.
Parameters
----------
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Notes
-----
Equivalent to fit_regularized with L1_wt = 0 (but implemented
more efficiently).
"""
u, s, vt = np.linalg.svd(self.exog, 0)
v = vt.T
q = np.dot(u.T, self.endog) * s
s2 = s * s
if np.isscalar(alpha):
sd = s2 + alpha * self.nobs
params = q / sd
params = np.dot(v, params)
else:
alpha = np.asarray(alpha)
vtav = self.nobs * np.dot(vt, alpha[:, None] * v)
d = np.diag(vtav) + s2
np.fill_diagonal(vtav, d)
r = np.linalg.solve(vtav, q)
params = np.dot(v, r)
from statsmodels.base.elastic_net import RegularizedResults
return RegularizedResults(self, params)
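    # A hedged cross-check (plain numpy, hypothetical data) of the SVD-based
    # ridge solution above: for a scalar penalty it coincides with the
    # textbook normal-equations form (X'X + nobs*alpha*I)^{-1} X'y.
    #
    #   >>> import numpy as np
    #   >>> rng = np.random.default_rng(0)
    #   >>> X = rng.standard_normal((20, 3)); y = rng.standard_normal(20)
    #   >>> alpha, n = 0.5, X.shape[0]
    #   >>> direct = np.linalg.solve(X.T @ X + n * alpha * np.eye(3), X.T @ y)
    #   >>> u, s, vt = np.linalg.svd(X, 0)
    #   >>> via_svd = vt.T @ ((u.T @ y) * s / (s**2 + n * alpha))
    #   >>> bool(np.allclose(direct, via_svd))
    #   # -> True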
class GLSAR(GLS):
__doc__ = """
Generalized Least Squares with AR covariance structure
%(params)s
rho : int
The order of the autoregressive covariance.
%(extra_params)s
Notes
-----
GLSAR is considered to be experimental.
    The linear autoregressive process of order p--AR(p)--is defined as
        epsilon_t = rho_1 * epsilon_{t-1} + ... + rho_p * epsilon_{t-p} + eta_t
    where epsilon_t are the regression errors and eta_t is white noise.
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
# TODO: Complete docstring
def __init__(self, endog, exog=None, rho=1, missing='none', hasconst=None,
**kwargs):
# this looks strange, interpreting rho as order if it is int
        if isinstance(rho, (int, np.integer)):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0, 1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
# JP this looks wrong, should be a regression on constant
# results for rho estimate now identical to yule-walker on y
# super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0], 1)),
missing=missing, hasconst=None,
**kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwargs):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
        The model is assumed to have AR(p) errors; the AR(p) parameters and
        regression coefficients are estimated iteratively.
Parameters
----------
maxiter : int, optional
The number of iterations.
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if max(abs(last - current) / abs(last)) < rtol.
**kwargs
Additional keyword arguments passed to `fit`.
Returns
-------
RegressionResults
The results computed using an iterative fit.
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho': [self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we did not
# update rho
results = self.fit(history=history, **kwargs)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results
def whiten(self, x):
"""
Whiten a series of columns according to an AR(p) covariance structure.
Whitening using this method drops the initial p observations.
Parameters
----------
x : array_like
The data to be whitened.
Returns
-------
ndarray
The whitened data.
"""
# TODO: notation for AR process
x = np.asarray(x, np.float64)
_x = x.copy()
# the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_x[(i + 1):] = _x[(i + 1):] - self.rho[i] * x[0:-(i + 1)]
return _x[self.order:]
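    # A minimal AR(1) sketch (plain numpy, hypothetical numbers): with
    # rho = [0.5] the transformation above returns x[1:] - 0.5 * x[:-1] and
    # drops the first observation.
    #
    #   >>> import numpy as np
    #   >>> x = np.array([1.0, 2.0, 4.0, 8.0])
    #   >>> x[1:] - 0.5 * x[:-1]
    #   # -> [1.5, 3.0, 6.0]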
def yule_walker(x, order=1, method="unbiased", df=None, inv=False,
demean=True):
"""
Estimate AR(p) parameters from a sequence using the Yule-Walker equations.
Unbiased or maximum-likelihood estimator (mle)
Parameters
----------
x : array_like
A 1d array.
order : int, optional
The order of the autoregressive process. Default is 1.
method : str, optional
Method can be 'unbiased' or 'mle' and this determines
denominator in estimate of autocorrelation function (ACF) at
lag k. If 'mle', the denominator is n=X.shape[0], if 'unbiased'
the denominator is n-k. The default is unbiased.
df : int, optional
Specifies the degrees of freedom. If `df` is supplied, then it
        is assumed that `X` has `df` degrees of freedom rather than `n`.
Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is
False.
demean : bool
        If True, the mean is subtracted from `X` before estimation.
Returns
-------
rho : ndarray
AR(p) coefficients computed using the Yule-Walker method.
sigma : float
The estimate of the residual standard deviation.
See Also
--------
burg : Burg's AR estimator.
Notes
-----
See https://en.wikipedia.org/wiki/Autoregressive_moving_average_model for
further details.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load(as_pandas=False)
>>> rho, sigma = sm.regression.yule_walker(data.endog, order=4,
... method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
# TODO: define R better, look back at notes and technical notes on YW.
# First link here is useful
# http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
x = np.array(x, dtype=np.float64)
if demean:
x -= x.mean()
n = df or x.shape[0]
# this handles df_resid ie., n - p
adj_needed = method == "unbiased"
if x.ndim > 1 and x.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (x ** 2).sum() / n
for k in range(1, order+1):
r[k] = (x[0:-k] * x[k:]).sum() / (n - k * adj_needed)
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
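# A hand-computable sketch (plain numpy, hypothetical data) of the order=1
# case above: the Yule-Walker solution reduces to rho_1 = r_1 / r_0, the
# lag-one sample autocorrelation (both terms using the 'mle' denominator n).
#
#   >>> import numpy as np
#   >>> x = np.array([1.0, -1.0, 1.0, -1.0, 1.0])
#   >>> x = x - x.mean()
#   >>> r0 = (x**2).sum() / len(x)
#   >>> r1 = (x[:-1] * x[1:]).sum() / len(x)
#   >>> r1 / r0
#   # -> -0.8 (up to floating point)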
def burg(endog, order=1, demean=True):
"""
    Compute Burg's AR(p) parameter estimator.
Parameters
----------
endog : array_like
The endogenous variable.
order : int, optional
Order of the AR. Default is 1.
demean : bool, optional
Flag indicating to subtract the mean from endog before estimation.
Returns
-------
rho : ndarray
The AR(p) coefficients computed using Burg's algorithm.
sigma2 : float
The estimate of the residual variance.
See Also
--------
yule_walker : Estimate AR parameters using the Yule-Walker method.
Notes
-----
    The AR model estimated includes a constant that is estimated using the sample
mean (see [1]_). This value is not reported.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load(as_pandas=True)
>>> rho, sigma2 = sm.regression.linear_model.burg(data.endog, order=4)
>>> rho
array([ 1.30934186, -0.48086633, -0.20185982, 0.05501941])
>>> sigma2
271.2467306963966
"""
# Avoid circular imports
from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_burg
endog = np.squeeze(np.asarray(endog))
if endog.ndim != 1:
raise ValueError('endog must be 1-d or squeezable to 1-d.')
order = int(order)
if order < 1:
        raise ValueError('order must be an integer of at least 1')
if demean:
endog = endog - endog.mean()
pacf, sigma = pacf_burg(endog, order, demean=demean)
ar, _ = levinson_durbin_pacf(pacf)
return ar, sigma[-1]
class RegressionResults(base.LikelihoodModelResults):
r"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Parameters
----------
model : RegressionModel
The regression model instance.
params : ndarray
The estimated parameters.
normalized_cov_params : ndarray
The normalized covariance parameters.
scale : float
The estimated scale of the residuals.
cov_type : str
The covariance estimator used in the results.
cov_kwds : dict
Additional keywords used in the covariance specification.
use_t : bool
Flag indicating to use the Student's t in inference.
**kwargs
Additional keyword arguments used to initialize the results.
Attributes
----------
pinv_wexog
See model class docstring for implementation details.
cov_type
Parameter covariance estimator used for standard errors and t-stats.
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present.
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators.
model
A pointer to the model instance that called fit() or results.
params
The linear coefficients that minimize the least squares
criterion. This is usually called Beta for the classical
linear model.
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
super(RegressionResults, self).__init__(
model, params, normalized_cov_params, scale)
self._cache = {}
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {
'description': 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
use_t = True # TODO: class default
self.use_t = use_t
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
for key in kwargs:
setattr(self, key, kwargs[key])
def conf_int(self, alpha=.05, cols=None):
"""
Compute the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Columns to included in returned confidence intervals.
Returns
-------
array_like
The confidence intervals.
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
"""Number of observations n."""
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
"""The predicted values for the original (unwhitened) design."""
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
"""
The residuals of the transformed/whitened regressand and regressor(s).
"""
return self.model.wendog - self.model.predict(
self.params, self.model.wexog)
@cache_readonly
def resid(self):
"""The residuals of the model."""
return self.model.endog - self.model.predict(
self.params, self.model.exog)
# TODO: fix writable example
@cache_writable()
def scale(self):
"""
A scale factor for the covariance matrix.
        The default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression.
"""
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
"""Sum of squared (whitened) residuals."""
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
"""The total (weighted) sum of squares centered about the mean."""
model = self.model
weights = getattr(model, 'weights', None)
sigma = getattr(model, 'sigma', None)
if weights is not None:
mean = np.average(model.endog, weights=weights)
return np.sum(weights * (model.endog - mean)**2)
elif sigma is not None:
# Exactly matches WLS when sigma is diagonal
iota = np.ones_like(model.endog)
iota = model.whiten(iota)
mean = model.wendog.dot(iota) / iota.dot(iota)
err = model.endog - mean
err = model.whiten(err)
return np.sum(err**2)
else:
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
"""
Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable.
"""
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
"""
The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares is used.
"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
"""
R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted.
"""
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
"""
Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included.
"""
return 1 - (np.divide(self.nobs - self.k_constant, self.df_resid)
* (1 - self.rsquared))
@cache_readonly
def mse_model(self):
"""
        Mean squared error of the model.
The explained sum of squares divided by the model degrees of freedom.
"""
if np.all(self.df_model == 0.0):
return np.full_like(self.ess, np.nan)
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
"""
Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom.
"""
if np.all(self.df_resid == 0.0):
return np.full_like(self.ssr, np.nan)
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
"""
Total mean squared error.
The uncentered total sum of squares divided by the number of
observations.
"""
if np.all(self.df_resid + self.df_model == 0.0):
return np.full_like(self.centered_tss, np.nan)
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
"""
F-statistic of the fully specified model.
Calculated as the mean squared error of the model divided by the mean
squared error of the residuals if the nonrobust covariance is used.
Otherwise computed using a Wald-like quadratic form that tests whether
all coefficients (excluding the constant) are zero.
"""
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all
# dummies but no constant regressor?
# TODO: Restats as LM test by projecting orthogonalizing
# to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
if mat.size == 0: # see #3642
return np.nan
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
"""The p-value of the F-statistic."""
# Special case for df_model 0
if self.df_model == 0:
return np.full_like(self.fvalue, np.nan)
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
"""The standard errors of the parameter estimates."""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
r"""
        Akaike's information criterion.
For a model with a constant :math:`-2llf + 2(df\_model + 1)`. For a
model without a constant :math:`-2llf + 2(df\_model)`.
"""
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
r"""
        Bayes' information criterion.
For a model with a constant :math:`-2llf + \log(n)(df\_model+1)`.
For a model without a constant :math:`-2llf + \log(n)(df\_model)`.
"""
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
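    # Worked arithmetic for the two criteria above (hypothetical numbers):
    # with llf = -100, df_model = 3 and a constant, aic = 200 + 2*4 = 208,
    # and for nobs = 50, bic = 200 + log(50)*4, approximately 215.65.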
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T,
self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
# TODO: make these properties reset bse
def _HCCM(self, scale):
H = np.dot(self.model.pinv_wexog,
scale[:, None] * self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
Heteroscedasticity robust covariance matrix. See HC0_se.
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
Heteroscedasticity robust covariance matrix. See HC1_se.
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
Heteroscedasticity robust covariance matrix. See HC2_se.
"""
# probably could be optimized
wexog = self.model.wexog
h = np.diag(wexog @ self.normalized_cov_params @ wexog.T)
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
Heteroscedasticity robust covariance matrix. See HC3_se.
"""
wexog = self.model.wexog
h = np.diag(wexog @ self.normalized_cov_params @ wexog.T)
self.het_scale = (self.wresid / (1 - h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
White's (1980) heteroskedasticity robust standard errors.
Notes
-----
        Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
        where e_i = resid[i].
        When HC0_se or cov_HC0 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is just
        resid**2.
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
        Defined as sqrt(diag(n/(n-p)*HC_0)).
        When HC1_se or cov_HC1 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        n/(n-p)*resid**2.
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
        Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
        where h_ii = x_i(X.T X)^(-1)x_i.T.
        When HC2_se or cov_HC2 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        resid^(2)/(1-h_ii).
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
MacKinnon and White's (1985) heteroskedasticity robust standard errors.
Notes
-----
        Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
        where h_ii = x_i(X.T X)^(-1)x_i.T.
        When HC3_se or cov_HC3 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        resid^(2)/(1-h_ii)^(2).
"""
return np.sqrt(np.diag(self.cov_HC3))
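    # A hedged end-to-end check (numpy plus the OLS class above, hypothetical
    # data) of the HC0 sandwich described in the notes: building
    # (X'X)^{-1} X' diag(e_i^2) X (X'X)^{-1} by hand reproduces HC0_se.
    #
    #   >>> import numpy as np
    #   >>> import statsmodels.api as sm
    #   >>> rng = np.random.default_rng(0)
    #   >>> X = sm.add_constant(rng.standard_normal((50, 2)))
    #   >>> y = X @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(50)
    #   >>> res = sm.OLS(y, X).fit()
    #   >>> pinv = np.linalg.pinv(X)
    #   >>> sandwich = pinv @ np.diag(res.resid**2) @ pinv.T
    #   >>> bool(np.allclose(np.sqrt(np.diag(sandwich)), res.HC0_se))
    #   # -> True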
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
array_like
The array `wresid` normalized by the sqrt of the scale to have
unit variance.
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# do not divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
            True if nested, otherwise False.
Notes
-----
        A model nests another model if the regressors in the smaller
model are spanned by the regressors in the larger model and
the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:, None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of
# regressors
return np.allclose(score_l2, 0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""
Use Lagrange Multiplier test to test a set of linear restrictions.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
demean : bool
            Flag indicating whether to demean the scores based on the
            residuals from the restricted model. If True, the covariance of
            the scores is used and the LM test is identical to the large
sample version of the LR test.
use_lr : bool
A flag indicating whether to estimate the covariance of the model
            scores using the unrestricted model. Setting this to True improves
the power of the test.
Returns
-------
lm_value : float
            The test statistic, which has a chi2 distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The LM test examines whether the scores from the restricted model are
0. If the null is true, and the restrictions are valid, then the
parameters of the restricted model should be close to the minimum of
the sum of squared errors, and so the scores should be close to zero,
on average.
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:, None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:, None]
demean = False
if demean:
scores = scores - scores.mean(0)[None, :]
        # Form matters here. If homoskedastic, this can be sigma^2 (X'X)^-1
# If Heteroskedastic then the form below is fine
# If HAC then need to use HAC
# If Cluster, should use cluster
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
xpx = np.dot(wexog.T, wexog) / n
s_inv = inv(sigma2 * xpx)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
s_inv = inv(np.dot(scores.T, scores) / n)
elif cov_type == 'HAC':
maxlags = self.cov_kwds['maxlags']
s_inv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
# cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
s_inv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * (s @ s_inv @ s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""
Use F test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
The test statistic which has an F distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in
df between models.
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
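    # Worked arithmetic for the statistic above (hypothetical numbers): with
    # ssr_restr = 120, ssr_full = 100, df_full = 50 and two restrictions
    # (df_diff = 2), f_value = ((120 - 100) / 2) / (100 / 50) = 5.0, which is
    # then referred to an F(2, 50) distribution.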
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
The likelihood ratio which is chisquare distributed with df_diff
degrees of freedom.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in df
between models.
Notes
-----
The exact likelihood ratio is valid for homoskedastic data,
and is defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the
model. With :math:`D` distributed as chisquare with df equal
to difference in number of parameters or equivalently
difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the
residuals from null model and the regressors from the
alternative model and :math:`S` is the covariance of the
scores, :math:`s_{i}`. The covariance of the scores is
estimated using the same estimator as in the alternative
model.
This test compares the loglikelihood of the two models. This
may not be a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results
without taking unspecified heteroscedasticity or correlation
into account.
"""
# TODO: put into separate function, needs tests
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (
getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs):
"""
Create new results instance with robust covariance as default.
Parameters
----------
cov_type : str
The type of robust sandwich estimator to use. See Notes below.
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`True` if the cov_type is nonrobust, and `False` in all other
cases.
**kwargs
Required or optional arguments for robust covariance calculation.
See Notes below.
Returns
-------
RegressionResults
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
            - `maxlags` integer (required) : number of lags to use
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
            - `maxlags` integer (required) : number of lags to use
- `kernel` callable or str (optional). The available kernels
are ['bartlett', 'uniform']. The default is Bartlett.
- `use_correction` False or string in ['hac', 'cluster'] (optional).
              If False the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
- `df_correction` bool (optional) The adjustment to df_resid, see
cov_type 'cluster' above
# TODO: we need more options here
- 'hac-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series
for each panel unit or cluster need to be stacked. The
membership to a timeseries of an individual or group can
be either specified by group indicators or by increasing
time periods.
keywords
- either `groups` or `time` : array_like (required)
`groups` : indicator for groups
`time` : index of time periods
            - `maxlags` integer (required) : number of lags to use
- `kernel` callable or str (optional)
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional)
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
# TODO: we need more options here
        Reminder: `use_correction` in "hac-groupsum" and "hac-panel" is not
        a bool; it needs to be in [False, 'hac', 'cluster'].
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
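        Examples
        --------
        Hedged, illustrative usage only (``y``, ``X`` and ``firm_ids`` below
        are hypothetical user-supplied arrays, not objects from this module):

        >>> import statsmodels.api as sm
        >>> res = sm.OLS(y, X).fit()
        >>> res_hc3 = res.get_robustcov_results(cov_type='HC3')
        >>> res_hac = res.get_robustcov_results(cov_type='HAC', maxlags=4)
        >>> res_clu = res.get_robustcov_results(cov_type='cluster',
        ...                                     groups=firm_ids)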
"""
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.base.covtype import normalize_cov_type, descriptions
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwargs:
kwargs['weights_func'] = kwargs.pop('kernel')
if 'weights_func' in kwargs and not callable(kwargs['weights_func']):
kwargs['weights_func'] = sw.kernel_dict[kwargs['weights_func']]
# TODO: make separate function that returns a robust cov plus info
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t': use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwargs.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
                # user did not explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwargs, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwargs
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = descriptions['fixed_scale']
res.cov_kwds['scale'] = scale = kwargs.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwargs:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
# TODO: check if required, default in cov_hac_simple
maxlags = kwargs['maxlags']
res.cov_kwds['maxlags'] = maxlags
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwargs.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags,
correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(
self, nlags=maxlags, weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
# cluster robust standard errors, one- or two-way
groups = kwargs['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwargs.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(
self, groups, use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:, 0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(
self, groups, use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
            # panel HAC (heteroscedasticity and autocorrelation) robust standard errors
res.cov_kwds['time'] = time = kwargs.get('time', None)
res.cov_kwds['groups'] = groups = kwargs.get('groups', None)
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
time = np.asarray(time)
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwargs['time']
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(
self, maxlags, time, weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
@Appender(pred.get_prediction.__doc__)
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwargs):
return pred.get_prediction(
self, exog=exog, transform=transform, weights=weights,
row_labels=row_labels, **kwargs)
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the Regression Results.
Parameters
----------
yname : str, optional
Name of endogenous (response) variable. The Default is `y`.
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : A class that holds summary results.
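        Examples
        --------
        Illustrative sketch only; ``y`` and ``X`` are assumed user data:

        >>> import statsmodels.api as sm
        >>> results = sm.OLS(y, sm.add_constant(X)).fit()
        >>> print(results.summary())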
"""
from statsmodels.stats.stattools import (
jarque_bera, omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
# TODO: Avoid adding attributes in non-__init__
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
# TODO not used yet
# diagn_left_header = ['Models stats']
# diagn_right_header = ['Residual stats']
# TODO: requiring list/iterable is a bit annoying
# need more control over formatting
# TODO: default do not work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
rsquared_type = '' if self.k_constant else ' (uncentered)'
top_right = [('R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared]),
('Adj. R-squared' + rsquared_type + ':',
["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue]),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:',
["%#8.3f" % durbin_watson(self.wresid)]
),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
# add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended?
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text)
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""
Experimental summary function to summarize the regression results.
Parameters
----------
yname : str
The name of the dependent variable (optional).
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces the
default title.
alpha : float
The significance level for the confidence intervals.
float_format : str
The format for floats in parameters summary.
Returns
-------
Summary
Instance holding the summary tables and text, which can be printed
or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary
A class that holds summary results.
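        Examples
        --------
        Illustrative sketch only; ``res`` is assumed to be a fitted
        regression results instance:

        >>> print(res.summary2(float_format="%.3f"))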
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) # in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
smry.add_text(warn)
if condno > 1000:
warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
    Results class for an OLS model.
Parameters
----------
model : RegressionModel
The regression model instance.
params : ndarray
The estimated parameters.
normalized_cov_params : ndarray
The normalized covariance parameters.
scale : float
The estimated scale of the residuals.
cov_type : str
The covariance estimator used in the results.
cov_kwds : dict
Additional keywords used in the covariance specification.
use_t : bool
Flag indicating to use the Student's t in inference.
**kwargs
Additional keyword arguments used to initialize the results.
See Also
--------
RegressionResults
        Results store for WLS and GLS models.
Notes
-----
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
"""
def get_influence(self):
"""
Calculate influence and outlier measures.
Returns
-------
OLSInfluence
The instance containing methods to calculate the main influence and
outlier measures for the OLS regression.
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
A class that exposes methods to examine observation influence.
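        Examples
        --------
        Illustrative sketch only; ``res`` is assumed to be a fitted
        ``OLSResults`` instance:

        >>> infl = res.get_influence()
        >>> cooks_d, cooks_pvals = infl.cooks_distance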
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Test observations for outliers according to method.
Parameters
----------
method : str
The method to use in the outlier test. Must be one of:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
            - `holm-sidak` : step-down method using Sidak adjustments
            - `holm` : step-down method using Bonferroni adjustments
            - `simes-hochberg` : step-up method (independent)
            - `hommel` : closed method based on Simes tests (non-negative)
            - `fdr_bh` : Benjamini/Hochberg (non-negative)
            - `fdr_by` : Benjamini/Yekutieli (negative)
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
The familywise error rate (FWER).
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below.
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be
sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations
with multiple testing corrected p-values strictly below the cutoff.
            The returned array or dataframe can be empty if there are no
            outlier candidates at the specified cutoff.
Returns
-------
array_like
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
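        Examples
        --------
        Illustrative sketch only; ``res`` is assumed to be a fitted
        ``OLSResults`` instance:

        >>> outliers = res.outlier_test(method='fdr_bh', alpha=0.05)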
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha, labels=labels,
order=order, cutoff=cutoff)
def el_test(self, b0_vals, param_nums, return_weights=0, ret_params=0,
method='nm', stochastic_exog=1):
"""
Test single or joint hypotheses using Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested.
param_nums : 1darray
The parameter number to be tested.
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. The default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. The default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load(as_pandas=False)
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
        >>> fitted.params
        array([-39.91967442,  0.7156402 ,  1.29528612, -0.15212252])
        >>> fitted.rsquared
        0.91357690446068196
        >>> # Test that the slope on the first variable is 0
        >>> fitted.el_test([0], [1])
        (27.248146353888796, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress(
[],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0,
maxfun=10000, maxiter=10000, full_output=1,
disp=0, args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None,
lower_bound=None, method='nm', stochastic_exog=True):
"""
Compute the confidence interval using Empirical Likelihood.
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired.
sig : float
The significance level. Default is 0.05.
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
The default is 'nm'.
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. The default is True.
Returns
-------
lowerl : float
The lower bound of the confidence interval.
upperl : float
The upper bound of the confidence interval.
See Also
--------
el_test : Test parameters using Empirical Likelihood.
Notes
-----
        This function uses a Brent-type root finder (`scipy.optimize.brenth`
        in the implementation below) to find the value of beta where
        test_beta([beta], param_num)[1] is equal to the critical value. The
        root finder calls el_test repeatedly at trial values of beta.
The current function value of the last printed optimization should be
the critical value at the desired significance level. For alpha=.05,
the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to do
el_test([lower_limit], [param_num]).
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
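        Examples
        --------
        Illustrative sketch only; ``res`` is assumed to be a fitted
        ``OLSResults`` instance:

        >>> lower, upper = res.conf_int_el(param_num=1, sig=.05)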
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
def f(b0):
return self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0] - r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq': 'columns',
'sresid': 'rows',
'weights': 'rows',
'wresid': 'rows',
'bcov_unscaled': 'cov',
'bcov_scaled': 'cov',
'HC0_se': 'columns',
'HC1_se': 'columns',
'HC2_se': 'columns',
'HC3_se': 'columns',
'norm_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
| 36.566241 | 132 | 0.578961 |
bf2e86812c9255316519b6a977da47f13ec5dd4c
| 2,103 |
py
|
Python
|
Components.py
|
oransimhony/pygame_components
|
b4d0f2fa1d0de3d37d45a8cb73e29559c2f2149b
|
[
"MIT"
] | null | null | null |
Components.py
|
oransimhony/pygame_components
|
b4d0f2fa1d0de3d37d45a8cb73e29559c2f2149b
|
[
"MIT"
] | null | null | null |
Components.py
|
oransimhony/pygame_components
|
b4d0f2fa1d0de3d37d45a8cb73e29559c2f2149b
|
[
"MIT"
] | null | null | null |
import pygame
class Button:
def __init__(self, screen, x, y, w, h, color, hover_color=None, label="", label_color=(0, 0, 0), on_click=None):
self.screen = screen
self.x = x
self.y = y
self.w = w
self.h = h
self.color = color
self.hover_color = hover_color if hover_color else color
self.label = label
self.label_color = label_color
if self.label.strip() != "":
self.text = Text(self.screen, self.x + self.w / 2, self.y + self.h / 2, label_color, text=self.label)
else:
self.text = False
self.on_click = on_click
def render(self):
if self.hover():
pygame.draw.rect(self.screen, self.hover_color, (self.x, self.y, self.w, self.h))
else:
pygame.draw.rect(self.screen, self.color, (self.x, self.y, self.w, self.h))
if self.text:
self.text.render()
def hover(self):
mouseX, mouseY = pygame.mouse.get_pos()
if self.x <= mouseX <= self.x + self.w:
if self.y <= mouseY <= self.y + self.h:
return True
return False
def clicked(self):
if self.hover() and pygame.mouse.get_pressed()[0]:
if self.on_click:
self.on_click()
class Text:
def __init__(self, screen, x, y, color=(0, 0, 0), font=None, text="", size=22, antialias=False, center=True):
pygame.font.init()
if font:
self.font = font
else:
self.font = pygame.font.SysFont('Arial', size)
self.x = x
self.y = y
self.color = color
self.text = text
self.antialias = antialias
self.screen = screen
self.center = center
def render(self):
text_surface = self.font.render(self.text, self.antialias, self.color)
text_rect = text_surface.get_rect()
if self.center:
text_rect.center = (self.x, self.y)
else:
text_rect.x = self.x
text_rect.y = self.y
self.screen.blit(text_surface, text_rect)
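# The demo below is an illustrative usage sketch added for documentation; the
# window size, colors, label and quit callback are assumptions, not part of
# the original module. It only runs when the file is executed directly.
if __name__ == "__main__":
    pygame.init()
    demo_screen = pygame.display.set_mode((320, 240))
    clock = pygame.time.Clock()
    quit_button = Button(
        demo_screen, 60, 90, 200, 60, (200, 200, 200),
        hover_color=(230, 230, 230), label="Quit",
        on_click=lambda: pygame.event.post(pygame.event.Event(pygame.QUIT)))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        demo_screen.fill((255, 255, 255))
        quit_button.render()   # draws the rect and centered label
        quit_button.clicked()  # fires on_click while hovered and pressed
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()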
| 32.353846 | 116 | 0.551593 |
c87d2ee6ae703d50c916dbedc7fcc03936880f71
| 11,839 |
py
|
Python
|
tensorflow/python/saved_model/model_utils/export_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 848 |
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/saved_model/model_utils/export_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 656 |
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/python/saved_model/model_utils/export_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 506 |
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model.model_utils import export_output
from tensorflow.python.saved_model.model_utils import export_utils
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys
class ExportTest(test_util.TensorFlowTestCase):
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_without_receiver_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def({
"input": receiver_tensor
}, {"some_output_3": output_3})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_with_dict_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = {
"foo": array_ops.placeholder(dtypes.int64),
"bar": array_ops.sparse_placeholder(dtypes.float32)}
receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor},
{"some_output_3": output_3}),
"other:head-3":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_1,
{"some_output_3": output_3})
# Note that the alternatives 'other:serving_default' and
        # 'other:head-2' are invalid, because regression and classification
# signatures must take a single string input. Here we verify that
# these invalid signatures are not included in the export_utils.
}
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_with_single_alternatives(self):
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
dtypes.float32)
# Note we are passing single Tensors as values of
# receiver_tensors_alternatives, where normally that is a dict.
# In this case a dict will be created using the default receiver tensor
# name "input".
receiver_tensors_alternatives = {"other1": receiver_tensors_alternative_1,
"other2": receiver_tensors_alternative_2}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor},
{"some_output_3": output_3}),
"other1:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_1},
{"some_output_3": output_3}),
"other2:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_2},
{"some_output_3": output_3})
# Note that the alternatives 'other:serving_default' and 'other:head-2'
        # are invalid, because regression and classification signatures must take
# a single string input. Here we verify that these invalid signatures
# are not included in the export_utils.
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_export_outputs_required(self):
receiver_tensor = constant_op.constant(["11"])
with self.assertRaises(ValueError) as e:
export_utils.build_all_signature_defs(receiver_tensor, None)
self.assertTrue(str(e.exception).startswith(
"export_outputs must be a dict"))
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
@test_util.deprecated_graph_mode_only
def test_build_all_signature_defs_serving_only(self):
receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
output_1 = constant_op.constant([1.])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(outputs=output_1),
"train": export_output.TrainOutput(loss=output_1),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default": signature_def_utils.predict_signature_def(
receiver_tensor, {"output": output_1})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, serving_only=False)
expected_signature_defs.update({
"train": signature_def_utils.supervised_train_signature_def(
receiver_tensor, loss={"loss": output_1})
})
self.assertDictEqual(expected_signature_defs, signature_defs)
@test_util.deprecated_graph_mode_only
def test_export_outputs_for_mode(self):
predictions = {"predictions": constant_op.constant([1.])}
loss = {"loss": constant_op.constant([2.])}
metrics = {
"metrics": (constant_op.constant([3.]), constant_op.constant([4.]))}
expected_metrics = {
"metrics/value": metrics["metrics"][0],
"metrics/update_op": metrics["metrics"][1]
}
def _build_export_output(mode):
return export_utils.export_outputs_for_mode(
mode, None, predictions, loss, metrics)
ret = _build_export_output(KerasModeKeys.TRAIN)
self.assertIn(signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.TrainOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.TEST)
self.assertIn(signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.EvalOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.PREDICT)
self.assertIn(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.PredictOutput)
self.assertEqual(export_out.outputs, predictions)
classes = constant_op.constant(["class5"])
ret = export_utils.export_outputs_for_mode(
KerasModeKeys.PREDICT,
{"classify": export_output.ClassificationOutput(
classes=classes)})
self.assertIn("classify", ret)
export_out = ret["classify"]
self.assertIsInstance(export_out, export_output.ClassificationOutput)
self.assertEqual(export_out.classes, classes)
if __name__ == "__main__":
test.main()
| 40.965398 | 80 | 0.710195 |
6a4bf349ce18d0ff1a0aeec2f724f4290e2da17d
| 3,822 |
py
|
Python
|
Ru_splitter.py
|
mithfin/Sentence-Splitter-for-Russian
|
e4abe23a124f9f5f77490a3328be3632e52b2c0a
|
[
"Apache-2.0"
] | 9 |
2017-03-24T22:30:40.000Z
|
2021-08-19T23:16:57.000Z
|
Ru_splitter.py
|
mithfin/Sentence-Splitter-for-Russian
|
e4abe23a124f9f5f77490a3328be3632e52b2c0a
|
[
"Apache-2.0"
] | null | null | null |
Ru_splitter.py
|
mithfin/Sentence-Splitter-for-Russian
|
e4abe23a124f9f5f77490a3328be3632e52b2c0a
|
[
"Apache-2.0"
] | 2 |
2018-10-07T03:47:28.000Z
|
2019-04-16T17:03:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import re
import numpy as np
import pandas as pd
from keras.models import model_from_json
class Splitter:
def __init__(self):
self.model = model_from_json(open('./Models/keras_model.json').read())
self.model.load_weights('./Models/keras_weights.h5')
self.model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
self.radius = self.model.get_config()[0]['config']['batch_input_shape'][1] / 2
self.length = 2 * self.radius
self.padding = []
for i in range(self.radius):
self.padding.append('^')
header = ['Char']
for i in range(64):
header.append('X' + str(i+1))
embeddings = pd.read_csv('./Models/char_embeddings.csv', names=header)
embeddings_dictionary = {}
for i in xrange(len(embeddings)):
vec = []
for j in xrange (64):
vec += [embeddings['X' + str(j+1)][i]]
embeddings_dictionary[unicode(embeddings['Char'][i], 'utf8')] = vec
embeddings_dictionary[' '] = embeddings_dictionary['_']
embeddings_dictionary['\n '] = embeddings_dictionary['.']
class Embeddings_Reader(dict):
def __missing__(self, key):
return embeddings_dictionary[u'UNK']
self.embeddings_lookup = Embeddings_Reader(embeddings_dictionary)
@staticmethod
def formatting (res):
res = re.sub(u'\xa0', u' ', res)
res = re.sub(u'[\n \r\t]*[\n\r][\n \r\t]*', u'\n', res)
res = re.sub(u'[ \t]+', u' ', res)
res = re.sub(u'\n+', u'\n', res)
return res
@staticmethod
def stop_split(text):
dot_list = map(lambda x: x + '.', text.split('.'))
if dot_list[-1] == '.':
dot_list = dot_list[:-1]
excl_list = reduce(lambda x, y: x + map(lambda z: z + '!', y.split('!')[:-1]) + [y.split('!')[-1]], dot_list,
[])
quest_list = reduce(lambda x, y: x + map(lambda z: z + '?', y.split('?')[:-1]) + [y.split('?')[-1]], excl_list,
[])
return quest_list
@staticmethod
def add_newline(l, stop_type):
if stop_type == 0:
return l + '\n'
else:
return l
@staticmethod
def remove_spaces (sent):
if type(sent) == str:
sent = unicode(sent, 'utf-8')
sent = re.sub(u'^ +', u'', sent)
return sent.encode('utf-8')
def split(self, input_text):
input_text = unicode(input_text, 'utf8')
dw_input_text = self.formatting(input_text)
dw_input_list = self.stop_split(dw_input_text)
padded_list = self.padding + dw_input_list + self.padding
embedded_vectors = collections.deque([])
for linenum in xrange(self.radius, len(padded_list) - self.radius):
left = ''.join(padded_list[linenum - self.radius: linenum + 1])[:-1]
right = ''.join(padded_list[linenum + 1: linenum + self.radius + 1])
features = list(left)[-self.radius:] + list(right)[:self.radius]
feature_vector = map(lambda x: self.embeddings_lookup[x], features)
embedded_vectors.append(feature_vector)
X_text = np.array(embedded_vectors, dtype='float32')
predictions = self.model.predict(X_text)
classes = map (lambda x: 0 if x < 0.5 else 1, predictions)
final_lines = map(self.add_newline, dw_input_list, classes)
final_text = reduce(lambda x, y: x + y, final_lines)
final_text = '\n'.join(map(self.remove_spaces, final_text.splitlines()))
split_text = final_text.split('\n')
return split_text
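# Illustrative usage sketch added for documentation only; it assumes the
# pretrained files under ./Models/ exist and, like the rest of this module,
# Python 2 semantics (unicode/xrange). The sample text is an assumption.
if __name__ == '__main__':
    splitter = Splitter()
    for sentence in splitter.split('Привет, мир. Как дела? Все хорошо!'):
        print(sentence)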
| 35.719626 | 119 | 0.568289 |
2acb63fc123870bc66462797178e583dc18fe8d5
| 176 |
py
|
Python
|
tests/conftest.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 11 |
2021-10-04T19:47:22.000Z
|
2022-03-27T05:27:17.000Z
|
tests/conftest.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 8 |
2021-09-30T13:57:43.000Z
|
2022-03-14T11:20:53.000Z
|
tests/conftest.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 4 |
2021-11-13T10:46:37.000Z
|
2022-03-27T05:27:20.000Z
|
import logging
def pytest_configure(config):
"""Flake8 is very verbose by default. Silence it.""" # noqa: DAR101
logging.getLogger("flake8").setLevel(logging.ERROR)
| 25.142857 | 72 | 0.721591 |
0a4a8a621ed1fb8a47f6046407125abf84c55ec6
| 8,972 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/_default_security_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728 |
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/_default_security_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773 |
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/_default_security_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916 |
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DefaultSecurityRulesOperations(object):
"""DefaultSecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
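    Example (illustrative sketch only; the credential type, subscription id
    and resource names are assumptions, not part of this module)::

        from azure.identity import DefaultAzureCredential
        from azure.mgmt.network import NetworkManagementClient

        client = NetworkManagementClient(DefaultAzureCredential(),
                                         "<subscription-id>")
        for rule in client.default_security_rules.list("my-rg", "my-nsg"):
            print(rule.name)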
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all default security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_06_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
default_security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified default network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param default_security_rule_name: The name of the default security rule.
:type default_security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'} # type: ignore
| 48.497297 | 236 | 0.672314 |
1d19ba241cc0de3c1e19d0d1ea860cd18bb43bb1
| 645 |
py
|
Python
|
rentals/migrations/0003_auto_20180308_1706.py
|
dpakach/RentalService
|
2fe2c899b7f149ed3f0fc475100beaa0bc76619e
|
[
"MIT"
] | null | null | null |
rentals/migrations/0003_auto_20180308_1706.py
|
dpakach/RentalService
|
2fe2c899b7f149ed3f0fc475100beaa0bc76619e
|
[
"MIT"
] | 1 |
2018-11-06T03:17:14.000Z
|
2018-11-06T03:17:14.000Z
|
rentals/migrations/0003_auto_20180308_1706.py
|
dpakach/RentalService
|
2fe2c899b7f149ed3f0fc475100beaa0bc76619e
|
[
"MIT"
] | 1 |
2020-02-04T10:44:11.000Z
|
2020-02-04T10:44:11.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-03-08 17:06
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("rentals", "0002_auto_20180306_0823")]
operations = [
migrations.AlterField(
model_name="comment",
name="stars",
field=models.IntegerField(
validators=[
django.core.validators.MaxValueValidator(5),
django.core.validators.MinValueValidator(1),
]
),
)
]
| 25.8 | 64 | 0.590698 |
d06ee9b8d2dc8019e6201c629cf80bda7956b65a
| 11,539 |
py
|
Python
|
ehb_datasources/tests/unit_tests/test_nautilus_driver.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 2 |
2019-09-13T20:27:39.000Z
|
2020-03-05T02:24:47.000Z
|
ehb_datasources/tests/unit_tests/test_nautilus_driver.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 46 |
2015-09-25T14:34:34.000Z
|
2020-10-05T22:04:36.000Z
|
ehb_datasources/tests/unit_tests/test_nautilus_driver.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 5 |
2016-04-25T15:01:39.000Z
|
2016-08-29T20:40:31.000Z
|
import pytest
import json
from ehb_datasources.drivers.nautilus.driver import ehbDriver
from ehb_datasources.drivers.exceptions import RecordCreationError, \
IgnoreEhbExceptions
@pytest.fixture()
def driver():
return ehbDriver(
url='http://example.com/api/',
user='foo',
password='bar',
secure=False
)
def test_initialization(driver):
assert driver.host == 'example.com'
assert driver.path == '/api/'
def test_encode_nau_creds(driver):
assert driver.encode_nau_creds() == 'Basic Zm9vOmJhcg=='
def test_update(driver, mocker):
# Mocks
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=b'[\n {\n "name": "7316-118", \n "status": "200", \n "type": "SDG"\n }\n]')
kwargs = {'fldvals': {'EXTERNAL_REFERENCE': '2J25I1QYGRU1DCTA:7316-118:100', 'STATUS': 'V'}, 'name': '7316-118', 'nau_sub_path': 'sdg'}
driver.PUT = mocker.MagicMock(return_value=MockNautilusResponse)
# Test
response = driver.update(kwargs)
driver.PUT.assert_called_with(
'/api/',
{'Content-Type': 'application/json', 'NAUTILUS-CREDS': 'Basic Zm9vOmJhcg==', 'Accept': 'application/json'},
'[{"null": null, "fldvals": {}}]'
)
assert response == b'[\n {\n "name": "7316-118", \n "status": "200", \n "type": "SDG"\n }\n]'
def test_update_fix_path(driver, mocker):
# Mocks
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=b'[\n {\n "name": "7316-118", \n "status": "200", \n "type": "SDG"\n }\n]')
kwargs = {'fldvals': {'EXTERNAL_REFERENCE': '2J25I1QYGRU1DCTA:7316-118:100', 'STATUS': 'V'}, 'name': '7316-118', 'nau_sub_path': 'sdg'}
driver.path = '/api'
driver.PUT = mocker.MagicMock(return_value=MockNautilusResponse)
# Test
response = driver.update(kwargs)
driver.PUT.assert_called_with(
'/api/',
{'Content-Type': 'application/json', 'NAUTILUS-CREDS': 'Basic Zm9vOmJhcg==', 'Accept': 'application/json'},
'[{"null": null, "fldvals": {}}]'
)
assert response == b'[\n {\n "name": "7316-118", \n "status": "200", \n "type": "SDG"\n }\n]'
def test_get_sample_data(driver, mocker, nautilus_get_sample_payload):
# Mocks
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=nautilus_get_sample_payload)
driver.GET = mocker.MagicMock(return_value=MockNautilusResponse)
# Test
response = driver.get_sample_data(record_id='TESTID')
assert 'SDG' in response.keys()
assert isinstance(response, dict)
def test_get(driver):
assert not driver.get()
def test_delete(driver):
assert not driver.delete()
def test_create(driver):
assert not driver.create('foo', 'bar')
def test_meta(driver):
assert not driver.meta()
def test_configure(driver):
assert not driver.configure()
def test_process_form(driver):
assert not driver.processForm(None, 'foo')
def test_record_new_record_form_required(driver):
assert driver.new_record_form_required()
def test_get_sample_data_error(driver, mocker, nautilus_get_sample_payload):
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=b'{"error": 6}')
driver.GET = mocker.MagicMock(return_value=MockNautilusResponse)
response = driver.get_sample_data(record_id='TESTID')
assert 'error' in response.keys()
assert isinstance(response, dict)
def test_extract_aliquots(driver, mocker, nautilus_get_sample_payload):
# TODO: Needs more mock responses as Nautilus response _will_ vary project
# to project
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=nautilus_get_sample_payload)
driver.GET = mocker.MagicMock(return_value=MockNautilusResponse)
response = driver.get_sample_data(record_id='TESTID')
aliquots = driver.extract_aliquots(response)
assert len(aliquots) == 4
assert len(aliquots[0].keys()) == 89
def test_format_aliquots(driver, mocker, nautilus_get_sample_payload):
# TODO: Needs more mock responses as Nautilus response _will_ vary project
# to project
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=nautilus_get_sample_payload)
driver.GET = mocker.MagicMock(return_value=MockNautilusResponse)
response = driver.get_sample_data(record_id='TESTID')
aliquots = driver.extract_aliquots(response)
formatted_aliquots = driver.format_aliquots(aliquots)
assert len(formatted_aliquots) == 4
assert aliquots[0].keys() == formatted_aliquots[0].keys()
assert aliquots[0]['label'] == 'Blood'
assert aliquots[0]['STATUS'] == '<p class="text-warning"><em>Disposed</em></p>'
assert aliquots[1]['STATUS'] == '<p class="text-success"><em>Available</em></p>'
assert aliquots[1]['U_RECEIVED_DATE_TIME'] is None
assert aliquots[1]['U_COLLECT_DATE_TIME'] == 'Unknown'
assert aliquots[2]['STATUS'] == '<p class="text-warning"><em>Unreceived</em></p>'
assert aliquots[3]['STATUS'] == '<p class="text-danger"><em>Cancelled</em></p>'
assert len(formatted_aliquots[0].keys()) == 91
def test_srsf(driver, mocker, nautilus_get_sample_payload):
# Mocks
MockNautilusResponse = mocker.MagicMock(
status=200
)
MockNautilusResponse.read = mocker.MagicMock(return_value=nautilus_get_sample_payload)
driver.GET = mocker.MagicMock(return_value=MockNautilusResponse)
form = driver.subRecordSelectionForm(form_url='/test/', record_id='foo')
assert '<td>Blood<small class="text-muted"> <small style="font-size:1em"> -- Collected On: 1901-01-01 14:30:19 -- Received On: 2015-03-11 14:35:07</small><span class="label label-primary pull-right muted">7316-118-BLD [108880]</span></td><td align="center"><p class="text-warning"><em>Disposed</em></p></td>' in form
assert '<td>Blood<small class="text-muted"> <small style="font-size:1em"> -- Collected On: Unknown -- Received On: None</small><span class="label label-primary pull-right muted">7316-118-BLD [108881]</span></td><td align="center"><p class="text-success"><em>Available</em></p></td>' in form
assert '<td><span class="label label-primary pull-right muted">7316-118-BLD [108882]</span></td><td align="center"><p class="text-warning"><em>Unreceived</em></p></td>' in form
assert '<td>Blood Flash Frozen<span class="label label-primary pull-right muted">7316-118-BLD [108881]</span></td><td align="center"><p class="text-danger"><em>Cancelled</em></p></td>' in form
def test_srf(driver):
assert not driver.subRecordForm(None)
def test_create_new_record_form_get(driver, mocker):
request = mocker.MagicMock(
method='GET'
)
form = driver.create_new_record_form(request)
assert form == '<table class="table table-bordered table-striped table-condensed"><tr><th>Description</th><th>Field</th></tr><tbody><tr><td>*Enter or Scan Subject ID</td><td><input type="text" onkeypress="return disableEnter(event);" name="SDG_NAME"/></td></tr></tbody></table>'
def test_create_new_record_form_post(driver, mocker):
request = mocker.MagicMock(
method='POST',
_post={'SDG_NAME': '7316-118', 'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
form = driver.create_new_record_form(request)
assert form == '<table class="table table-bordered table-striped table-condensed"><tr><th>Description</th><th>Field</th></tr><tbody><tr><td>*Enter or Scan Subject ID</td><td><input type="text" onkeypress="return disableEnter(event);" name="SDG_NAME" value="7316-118"/></td></tr></tbody></table>'
def test_process_new_record_form(driver, mocker):
request = mocker.MagicMock(
method='POST',
_post={'SDG_NAME': '7316-118', 'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
driver.update = mocker.MagicMock(return_value=b'[{"status": "200"}]')
validator_func = mocker.MagicMock(return_value=0)
with pytest.raises(IgnoreEhbExceptions) as excinfo:
driver.process_new_record_form(request, 'TESTPREFIX', validator_func)
assert excinfo.typename == 'IgnoreEhbExceptions'
def test_process_new_record_form_record_exists_on_ds(driver, mocker):
request = mocker.MagicMock(
method='POST',
_post={'SDG_NAME': '7316-118', 'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
driver.update = mocker.MagicMock(return_value=b'[{"status": "400"}]')
validator_func = mocker.MagicMock(return_value=1)
with pytest.raises(RecordCreationError) as excinfo:
driver.process_new_record_form(request, 'TESTPREFIX', validator_func)
assert excinfo.typename == 'RecordCreationError'
def test_process_new_record_form_assigned_to_other(driver, mocker):
request = mocker.MagicMock(
method='POST',
_post={'SDG_NAME': '7316-118', 'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
driver.update = mocker.MagicMock(return_value=b'[{"status": "200"}]')
validator_func = mocker.MagicMock(return_value=-1)
with pytest.raises(RecordCreationError) as excinfo:
driver.process_new_record_form(request, 'TESTPREFIX', validator_func)
assert excinfo.typename == 'RecordCreationError'
def test_process_new_record_form_no_sdg(driver, mocker):
request = mocker.MagicMock(
method='POST',
_post={'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
driver.update = mocker.MagicMock(return_value=b'[{"status": "200"}]')
validator_func = mocker.MagicMock(return_value=0)
with pytest.raises(RecordCreationError) as excinfo:
driver.process_new_record_form(request, 'TESTPREFIX', validator_func)
assert excinfo.typename == 'RecordCreationError'
# Parametrized unit tests covering the error messages returned by the LIMS.
examples = (('expected_error_message', 'status_error_num', 'test_comment'), [
    # The 'test_comment' values are left blank because the error message alone
    # is descriptive enough; the column is kept for consistency and future use.
('Username not provided.', "2", ''),
('Password not provided.', "3", ''),
('Request type not provided.', "4", ''),
('Request body not provided.', "5", ''),
('Malformed Request.', "6", ''),
('Unsupported request type.', "7", ''),
('Form data is not valid.', "8", ''),
('NAU socket service not found.', "100", ''),
('NAU invalid authorization header.', "101", '')
])
@pytest.mark.parametrize(*examples)
def test_process_new_record_lims(driver, mocker, expected_error_message, status_error_num, test_comment):
request = mocker.MagicMock(
method='POST',
_post={'SDG_NAME': '7316-118', 'csrfmiddlewaretoken': 'foo', 'label_id': '1'}
)
payload = [
{
"status": status_error_num,
}
]
    # .encode() is used because the driver function expects bytes
driver.update = mocker.MagicMock(return_value=json.dumps(payload).encode())
validator_func = mocker.MagicMock(return_value=0)
with pytest.raises(RecordCreationError) as excinfo:
driver.process_new_record_form(request, 'TESTPREFIX', validator_func)
assert excinfo.typename == 'RecordCreationError'
assert excinfo.value.cause == expected_error_message
| 41.65704 | 320 | 0.685675 |
a8da001e887065069b4a1289c2a5f95d84b3ea09
| 4,110 |
py
|
Python
|
ssd/nn/multibox_loss.py
|
Guillem96/ssd-pytorch
|
ffbb64d60c6e72c8126c2943f46b0e17298c8f81
|
[
"MIT"
] | 2 |
2020-10-24T09:57:42.000Z
|
2021-11-11T01:20:08.000Z
|
ssd/nn/multibox_loss.py
|
Guillem96/ssd-pytorch
|
ffbb64d60c6e72c8126c2943f46b0e17298c8f81
|
[
"MIT"
] | null | null | null |
ssd/nn/multibox_loss.py
|
Guillem96/ssd-pytorch
|
ffbb64d60c6e72c8126c2943f46b0e17298c8f81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..boxes import match
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, overlap_thresh, neg_pos):
super(MultiBoxLoss, self).__init__()
self.threshold = overlap_thresh
self.negpos_ratio = neg_pos
self.variance = [0.1, 0.2]
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
device = loc_data.device
bs = loc_data.size(0)
priors = priors[:loc_data.size(1), :].to(device)
num_priors = priors.size(0)
num_classes = conf_data.size(-1)
# match priors (default boxes) and ground truth boxes
loc_t = torch.zeros(bs, num_priors, 4, device=device)
conf_t = torch.zeros(bs, num_priors, device=device).long()
for idx in range(bs):
match(threshold=self.threshold,
truths=targets[idx][:, :-1],
priors=priors,
variances=self.variance,
labels=targets[idx][:, -1],
loc_t=loc_t,
conf_t=conf_t,
idx=idx)
pos = conf_t > 0
num_pos = pos.sum(dim=1, keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch, num_priors, 4]
loc_p = loc_data[pos].view(-1, 4)
loc_t = loc_t[pos].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
# Compute classification loss
loss_c = F.cross_entropy(conf_data.view(-1, num_classes),
conf_t.view(-1),
reduction='none')
loss_c = loss_c.view(bs, num_priors)
# Filter out the negative samples and reduce the loss by sum
loss_c_pos = loss_c[pos].sum()
# Hard negative mining, filter out the positive samples and pick the
# top negative losses
num_neg = torch.clamp(self.negpos_ratio * num_pos,
max=pos.size(1) - 1)
loss_c_neg = loss_c * ~pos
loss_c_neg, _ = loss_c_neg.sort(1, descending=True)
neg_mask = torch.zeros_like(loss_c_neg)
neg_mask[torch.arange(bs), num_neg.view(-1)] = 1.
neg_mask = 1 - neg_mask.cumsum(-1)
loss_c_neg = (loss_c_neg * neg_mask).sum()
# Finally we normalize the losses by the number of positives
N = num_pos.sum()
loss_l = loss_l / N
loss_c = (loss_c_pos + loss_c_neg) / N
return loss_l, loss_c
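# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of driving the loss with dummy tensors. The batch
# size, prior count, class count and the single ground-truth box per image are
# assumptions; real values come from the SSD model and the dataset pipeline. It
# also assumes the package's `match` helper accepts corner-form ground-truth
# boxes in [0, 1] coordinates, as the docstring above suggests.
# (Run with `python -m ssd.nn.multibox_loss` so the relative import resolves.)
if __name__ == "__main__":
    torch.manual_seed(0)
    bs, num_priors, num_classes = 2, 100, 21
    loc_preds = torch.randn(bs, num_priors, 4)
    conf_preds = torch.randn(bs, num_priors, num_classes)
    priors = torch.rand(num_priors, 4).clamp(min=1e-2)  # assumed center-form (cx, cy, w, h)
    # One ground-truth box per image: [xmin, ymin, xmax, ymax, class_label]
    targets = [torch.tensor([[0.10, 0.10, 0.60, 0.60, 3.0]]) for _ in range(bs)]
    criterion = MultiBoxLoss(overlap_thresh=0.5, neg_pos=3)
    loss_l, loss_c = criterion((loc_preds, conf_preds, priors), targets)
    print(f"loc loss: {loss_l.item():.4f}  conf loss: {loss_c.item():.4f}")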
| 38.773585 | 84 | 0.589294 |
9eb8332d352feed0c89bfddcb910b39224af99d4
| 2,159 |
py
|
Python
|
augraphy/augmentations/lowinkline.py
|
kwcckw/augraphy
|
b0e8cc4192eaf827e36fb67ae1b7b9c762f9578b
|
[
"MIT"
] | 36 |
2021-06-25T02:17:57.000Z
|
2022-03-29T02:36:09.000Z
|
augraphy/augmentations/lowinkline.py
|
shaheryar1/augraphy
|
5dd52fdd3b497312606c6d3afa4003f94a8cbcc4
|
[
"MIT"
] | 136 |
2021-06-25T07:39:46.000Z
|
2022-03-31T13:00:30.000Z
|
augraphy/augmentations/lowinkline.py
|
shaheryar1/augraphy
|
5dd52fdd3b497312606c6d3afa4003f94a8cbcc4
|
[
"MIT"
] | 24 |
2021-06-27T21:15:11.000Z
|
2022-03-08T03:28:17.000Z
|
################################################################################
# File: lowinkline.py
#
import random
import numpy as np
from augraphy.base.augmentation import Augmentation
class LowInkLine(Augmentation):
"""Generates streaking behavior common to printers running out of ink.
    :param use_consistent_lines: Whether generated low ink lines keep a single
        consistent alpha value (True) or vary the alpha per pixel (False).
    :type use_consistent_lines: bool, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
use_consistent_lines=True,
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.use_consistent_lines = use_consistent_lines
inconsistent_transparency_line = lambda x: random.randint(0, 255)
self.inconsistent_transparency_line = np.vectorize(
inconsistent_transparency_line,
)
apply_line = lambda x, y: x if x > y else y
self.apply_line = np.vectorize(apply_line)
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"LowInkLine(use_consistent_lines={self.use_consistent_lines}, p={self.p})"
# Takes an image, a vertical position, and an opacity value,
# then adds a line at that position in the image with the given
# opacity.
def add_transparency_line(self, mask, y, alpha=None):
"""Adds a line with some opacity at a vertical position in the image.
:param mask: The image to apply the line to.
:type mask: numpy.array
:param y: The vertical position to apply the line at.
:type y: int
:param alpha: The desired opacity of the line.
:type alpha: int, optional
"""
if alpha is None:
alpha = random.randint(16, 224)
if self.use_consistent_lines:
low_ink_line = np.full(mask[y, :].shape, alpha, dtype="uint8")
else:
low_ink_line = self.inconsistent_transparency_line(mask[y, :])
mask[y, :] = self.apply_line(mask[y, :], low_ink_line)
return mask
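# --- Usage sketch (added for illustration; not part of the original module) ---
# Applies a single consistent low-ink line to a synthetic mask. The mask size,
# row index, and alpha value are arbitrary; treating 0 as solid ink and higher
# values as lighter ink is an assumption based on `apply_line` keeping the
# brighter of the two values.
if __name__ == "__main__":
    solid_ink = np.zeros((100, 200), dtype="uint8")
    augmentation = LowInkLine(use_consistent_lines=True, p=1)
    streaked = augmentation.add_transparency_line(solid_ink, y=50, alpha=128)
    print(streaked[50, :5])  # the chosen row is now a lighter streak: [128 128 ...]
    print(streaked[49, :5])  # neighbouring rows are untouched: [0 0 ...]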
| 33.215385 | 90 | 0.628069 |
f2c1132b58e0998db2f0def99459f75d96b04916
| 2,267 |
py
|
Python
|
mindarmour/defenses/natural_adversarial_defense.py
|
zengchen1024/mindarmour
|
eed59453cf048da92fe15f57dbe3ca7de8b7adcb
|
[
"Apache-2.0"
] | null | null | null |
mindarmour/defenses/natural_adversarial_defense.py
|
zengchen1024/mindarmour
|
eed59453cf048da92fe15f57dbe3ca7de8b7adcb
|
[
"Apache-2.0"
] | null | null | null |
mindarmour/defenses/natural_adversarial_defense.py
|
zengchen1024/mindarmour
|
eed59453cf048da92fe15f57dbe3ca7de8b7adcb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Natural Adversarial Defense.
"""
from mindarmour.defenses.adversarial_defense import \
AdversarialDefenseWithAttacks
from mindarmour.attacks.gradient_method import FastGradientSignMethod
class NaturalAdversarialDefense(AdversarialDefenseWithAttacks):
"""
Adversarial training based on FGSM.
Reference: `A. Kurakin, et al., "Adversarial machine learning at scale," in
ICLR, 2017. <https://arxiv.org/abs/1611.01236>`_
Args:
        network (Cell): A MindSpore network to be defended.
        loss_fn (Function): Loss function. Default: None.
optimizer (Cell): Optimizer used to train the network. Default: None.
bounds (tuple): Upper and lower bounds of data. In form of (clip_min,
clip_max). Default: (0.0, 1.0).
replace_ratio (float): Ratio of replacing original samples with
adversarial samples. Default: 0.5.
eps (float): Step size of the attack method(FGSM). Default: 0.1.
Examples:
>>> net = Net()
>>> adv_defense = NaturalAdversarialDefense(net)
>>> adv_defense.defense(inputs, labels)
"""
def __init__(self, network, loss_fn=None, optimizer=None,
bounds=(0.0, 1.0), replace_ratio=0.5, eps=0.1):
attack = FastGradientSignMethod(network,
eps=eps,
alpha=None,
bounds=bounds)
super(NaturalAdversarialDefense, self).__init__(
network,
[attack],
loss_fn=loss_fn,
optimizer=optimizer,
bounds=bounds,
replace_ratio=replace_ratio)
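# --- Usage sketch (added for illustration; not part of the original module) ---
# Expands the docstring example with an explicit network, loss function and
# optimizer. `TinyNet`, the 28x28 single-channel inputs, the 10 classes and all
# hyper-parameters are assumptions made only for this sketch; labels are one-hot
# encoded here on the assumption that the configured loss expects them that way.
if __name__ == "__main__":
    import numpy as np
    from mindspore import nn
    class TinyNet(nn.Cell):
        """A deliberately small classifier used only for this sketch."""
        def __init__(self):
            super(TinyNet, self).__init__()
            self.flatten = nn.Flatten()
            self.fc = nn.Dense(28 * 28, 10)
        def construct(self, x):
            return self.fc(self.flatten(x))
    net = TinyNet()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
    optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    adv_defense = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer,
                                            bounds=(0.0, 1.0), replace_ratio=0.5, eps=0.1)
    inputs = np.random.rand(32, 1, 28, 28).astype(np.float32)
    labels = np.eye(10)[np.random.randint(0, 10, size=32)].astype(np.float32)
    adv_defense.defense(inputs, labels)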
| 39.77193 | 79 | 0.651081 |
226ddb4a0bf7913211f1cc9a09ca633a11848f7d
| 9,119 |
py
|
Python
|
docs/conf.py
|
Azima1985/recommender_package
|
84234b1394d797faa4e21090f99e58e639b27ebd
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Azima1985/recommender_package
|
84234b1394d797faa4e21090f99e58e639b27ebd
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Azima1985/recommender_package
|
84234b1394d797faa4e21090f99e58e639b27ebd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/my_project")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from distutils.version import LooseVersion
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if LooseVersion(sphinx.__version__) >= LooseVersion('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
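# For reference, the equivalent manual invocation outside of Read the Docs would
# be roughly the following (the docs/ and src/ paths mirror the directories
# assumed above and may differ in your checkout):
#
#   sphinx-apidoc -f -o docs/api src/my_project
#   sphinx-build -b html docs docs/_build/html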
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'my_project'
copyright = u'2018, Azima1985'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from my_project import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'my_project-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'my_project Documentation',
u'Azima1985', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| 33.774074 | 85 | 0.704244 |