max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
vanila_rnn.py | spencerpomme/torchlight | 0 | 12796851 | """
Minimal character-level Vanilla RNN model. Written by <NAME> (@karpathy)
BSD License
"""
import numpy as np
import unicodedata
import string
import codecs
# data I/O
data = codecs.open('data/potter.txt', 'r', encoding='utf8', errors='ignore').read()
fake = codecs.open('data/output.txt', 'w', encoding='utf8')
chars = list(set(data))
data_size = len(data)
vocab_size = len(chars)
print(f'data has {data_size} characters, {vocab_size} unique.') # e.g. data has 1109177 characters, 80 unique.
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
print(char_to_ix)
print(ix_to_char)
# hyperparameters
hidden_size = 256 # size of hidden layer of neurons
seq_length = 128 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters
W_xh = np.random.randn(hidden_size, vocab_size) * 0.01 # weight: input to hidden
W_hh = np.random.randn(hidden_size, hidden_size) * 0.01 # weight: hidden to hidden
W_hy = np.random.randn(vocab_size, hidden_size) * 0.01 # weight: hidden to output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((vocab_size, 1)) # output bias
all_letters = string.ascii_letters + " .,;'"  # assumed alphabet: not defined in the original, but referenced below
def unicodeToAscii(s):
    # strip accents and keep only characters in all_letters (this helper is never called by the training loop)
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )
def lossFun(inputs, targets, hprev):
"""
    inputs, targets are both lists of integers, each indexing a unique character.
    inputs: a list of length seq_length
    hprev is an (H x 1) array holding the initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
    xs, hs, ys, ps = {}, {}, {}, {} # xs[t] = ys[t] = ps[t] size = vocab_size x 1
hs[-1] = np.copy(hprev) # hs[t] size = hidden_size * 1
    loss = 0 # xs: inputs; ys: outputs; hs: hidden states, one per time step;
             # the weights are reused at every step, but each step has its own state.
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1 # inputs[t] is a index number, xs[t] is a vector
hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state
ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # (normalized) probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
print(f'loss: {loss}')
# print(f'xs:{len(xs[t])}->{xs[t]}\n hs:{len(hs[t])}->{hs[t]}\n ys:{len(ys[t])}->{ys[t]}\n ps:{len(ps[t])}->{ps[t]}')
# backward pass: compute gradients going backwards
dW_xh = np.zeros_like(W_xh) # gradient of W_xh, same shape as W_xh
dW_hh = np.zeros_like(W_hh) # gradient of W_hh, same shape as W_hh
dW_hy = np.zeros_like(W_hy) # gradient of W_hy, same shape as W_hy
db_h = np.zeros_like(b_h) # gradient of b_h, same shape as b_h
db_y = np.zeros_like(b_y) # gradient of b_y, same shape as b_y
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1
# backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dW_hy += np.dot(dy, hs[t].T)
db_y += dy
dh = np.dot(W_hy.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
db_h += dhraw
dW_xh += np.dot(dhraw, xs[t].T)
dW_hh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(W_hh.T, dhraw)
for dparam in [dW_xh, dW_hh, dW_hy, db_h, db_y]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hs[len(inputs)-1]
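# A minimal sketch (kept commented out so it does not interfere with training) of how
# lossFun is called on one window of the corpus; the slice mirrors the main loop below:
# demo_inputs = [char_to_ix[ch] for ch in data[0:seq_length]]
# demo_targets = [char_to_ix[ch] for ch in data[1:seq_length + 1]]
# demo_loss, *_, demo_h = lossFun(demo_inputs, demo_targets, np.zeros((hidden_size, 1)))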
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
i.e. do predictions :)
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
y = np.dot(W_hy, h) + b_y
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
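# Example (commented out): draw 50 characters from the current model, seeding with the
# first character of the corpus so the seed index is guaranteed to be in the vocabulary:
# preview = sample(np.zeros((hidden_size, 1)), char_to_ix[data[0]], 50)
# print(''.join(ix_to_char[ix] for ix in preview))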
n, p = 0, 0
mW_xh, mW_hh, mW_hy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mb_h, mb_y = np.zeros_like(b_h), np.zeros_like(b_y) # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length # loss at iteration 0
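# For reference: with vocab_size = 80 and seq_length = 128 (the values printed above),
# this starting value is roughly -ln(1/80) * 128, i.e. about 561.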
while True:
try:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt, ))
# forward seq_length characters through the net and fetch gradient
loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
print(f'iter{n}, loss: {smooth_loss}') # print progress
# perform parameter update with Adagrad
for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
[dW_xh, dW_hh, dW_hy, db_h, db_y],
[mW_xh, mW_hh, mW_hy, mb_h, mb_y]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
p += seq_length # move data pointer
n += 1 # iteration counter
except KeyboardInterrupt:
sample_ix = sample(hprev, inputs[0], data_size)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
fake.write(txt)
break
fake.close() | 3.125 | 3 |
GeneralOutputAnalysis_FLYCOP/Utilities/FitnessRanks.py | ivanmm25/FLYCOPtools | 1 | 12796852 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 21:20:59 2021
@author: <NAME>
"""
"""
OUTPUT ANALYSIS AFTER FLYCOP (UTILITIES)
DESCRIPTION
ORGANIZATION OF FITNESS RANKS & ASSOCIATED STATS DESCRIPTION
Series of functions to define and process fitness ranks (utility for scripts related to Statistical Analysis).
- obtain_fitness_rank
- stats_description
- describe_fitness_ranks: combination of the last two functions
INDIVIDUAL COMPARATIVE ANALYSIS (FLYCOP run)
Define fitness ranks for the Individual Comparative Analysis (FLYCOP run, input parameters)
- organize_fitness_ranks
EXTRACT INPUT PARAMETERS FOR A FLYCOP run (configuration) as a single str line
- extract_ratios
ANALYSIS OF BIOMASS LOSS
- when_death_starts (when this effect is first registered in a given simulation)
EXPECTED INPUT
- Dataframe: dataframe to be processed
- rank_limits_set: tuple with a series of inner tuples (fitness rank intervals)
- rank_limits: smaller tuple (fitness rank individual interval)
- ref_colum: reference column used to extract the fraction of the dataframe. Default: 'fitness'
- frac_dataframe: fraction of a particular dataframe
- descr_columns: columns to be described with 'Pandas' statistical description (method .describe())
- string_line_config. Example: -8.0,0.3,-12.0,0.05
OUTPUT
See each particular function
NOTE THAT:
Script in development (...)
"""
# import re
# import pandas as pd
# import os.path
# -----------------------------------------------------------------------------
# ORGANIZATION OF FITNESS RANKS & ASSOCIATED STATS DESCRIPTION
# -----------------------------------------------------------------------------
# RETURNS FRACTION OF INTEREST (fitness rank, all columns) OF THE DATAFRAME
# For a fitness interval, retrieves all related information (rest of the parameters)
def obtain_fitness_rank(rank_limits, dataframe, ref_colum):
frac_dataframe = dataframe[dataframe[ref_colum] < rank_limits[1]] # Higher limit
final_frac_dataframe = frac_dataframe[frac_dataframe[ref_colum] > rank_limits[0]] # Lower limit
return final_frac_dataframe
# STATISTICAL DESCRIPTION for the selected fitness rank, columns selected
# descr_columns are those columns (parameters) for which the statistical description is required
def stats_description(frac_dataframe, descr_columns):
stat_description = frac_dataframe[descr_columns].describe()
return stat_description
# COMBINES THE TWO LAST FUNCTIONS
# 1. Obtains every single fitness rank
# 2. Makes the subsequent statistical description
def describe_fitness_ranks(rank_limits_set, dataframe, descr_columns, ref_column):
# 'SAVE STATS' version, if wanted to be stored in a file
"""
filename = "stats_description.txt" # Which name?
with open(filename, "w") as stats_file:
for rank_limits_tuple in rank_limits_set:
fitness_rank = obtain_fitness_rank(rank_limits_tuple, dataframe, ref_column)
stat_descr = stats_description(fitness_rank, descr_columns)
            stats_file.write(str(stat_descr) + "\n")  # describe() returns a DataFrame, so cast to str before writing
"""
# 'PRINT' version
for rank_limits_tuple in rank_limits_set:
fitness_rank = obtain_fitness_rank(rank_limits_tuple, dataframe, ref_column)
stat_descr = stats_description(fitness_rank, descr_columns)
print(f"{ref_column} rank: {rank_limits_tuple[0]}-{rank_limits_tuple[1]}")
print(stat_descr)
print()
# LIMITATIONS
# ------------
# Limitation: for the first rank, the upper limit passed in must be higher than the best fitness value
# Limitation: how would we compute a single statistic on its own? See mean(), median() (individually)
# TO AUTOMATE
# -------------------------------
# Set of tuples for the ranks
# NEW IDEA: write the statistical description to a file for a later 'ComparativeAnalysis' between configurations
# 3D array
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# INDIVIDUAL COMPARATIVE ANALYSIS (FLYCOP run)
# -----------------------------------------------------------------------------
# DEFINE FITNESS RANKS (new column 'FitRank') for the Individual Comparative Analysis within each FLYCOP run
# Utility required for further comparison of parameter ratios of each fitness rank in FLYCOP output tables
def organize_fitness_ranks(dataframe, rank_limits_set, ref_column):
for row in dataframe.itertuples(): # row[0] = Index Number
ref_variable = dataframe.loc[row[0], ref_column]
for i in range(1, len(rank_limits_set)+1):
rank_tuple = rank_limits_set[i-1]
if rank_tuple[0] < ref_variable < rank_tuple[1]:
dataframe.loc[row[0], "FitRank"] = int(i)
break
elif ref_variable == 0:
ConfigError = dataframe.loc[row[0] , "ZeroDivisionError"]
if ConfigError == 0:
dataframe.loc[row[0] , "FitRank"] = 0
else:
dataframe.loc[row[0] , "FitRank"] = -1
return dataframe
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# INDIVIDUAL COMPARATIVE ANALYSIS (FLYCOP run)
# -----------------------------------------------------------------------------
# DEFINE RANKS (new column) for the Individual Comparative Analysis within each FLYCOP run
# The ranks are defined based on the desired 'ref_column' in the given dataframe
# 'New_column' contains the categorical classification of values, depending on the established ranks
# Instead of a number, the fitness interval itself
def organize_ranks(dataframe, rank_limits_set, ref_column, new_column):
for row in dataframe.itertuples(): # row[0] = Index Number
ref_variable = dataframe.loc[row[0], ref_column]
for i in range(1, len(rank_limits_set)+1):
rank_tuple = rank_limits_set[i-1]
if rank_tuple[0] < ref_variable < rank_tuple[1]:
dataframe.loc[row[0], new_column] = str(rank_tuple)
break
return dataframe
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# EXTRACT RATIOS OF INPUT PARAMETERS FOR FLYCOP run (configuration) as a single str line
# -----------------------------------------------------------------------------
# Currently: specific to E.coli_iEC1364-P.putida_KT2440 configuration:
# Sucrose/ fructose uptake rates ratio
# E.coli/ P.putida KT initial biomass ratio
# EXAMPLE: -8.0,0.3,-12.0,0.05
def extract_ratios(string_line_config):
list_line = string_line_config.split(",")
sucr_ur = float(list_line[0])
frc_ur = float(list_line[2])
uptake_ratio = round(sucr_ur/frc_ur, 3)
Ec_init = float(list_line[1])
KT_init = float(list_line[3])
initbiomass_ratio = round(Ec_init/KT_init, 3)
return uptake_ratio, initbiomass_ratio
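# Worked example, using the EXAMPLE line above:
# extract_ratios("-8.0,0.3,-12.0,0.05") -> (0.667, 6.0)
# because round(-8.0 / -12.0, 3) = 0.667 and round(0.3 / 0.05, 3) = 6.0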
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# EXTRACT RATIOS OF INPUT PARAMETERS FOR FLYCOP run (configuration) as a single str line
# -----------------------------------------------------------------------------
# Currently: specific to E.coli_iEC1364-P.putida_KT2440 configuration, with NP_uptake rates
# Sucrose/ fructose uptake rates ratio
# E.coli/ P.putida KT initial biomass ratio
# NH4 uptake ratio (E.coli/P.putida)
# Pi uptake ratio (E.coli/P.putida)
# EXAMPLE: -4.0,0.15,-18.0,0.05,-6.0,-10.0,-0.2,-0.25
def extract_ratios_NP(string_line_config):
list_line = string_line_config.split(",")
sucr_ur = float(list_line[0])
frc_ur = float(list_line[2])
uptake_ratio = round(sucr_ur/frc_ur, 3)
Ec_init = float(list_line[1])
KT_init = float(list_line[3])
initbiomass_ratio = round(Ec_init/KT_init, 3)
NH4_Ec = float(list_line[4])
NH4_KT = float(list_line[5])
NH4_Ec_KT = round(NH4_Ec/NH4_KT, 3)
# Pi_Ec = float(list_line[6])
# Pi_KT = float(list_line[7])
# Pi_Ec_KT = round(Pi_Ec/Pi_KT, 3)
return uptake_ratio, initbiomass_ratio, NH4_Ec_KT
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# EXTRACT INPUT PARAMETERS FOR FLYCOP run (configuration) as a single str line
# -----------------------------------------------------------------------------
# Currently: specific to E.coli_iEC1364-P.putida_KT2440 configuration, with NP_uptake rates
# EXAMPLE: -4.0,0.15,-18.0,0.05,-6.0,-10.0,-0.2,-0.25
def extract_baseConfig_NP(string_line_config):
list_line = string_line_config.split(",")
sucr_ur = float(list_line[0])
frc_ur = float(list_line[3])
Ec_init = float(list_line[1])
# Ec_init_glyc = float(list_line[2])
KT_init = float(list_line[4])
NH4_Ec = float(list_line[5])
NH4_KT = float(list_line[6])
# Pi_Ec = float(list_line[6])
# Pi_KT = float(list_line[7])
return sucr_ur, frc_ur, Ec_init, KT_init, NH4_Ec, NH4_KT
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# FUNCTION TO: obtain the initial cycle for death effect, from the dataframe where it has previously registered
# INPUT: dataframe where to operate (pandas dataframe)
# OUTPUT: same dataframe with new column:
# 'DT_cycles_init': cycle when dead effect starts
# NOTE THAT: "NoDeadTracking" means there is no death effect, thus the value for 'DT_cycles_init' is 0.
# If the mean for the death effect (initial cycle) was computed, all configurations would be taken into
# account (in the denominator) and those with no biomass loss would "sum 0" (to the numerator)
# TO-DO: further implementation / reorganization of code lines
# -----------------------------------------------------------------------------
def when_death_starts(dataframe):
dataframe["DT_cycles_init"] = 0
for row in dataframe.itertuples():
DT_cycles = dataframe.loc[row[0], "DT_cycles"].split("-")
DT_cycles_init = DT_cycles[0]
if DT_cycles_init != "NoDeadTracking":
dataframe.loc[row[0], "DT_cycles_init"] = int(DT_cycles_init)
return dataframe
# -----------------------------------------------------------------------------
| 3 | 3 |
core/tests/test_trezor.crypto.hashlib.sha3_512.py | Kayuii/trezor-crypto | 0 | 12796853 |
from common import *
from trezor.crypto import hashlib
class TestCryptoSha3_512(unittest.TestCase):
# vectors from http://www.di-mgt.com.au/sha_testvectors.html
vectors = [
(b'', 'a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26'),
(b'abc', 'b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0'),
(b'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq', '04a371e84ecfb5b8b77cb48610fca8182dd457ce6f326a0fd3d7ec2f1e91636dee691fbe0c985302ba1b0d8dc78c086346b533b49c030d99a27daf1139d6e75e'),
(b'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu', 'afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185'),
]
vectors_keccak = [
(b'', '<KEY>'),
(b'abc', '18587dc2ea106b9a1563e32b3312421ca164c7f1f07bc922a9c83d77cea3a1e5d0c69910739025372dc14ac9642629379540c17e2a65b19d77aa511a9d00bb96'),
(b'<KEY>', '<KEY>'),
(b'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu', 'ac2fb35251825d3aa48468a9948c0a91b8256f6d97d8fa4160faff2dd9dfcc24f3f1db7a983dad13d53439ccac0b37e24037e7b95f80f59f37a2f683c4ba4682'),
]
def test_digest(self):
for b, d in self.vectors:
self.assertEqual(hashlib.sha3_512(b).digest(), unhexlify(d))
def test_digest_keccak(self):
for b, d in self.vectors_keccak:
self.assertEqual(hashlib.sha3_512(b, keccak=True).digest(), unhexlify(d))
def test_update(self):
for b, d in self.vectors:
x = hashlib.sha3_512()
x.update(b)
self.assertEqual(x.digest(), unhexlify(d))
x = hashlib.sha3_512()
for i in range(1000000):
x.update(b'a')
self.assertEqual(x.digest(), unhexlify('3c3a876da14034ab60627c077bb98f7e120a2a5370212dffb3385a18d4f38859ed311d0a9d5141ce9cc5c66ee689b266a8aa18ace8282a0e0db596c90b0a7b87'))
'''
x = hashlib.sha3_512()
for i in range(16777216):
x.update(b'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno')
self.assertEqual(x.digest(), unhexlify('235ffd53504ef836a1342b488f483b396eabbfe642cf78ee0d31feec788b23d0d18d5c339550dd5958a500d4b95363da1b5fa18affc1bab2292dc63b7d85097c'))
'''
def test_update_keccak(self):
for b, d in self.vectors_keccak:
x = hashlib.sha3_512(keccak=True)
x.update(b)
self.assertEqual(x.digest(), unhexlify(d))
def test_digest_multi(self):
x = hashlib.sha3_512()
d0 = x.digest()
d1 = x.digest()
d2 = x.digest()
self.assertEqual(d0, d1)
self.assertEqual(d0, d2)
def test_digest_multi_keccak(self):
x = hashlib.sha3_512(keccak=True)
d0 = x.digest()
d1 = x.digest()
d2 = x.digest()
self.assertEqual(d0, d1)
self.assertEqual(d0, d2)
if __name__ == '__main__':
unittest.main()
| 2.375 | 2 |
bot/bot_manager.py | NgLamVN/HiBot | 0 | 12796854 |
from discord.ext import commands
from bot_constant import bot_constant
from command_handler import command_handler
from bot_listener import bot_listener
class bot_manager:
def __init__(self):
self.bot = commands.Bot(command_prefix=bot_constant.bot_prefix, description=bot_constant.bot_description)
self.command_handler = command_handler(self.bot)
self.bot.add_cog(self.command_handler)
self.bot_listener = bot_listener()
self.bot.add_cog(self.bot_listener)
self.start_bot()
def start_bot(self) -> None:
self.bot.run(bot_constant.bot_token)
def get_command_handler(self) -> command_handler:
return self.command_handler
def get_bot_listener(self) -> bot_listener:
return self.bot_listener
def get_bot(self) -> commands.Bot:
return self.bot
| 2.359375 | 2 |
statistics_check.py | bugo99iot/airbnb_k_nearest | 9 | 12796855 |
import pandas as pd
import numpy as np
from scipy.spatial import distance
import sys
import json
import math
columns = ["price", "room_type", "accommodates", "bedrooms", "bathrooms", "beds", "number_of_reviews", "latitude", "longitude", "review_scores_value"]
#load cities' information
with open('cities_dictionary.json') as json_data:
cities_dict = json.load(json_data)
del cities_dict['EXAMPLE']
#choose the city
city = "ATHENS"
#upload data
try:
city_listings = pd.read_csv("DATA/raw/" + city + "_listings.csv")
except Exception:
if city == "HONG KONG":
city = "HK"
city_listings = pd.read_csv("DATA/raw/" + city + "_listings.csv")
city = "HONG KONG"
if city == "LOS ANGELES":
city = "LA"
city_listings = pd.read_csv("DATA/raw/" + city + "_listings.csv")
city = "LOS ANGELES"
if city == "SAN FRANCISCO":
city = "SF"
city_listings = pd.read_csv("DATA/raw/" + city + "_listings.csv")
city = "SAN FRANCISCO"
#select relevant columns from the data
city_listings = city_listings[columns]
#drop room types that are not well formatted
TF = (city_listings["room_type"] == "Entire home/apt") | (city_listings["room_type"] == "Private room")
city_listings = city_listings[TF]
#drop NaN rows, which means we mostly drop items which have no reviews
city_listings = city_listings.dropna()
#shuffle
city_listings = city_listings.sample(frac=1,random_state=0)
#remove unwanted characters
city_listings['price'] = city_listings.price.str.replace("\$|,",'').astype(float)
#set private room type to 0 and entire home/apt to 1
city_listings['room_type'].replace("Private room", 0.0,inplace=True)
city_listings['room_type'].replace("Entire home/apt", 1.0,inplace=True)
mean_price = city_listings["price"].mean()
split_value = int(round(float(city_listings.shape[0])*75/100))
#we use 75% of the dataset as train, 25% as test
train_set = city_listings.iloc[:split_value]
test_set = city_listings.iloc[split_value:]
print test_set.head(5)
#we normalise
for items in columns[1:]:
mean = city_listings[items].mean()
std = np.std(city_listings[items])
N_items = "N_"+items
city_listings[N_items] = (city_listings[items] - mean) / std
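# For intuition (illustrative numbers): a listing with 4 beds in a city whose mean is
# 2 beds with a standard deviation of 1 gets N_beds = (4 - 2) / 1 = 2.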
N_columns = ["price", "N_room_type", "N_accommodates", "N_bedrooms", "N_bathrooms", "N_beds", "N_number_of_reviews", "N_latitude", "N_longitude", "N_review_scores_value"]
#drop old columns
normal_city_listings = city_listings[N_columns]
train_set = normal_city_listings.iloc[:split_value]
test_set = normal_city_listings.iloc[split_value:]
#choose columns you want to take into account for the purpose of calculating the price
feature_cols = ["N_room_type", "N_accommodates", "N_bedrooms", "N_bathrooms", "N_beds", "N_latitude", "N_longitude", "N_review_scores_value", "N_number_of_reviews"]
train_set_f = train_set[feature_cols]
test_set_f = test_set[feature_cols]
standard_deviation = 0
k = 5
aces = 0
differences_squared = []
precision = 0.30
for index, rows in test_set_f.iterrows():
distance_series = train_set_f.apply(lambda row: distance.euclidean(rows, row), axis=1)
train_set = train_set.assign(distance=distance_series)
train_set.sort_values("distance", inplace=True)
knn = train_set.iloc[:k]
predicted_price = knn["price"].mean()
predicted_price = predicted_price.item()
real_price = test_set.loc[[index], :]["price"]
real_price = real_price.item()
differences_squared.append((predicted_price - real_price)**2)
if predicted_price/real_price < 1 + precision and predicted_price/real_price > 1 - precision:
aces += 1
del train_set["distance"]
average_deviation = sum(differences_squared) / float(len(differences_squared))
print "Aces: ", aces
rmse = (average_deviation)**0.5
print "Rmse: ", rmse, "for a price mean: ", mean_price
acespercent = float(aces)/float(test_set.shape[0])
print "Accuracy %:", acespercent, "with a precision: ", precision
| 3.203125 | 3 |
13975.py | WaiNaat/BOJ-Python | 0 | 12796856 | import sys
input = sys.stdin.readline
import heapq as hq
# input
t = int(input())
for _ in range(t):
chapter = int(input())
pages = list(map(int, input().split()))
# process
'''
    The chapters may be merged in any order.
    >> Pop the two smallest values from the heap, add them, and push the sum back.
'''
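    # Worked example: pages [30, 30, 40]
    # merge 30 + 30 -> cost 60, push 60; merge 60 + 40 -> cost 100; total cost = 160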
sol = 0
hq.heapify(pages)
for _ in range(chapter - 1):
cost = hq.heappop(pages) + hq.heappop(pages)
sol += cost
hq.heappush(pages, cost)
# output
print(sol) | 3.0625 | 3 |
pypesto/result/optimize.py | m-philipps/pyPESTO | 0 | 12796857 | """Optimization result."""
import warnings
from collections import Counter
from copy import deepcopy
from typing import Sequence, Union
import numpy as np
import pandas as pd
from ..objective import History
from ..problem import Problem
from ..util import assign_clusters, delete_nan_inf
OptimizationResult = Union['OptimizerResult', 'OptimizeResult']
class OptimizerResult(dict):
"""
The result of an optimizer run.
Used as a standardized return value to map from the individual result
objects returned by the employed optimizers to the format understood by
pypesto.
Can be used like a dict.
Attributes
----------
id:
Id of the optimizer run. Usually the start index.
x:
The best found parameters.
fval:
The best found function value, `fun(x)`.
grad:
The gradient at `x`.
hess:
The Hessian at `x`.
res:
The residuals at `x`.
sres:
The residual sensitivities at `x`.
n_fval
Number of function evaluations.
n_grad:
Number of gradient evaluations.
n_hess:
Number of Hessian evaluations.
n_res:
Number of residuals evaluations.
n_sres:
Number of residual sensitivity evaluations.
x0:
The starting parameters.
fval0:
The starting function value, `fun(x0)`.
history:
Objective history.
exitflag:
The exitflag of the optimizer.
time:
Execution time.
message: str
Textual comment on the optimization result.
optimizer: str
The optimizer used for optimization.
Notes
-----
Any field not supported by the optimizer is filled with None.
"""
def __init__(
self,
id: str = None,
x: np.ndarray = None,
fval: float = None,
grad: np.ndarray = None,
hess: np.ndarray = None,
res: np.ndarray = None,
sres: np.ndarray = None,
n_fval: int = None,
n_grad: int = None,
n_hess: int = None,
n_res: int = None,
n_sres: int = None,
x0: np.ndarray = None,
fval0: float = None,
history: History = None,
exitflag: int = None,
time: float = None,
message: str = None,
optimizer: str = None,
):
super().__init__()
self.id = id
self.x: np.ndarray = np.array(x) if x is not None else None
self.fval: float = fval
self.grad: np.ndarray = np.array(grad) if grad is not None else None
self.hess: np.ndarray = np.array(hess) if hess is not None else None
self.res: np.ndarray = np.array(res) if res is not None else None
self.sres: np.ndarray = np.array(sres) if sres is not None else None
self.n_fval: int = n_fval
self.n_grad: int = n_grad
self.n_hess: int = n_hess
self.n_res: int = n_res
self.n_sres: int = n_sres
self.x0: np.ndarray = np.array(x0) if x0 is not None else None
self.fval0: float = fval0
self.history: History = history
self.exitflag: int = exitflag
self.time: float = time
self.message: str = message
self.optimizer = optimizer
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def summary(self):
"""Get summary of the object."""
message = (
"### Optimizer Result \n\n"
f"* optimizer used: {self.optimizer} \n"
f"* message: {self.message} \n"
f"* number of evaluations: {self.n_fval} \n"
f"* time taken to optimize: {self.time} \n"
f"* startpoint: {self.x0} \n"
f"* endpoint: {self.x} \n"
)
# add fval, gradient, hessian, res, sres if available
if self.fval is not None:
message += f"* final objective value: {self.fval} \n"
if self.grad is not None:
message += f"* final gradient value: {self.grad} \n"
if self.hess is not None:
message += f"* final hessian value: {self.hess} \n"
if self.res is not None:
message += f"* final residual value: {self.res} \n"
if self.sres is not None:
message += f"* final residual sensitivity: {self.sres} \n"
return message
def update_to_full(self, problem: Problem) -> None:
"""
Update values to full vectors/matrices.
Parameters
----------
problem:
problem which contains info about how to convert to full vectors
or matrices
"""
self.x = problem.get_full_vector(self.x, problem.x_fixed_vals)
self.grad = problem.get_full_vector(self.grad)
self.hess = problem.get_full_matrix(self.hess)
self.x0 = problem.get_full_vector(self.x0, problem.x_fixed_vals)
class OptimizeResult:
"""Result of the :py:func:`pypesto.optimize.minimize` function."""
def __init__(self):
self.list = []
def __deepcopy__(self, memo):
other = OptimizeResult()
other.list = deepcopy(self.list)
return other
def __getattr__(self, key):
"""Define `optimize_result.key`."""
try:
return [res[key] for res in self.list]
except KeyError:
raise AttributeError(key)
def __getitem__(self, index):
"""Define `optimize_result[i]` to access the i-th result."""
try:
return self.list[index]
except IndexError:
raise IndexError(
f"{index} out of range for optimize result of "
f"length {len(self.list)}."
)
def __len__(self):
return len(self.list)
def summary(self):
"""Get summary of the object."""
# perform clustering for better information
clust, clustsize = assign_clusters(delete_nan_inf(self.fval)[1])
counter_message = '\n'.join(
["\tCount\tMessage"]
+ [
f"\t{count}\t{message}"
for message, count in Counter(self.message).most_common()
]
)
times_message = (
f'\n\tMean execution time: {np.mean(self.time)}s\n'
f'\tMaximum execution time: {np.max(self.time)}s,'
f'\tid={self[np.argmax(self.time)].id}\n'
f'\tMinimum execution time: {np.min(self.time)}s,\t'
f'id={self[np.argmin(self.time)].id}'
)
summary = (
"## Optimization Result \n\n"
f"* number of starts: {len(self)} \n"
f"* execution time summary: {times_message}\n"
f"* summary of optimizer messages:\n{counter_message}\n"
f"* best value found (approximately) {clustsize[0]} time(s) \n"
f"* number of plateaus found: "
f"{1 + max(clust) - sum(clustsize == 1)} \n"
f"* best value: {self[0]['fval']}, "
f"worst value: {self[-1]['fval']} \n\n"
f"A summary of the best run:\n\n{self[0].summary()}"
)
return summary
def append(
self,
optimize_result: OptimizationResult,
sort: bool = True,
prefix: str = '',
):
"""
Append an OptimizerResult or an OptimizeResult to the result object.
Parameters
----------
optimize_result:
The result of one or more (local) optimizer run.
sort:
Boolean used so we only sort once when appending an
optimize_result.
prefix:
The IDs for all appended results will be prefixed with this.
"""
current_ids = set(self.id)
if isinstance(optimize_result, OptimizeResult):
new_ids = [
prefix + identifier
for identifier in optimize_result.id
if identifier is not None
]
if current_ids.isdisjoint(new_ids) and new_ids:
raise ValueError(
"Some id's you want to merge coincide with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
for optimizer_result in optimize_result.list:
self.append(optimizer_result, sort=False, prefix=prefix)
elif isinstance(optimize_result, OptimizerResult):
# if id is None, append without checking for duplicate ids
if optimize_result.id is None:
self.list.append(optimize_result)
else:
new_id = prefix + optimize_result.id
if new_id in current_ids:
raise ValueError(
"The id you want to merge coincides with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
optimize_result.id = new_id
self.list.append(optimize_result)
if sort:
self.sort()
def sort(self):
"""Sort the optimizer results by function value fval (ascending)."""
def get_fval(res):
return res.fval if not np.isnan(res.fval) else np.inf
self.list = sorted(self.list, key=get_fval)
def as_dataframe(self, keys=None) -> pd.DataFrame:
"""
Get as pandas DataFrame.
If keys is a list, return only the specified values, otherwise all.
"""
lst = self.as_list(keys)
df = pd.DataFrame(lst)
return df
def as_list(self, keys=None) -> Sequence:
"""
Get as list.
If keys is a list, return only the specified values.
Parameters
----------
keys: list(str), optional
Labels of the field to extract.
"""
lst = self.list
if keys is not None:
lst = [{key: res[key] for key in keys} for res in lst]
return lst
def get_for_key(self, key) -> list:
"""Extract the list of values for the specified key as a list."""
warnings.warn(
"get_for_key() is deprecated in favour of "
"optimize_result['key'] and will be removed in future "
"releases."
)
return [res[key] for res in self.list]
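# A minimal usage sketch (illustrative values only, not part of the original module):
#   result = OptimizeResult()
#   result.append(OptimizerResult(id="0", x=np.array([1.0]), fval=3.2))
#   result.append(OptimizerResult(id="1", x=np.array([0.9]), fval=2.7))
#   result.fval                               # [2.7, 3.2], kept sorted by fval (ascending)
#   result[0].id                              # "1"
#   result.as_dataframe(keys=["id", "fval"])  # two-column pandas DataFrame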
| 2.9375 | 3 |
lecture_dl_21_examples/002_cifar10/01_dataset_test.py | Daniel1586/Initiative_tensorflow_tutorials | 1 | 12796858 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
if not os.path.exists('example_pic'):
os.makedirs('example_pic/')
with tf.Session() as sess:
filename = ['A.jpg', 'B.jpg', 'C.jpg']
    # string_input_producer creates a filename queue; shuffle=False keeps the data order, num_epochs sets the number of training epochs
filename_queue = tf.train.string_input_producer(filename, shuffle=False, num_epochs=5)
    # the reader pulls data from the filename queue; the matching method is reader.read, which returns the filename (key) and the file contents (value)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
    # tf.train.string_input_producer defines an epoch variable (when num_epochs is not None), which must be initialized
tf.local_variables_initializer().run()
    # only after start_queue_runners is called are tensors actually pushed into the filename queue
threads = tf.train.start_queue_runners(sess=sess)
i = 0
while True:
i += 1
image_data = sess.run(value)
with open('example_pic/test_%d.jpg' % i, 'wb') as f:
f.write(image_data)
    # the program eventually raises an OutOfRangeError, which signals that all epochs have finished and the queue is closed
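    # One possible way (not in the original script) to stop cleanly instead of crashing
    # would be to wrap the read loop in a try/except on tf.errors.OutOfRangeError:
    # try:
    #     while True:
    #         ...  # read and write images as above
    # except tf.errors.OutOfRangeError:
    #     print('all epochs consumed, queue closed')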
| 2.625 | 3 |
tests/test_q0505.py | mirzadm/ctci-5th-py | 0 | 12796859 |
"""Unit tests for q0505.py."""
import unittest
from src.q0505 import count_unequal_bits
class TestCountUnequalBits(unittest.TestCase):
def test_count_unequal_bits(self):
self.assertRaises(ValueError, count_unequal_bits, -1, 1)
self.assertEqual(count_unequal_bits(0b0, 0b0), 0)
self.assertEqual(count_unequal_bits(0b1, 0b1), 0)
self.assertEqual(count_unequal_bits(0b01, 0b10), 2)
self.assertEqual(count_unequal_bits(0b001, 0b110), 3)
if __name__ == '__main__':
unittest.main()
| 2.5 | 2 |
mafComparator/comparatorSummarizer.py | dadidange/mafTools | 0 | 12796860 |
#!/usr/bin/env python
"""
comparatorSummarizer.py
dent earl, dearl (a) soe ucsc edu
22 November 2011
Simple utility to summarize output of mafComparator
for use with the alignathon competition.
"""
# Note: Actually belongs to https://github.com/dentearl/mwgAlignAnalysis
# but was moved in here for convenience
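# Typical invocation (the XML filename below is illustrative):
#   python comparatorSummarizer.py --xml comparison.xml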
##################################################
# Copyright (C) 2009-2011 by
# <NAME> (<EMAIL>, <EMAIL>)
# ... and other members of the Reconstruction Team of <NAME>'s
# lab (BME Dept. UCSC)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##################################################
import xml.etree.ElementTree as ET
import xml.parsers.expat
from optparse import OptionParser
import os
class ComparisonPair:
def __init__(self, speciesA, speciesB):
assert(speciesA is not None)
assert(speciesB is not None)
self.species = set([speciesA, speciesB])
self.truePosA = 0 # with respect to the A->B comparison
self.truePosB = 0 # with respect to the B->A comparison
self.falsePos = 0
self.falseNeg = 0
# region numbers are for comparisons using .bed files
self.truePosRegionA = 0 # wrt A->B
self.truePosRegionB = 0 # wrt B->A
self.falsePosRegion = 0
self.falseNegRegion = 0
self.truePosRegionOutsideA = 0 # wrt A->B
self.truePosRegionOutsideB = 0 # wrt B->A
self.falsePosRegionOutside = 0
self.falseNegRegionOutside = 0
self.precision = None
self.recall = None
self.precisionRegion = None
self.recallRegion = None
self.precisionRegionOutside = None
self.recallRegionOutside = None
names = list(self.species)
names = sorted(names, key = lambda x: x[3:]) # ignore the "sim" part of the name
self.niceNames = '-'.join(names)
if len(names) == 1:
self.niceNames = 'self-%s' % names[0]
def calcPrecision(self):
# Precision is calculated as
# TP_B / (TP_B + FP)
# We use the TP_B since FP comes from the B->A comparison
if (self.truePosB + self.falsePos) == 0:
self.precision = float('nan')
else:
self.precision = float(self.truePosB) / (self.truePosB + self.falsePos)
if (self.truePosRegionB + self.falsePosRegion) == 0:
self.precisionRegion = float('nan')
else:
self.precisionRegion = float(self.truePosRegionB) / (self.truePosRegionB + self.falsePosRegion)
if (self.truePosRegionOutsideB + self.falsePosRegionOutside) == 0:
self.precisionRegionOutside = float('nan')
else:
self.precisionRegionOutside = (float(self.truePosRegionOutsideB) /
(self.truePosRegionOutsideB + self.falsePosRegionOutside))
def calcRecall(self):
# Recall is calculated as
# TP_A / (TP_A + FN)
# We use the TP_A since FN comes from the A->B comparison
if (self.truePosA + self.falseNeg) == 0:
self.recall = float('nan')
else:
self.recall = float(self.truePosA) / (self.truePosA + self.falseNeg)
if (self.truePosRegionA + self.falseNegRegion) == 0:
self.recallRegion = float('nan')
else:
self.recallRegion = float(self.truePosRegionA) / (self.truePosRegionA + self.falseNegRegion)
if (self.truePosRegionOutsideA + self.falseNegRegionOutside) == 0:
self.recallRegionOutside = float('nan')
else:
self.recallRegionOutside = (float(self.truePosRegionOutsideA) /
(self.truePosRegionOutsideA + self.falseNegRegionOutside))
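    # Worked example with illustrative counts: truePosA = 90, falseNeg = 10,
    # truePosB = 80, falsePos = 20 gives recall = 90 / (90 + 10) = 0.9 and
    # precision = 80 / (80 + 20) = 0.8; the F-score reported later is then
    # 2 * (0.8 * 0.9) / (0.8 + 0.9), approximately 0.847.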
def initOptions(parser):
parser.add_option('--xml', dest = 'xml',
help = 'location of mafComparator output xml to summarize.')
def checkOptions(options, args, parser):
if options.xml is None:
parser.error('specify --xml')
if not os.path.exists(options.xml):
parser.error('--xml %s does not exist' % options.xml)
def addPairData(pairs, homTests, falsePosMode = False):
""" given the dict `pairs' and a part of the xml tree `homTests',
addPairData() walks the tree to add data to the pairs dict.
falsePosMode vs truePosMode:
the first homology test in the mafComparator output is A->B and the
results of this comparison will be truePositives(A) and falseNegatives(A).
the second homology test in the mC output is B->A and the results
of this comparison will be truePositives(B) and falsePositives(B) (this is falsePosMode).
"""
hpTests = homTests.find('homologyPairTests')
tests = hpTests.findall('homologyTest')
for t in tests:
seqA = t.attrib['sequenceA'].split('.')[0]
seqB = t.attrib['sequenceB'].split('.')[0]
if seqA == 'self' or seqB == 'self':
continue
if seqA == seqB:
pass
# do not compare a genome to itself
# continue
if t.attrib['sequenceA'] == 'aggregate' or t.attrib['sequenceB'] == 'aggregate':
# ignore the aggregate sequences
continue
p = findPair(seqA, seqB, pairs)
if p is None:
p = ComparisonPair(seqA, seqB)
pairs['%s-%s' % (seqA, seqB)] = p
if falsePosMode:
# the second homology test in the xml, B->A
p.truePosB += int(t.find('aggregateResults').find('all').attrib['totalTrue'])
p.falsePos += int(t.find('aggregateResults').find('all').attrib['totalFalse'])
if t.find('aggregateResults').find('both') is not None:
# bed file established regions
p.truePosRegionB += int(t.find('aggregateResults').find('both').attrib['totalTrue'])
p.falsePosRegion += int(t.find('aggregateResults').find('both').attrib['totalFalse'])
p.truePosRegionOutsideB += int(t.find('aggregateResults').find('neither').attrib['totalTrue'])
p.falsePosRegionOutside += int(t.find('aggregateResults').find('neither').attrib['totalFalse'])
else:
# the first homology test in the xml, A->B
p.truePosA += int(t.find('aggregateResults').find('all').attrib['totalTrue'])
p.falseNeg += int(t.find('aggregateResults').find('all').attrib['totalFalse'])
if t.find('aggregateResults').find('both') is not None:
# bed file established regions
p.truePosRegionA += int(t.find('aggregateResults').find('both').attrib['totalTrue'])
p.falseNegRegion += int(t.find('aggregateResults').find('both').attrib['totalFalse'])
p.truePosRegionOutsideA += int(t.find('aggregateResults').find('neither').attrib['totalTrue'])
p.falseNegRegionOutside += int(t.find('aggregateResults').find('neither').attrib['totalFalse'])
def findPair(seqA, seqB, pairs):
# Check to see if the pair (seqA, seqB) is stored in pairs. Return None if not, return the pair if so.
if '%s-%s' % (seqA, seqB) in pairs:
# if '%s-%s' % (seqB, seqA) in pairs:
# raise RuntimeError('Duplicate pair found in `pairs\' dict: %s-%s' % (seqA, seqB))
return pairs['%s-%s' % (seqA, seqB)]
if '%s-%s' % (seqB, seqA) in pairs:
# if '%s-%s' % (seqA, seqB) in pairs:
# raise RuntimeError('Duplicate pair found in `pairs\' dict: %s-%s' % (seqA, seqB))
return pairs['%s-%s' % (seqB, seqA)]
return None
def reportPairs(pairs, options):
print('')
sortedPairs = sorted(pairs, key = lambda x: pairs[x].niceNames)
for pair in sortedPairs:
p = pairs[pair]
p.calcRecall()
p.calcPrecision()
if p.precision == -1.0 or (p.precision + p.recall) == 0:
precStr = 'nan'
fStr = 'nan'
else:
precStr = '%.5f' % p.precision
fStr = '%.5f' % (2 * ((p.precision * p.recall)/
(p.precision + p.recall)))
if p.precisionRegion == -1.0 or (p.precisionRegion + p.recallRegion) == 0:
precRegStr = 'nan'
fRegStr = 'nan'
else:
precRegStr = '%.5f' % p.precisionRegion
fRegStr = '%.5f' % (2 * ((p.precisionRegion * p.recallRegion)/
(p.precisionRegion + p.recallRegion)))
if p.precisionRegionOutside == -1.0 or (p.precisionRegionOutside + p.recallRegionOutside) == 0:
precRegOutStr = 'nan'
fRegOutStr = 'nan'
else:
precRegOutStr = '%.5f' % p.precisionRegionOutside
fRegOutStr = '%.5f' % (2 * ((p.precisionRegionOutside * p.recallRegionOutside)/
(p.precisionRegionOutside + p.recallRegionOutside)))
if not isRegionMode(pairs):
print('%35s %10s %10.5f %10s %9d %9d %9d %9d' %
(p.niceNames, precStr, p.recall, fStr,
p.truePosA, p.truePosB, p.falsePos, p.falseNeg))
else:
print('%35s %10s %10.5f %10s %9d %9d %9d %9d' %
('%s inside' % p.niceNames, precRegStr, p.recallRegion,
fRegStr,
p.truePosRegionA, p.truePosRegionB, p.falsePosRegion, p.falseNegRegion))
print('%35s %10s %10.5f %10s %9d %9d %9d %9d' %
('%s outside' % p.niceNames, precRegOutStr, p.recallRegionOutside,
fRegOutStr,
p.truePosRegionOutsideA, p.truePosRegionOutsideB,
p.falsePosRegionOutside, p.falseNegRegionOutside))
def summarize(options):
""" summarize() summizes the information contained in file stored in options.xml
"""
try:
tree = ET.parse(options.xml)
except xml.parsers.expat.ExpatError:
raise RuntimeError('Input xml, %s is not a well formed xml document.' % options.xml)
root = tree.getroot()
homTests = root.findall('homologyTests')
pairs = {}
addPairData(pairs, homTests[0])
addPairData(pairs, homTests[1], falsePosMode = True)
if isRegionMode(pairs):
# if a BED was used by mafComparator then the xml will be in Region mode
suffix = 'Region'
truePosOutA = getItem(pairs, 'truePosRegionOutsideA', False)
truePosOutB = getItem(pairs, 'truePosRegionOutsideB', False)
falseNegOut = getItem(pairs, 'falseNegRegionOutside', False)
falsePosOut = getItem(pairs, 'falsePosRegionOutside', False)
truePosSelfOutA = getItem(pairs, 'truePosRegionOutsideA', True)
truePosSelfOutB = getItem(pairs, 'truePosRegionOutsideB', True)
falsePosSelfOut = getItem(pairs, 'falsePosRegionOutside', True)
falseNegSelfOut = getItem(pairs, 'falseNegRegionOutside', True)
precisionOut = float(truePosOutB) / (truePosOutB + falsePosOut)
recallOut = float(truePosOutA) / (truePosOutA + falseNegOut)
precisionSelfOut = float(truePosSelfOutB) / (truePosSelfOutB + falsePosSelfOut)
recallSelfOut = float(truePosSelfOutA) / (truePosSelfOutA + falseNegSelfOut)
else:
suffix = ''
truePosA = getItem(pairs, 'truePos' + suffix + 'A', False)
truePosB = getItem(pairs, 'truePos' + suffix + 'B', False)
falseNeg = getItem(pairs, 'falseNeg' + suffix, False)
falsePos = getItem(pairs, 'falsePos' + suffix, False)
truePosSelfA = getItem(pairs, 'truePos' + suffix + 'A', True)
truePosSelfB = getItem(pairs, 'truePos' + suffix + 'B', True)
falsePosSelf = getItem(pairs, 'falsePos' + suffix, True)
falseNegSelf = getItem(pairs, 'falseNeg' + suffix, True)
if (truePosB + falsePos) == 0:
precision = float('nan')
else:
precision = float(truePosB) / (truePosB + falsePos)
if (truePosA + falseNeg) == 0:
recall = float('nan')
else:
recall = float(truePosA) / (truePosA + falseNeg)
if (truePosSelfB + falsePosSelf) == 0:
precisionSelf = float('nan')
else:
precisionSelf = float(truePosSelfB) / (truePosSelfB + falsePosSelf)
if (truePosSelfA + falseNegSelf) == 0:
recallSelf = float('nan')
else:
recallSelf = float(truePosSelfA) / (truePosSelfA + falseNegSelf)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('dataset', 'Precision', 'Recall', 'F-score', 'TP (A)', 'TP (B)', 'FP (B)', 'FN (A)')
if isRegionMode(pairs):
sanityCheckRegionMode(truePosA, truePosB, falsePos, falseNeg,
truePosOutA, truePosOutB, falsePosOut, falseNegOut,
truePosSelfA, truePosSelfB, falsePosSelf, falseNegSelf,
truePosSelfOutA, truePosSelfOutB, falsePosSelfOut, falseNegSelfOut,
pairs, options)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/o self) inside', precision, recall,
2 * (precision * recall) / (precision + recall),
truePosA, truePosB, falsePos, falseNeg)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/o self) outside', precisionOut, recallOut,
2 * ((precisionOut * recallOut) /
(precisionOut + recallOut)),
truePosOutA, truePosOutA, falsePosOut, falseNegOut)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/ self) inside', precisionSelf, recallSelf,
2 * ((precisionSelf * recallSelf) /
(precisionSelf + recallSelf)),
truePosSelfA, truePosSelfB, falsePosSelf, falseNegSelf)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/ self) outside', precisionSelfOut,
recallSelfOut,
2 * ((precisionSelfOut * recallSelfOut) /
(precisionSelfOut + recallSelfOut)),
truePosSelfOutA, truePosSelfOutB, falsePosSelfOut, falseNegSelfOut)
else:
sanityCheck(truePosA, truePosB, falsePos, falseNeg, truePosSelfA,
truePosSelfB, falsePosSelf, falseNegSelf, pairs, options)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/o self)', precision, recall,
2 * (precision * recall) / (precision + recall),
truePosA, truePosB, falsePos, falseNeg)
print '%35s, %10s, %10s, %10s, %9s, %9s, %9s, %9s' % ('Overall (w/ self)', precisionSelf, recallSelf,
2 * ((precisionSelf * recallSelf) /
(precisionSelf + recallSelf)),
truePosSelfA, truePosSelfB, falsePosSelf, falseNegSelf)
reportPairs(pairs, options)
def sanityCheckRegionMode(truePosA, truePosB, falsePos, falseNeg,
truePosOutA, truePosOutB, falsePosOut, falseNegOut,
truePosSelfA, truePosSelfB, falsePosSelf, falseNegSelf,
truePosSelfOutA, truePosSelfOutB, falsePosSelfOut, falseNegSelfOut,
pairs, options):
# Each column of numbers reported in the rows labeled "Overall" should be the sum of
# the numbers contained in the column corresponding to "inside" or "outside" status.
obsTruePosA = 0
obsTruePosB = 0
obsFalsePos = 0
obsFalseNeg = 0
obsTruePosOutA = 0
obsTruePosOutB = 0
obsFalsePosOut = 0
obsFalseNegOut = 0
obsTruePosASelf = 0
obsTruePosBSelf = 0
obsFalsePosSelf = 0
obsFalseNegSelf = 0
obsTruePosASelfOut = 0
obsTruePosBSelfOut = 0
obsFalsePosSelfOut = 0
obsFalseNegSelfOut = 0
for pair in pairs:
p = pairs[pair]
if p.niceNames.startswith('self-'):
obsTruePosASelf += p.truePosRegionA
obsTruePosBSelf += p.truePosRegionB
obsFalsePosSelf += p.falsePosRegion
obsFalseNegSelf += p.falseNegRegion
obsTruePosASelfOut += p.truePosRegionOutsideA
obsTruePosBSelfOut += p.truePosRegionOutsideB
obsFalsePosSelfOut += p.falsePosRegionOutside
obsFalseNegSelfOut += p.falseNegRegionOutside
else:
obsTruePosA += p.truePosRegionA
obsTruePosB += p.truePosRegionB
obsFalsePos += p.falsePosRegion
obsFalseNeg += p.falseNegRegion
obsTruePosOutA += p.truePosRegionOutsideA
obsTruePosOutB += p.truePosRegionOutsideB
obsFalsePosOut += p.falsePosRegionOutside
obsFalseNegOut += p.falseNegRegionOutside
obsTruePosASelf += obsTruePosA
obsTruePosBSelf += obsTruePosB
obsFalsePosSelf += obsFalsePos
obsFalseNegSelf += obsFalseNeg
obsTruePosASelfOut += obsTruePosOutA
obsTruePosBSelfOut += obsTruePosOutB
obsFalsePosSelfOut += obsFalsePosOut
obsFalseNegSelfOut += obsFalseNegOut
for obs, exp in [(obsTruePosA, truePosA), (obsTruePosB, truePosB),
(obsFalsePos, falsePos), (obsFalseNeg, falseNeg),
(obsTruePosOutA, truePosOutA), (obsTruePosOutB, truePosOutB),
(obsFalsePosOut, falsePosOut), (obsFalseNegOut, falseNegOut),
(obsTruePosASelf, truePosSelfA), (obsTruePosBSelf, truePosSelfB),
(obsFalsePosSelf, falsePosSelf), (obsFalseNegSelf, falseNegSelf),
(obsTruePosASelfOut, truePosSelfOutA), (obsTruePosBSelfOut, truePosSelfOutB),
(obsFalsePosSelfOut, falsePosSelfOut), (obsFalseNegSelfOut, falseNegSelfOut),
]:
assert(obs == exp)
def sanityCheck(truePosA, truePosB, falsePos, falseNeg, truePosASelf,
truePosBSelf, falsePosSelf, falseNegSelf, pairs, options):
# Each column of numbers reported in the rows labeled "Overall" should be the sum of
# the numbers contained in the column.
obsTruePosA = 0
obsTruePosB = 0
obsFalsePos = 0
obsFalseNeg = 0
obsTruePosASelf = 0
obsTruePosBSelf = 0
obsFalsePosSelf = 0
obsFalseNegSelf = 0
for pair in pairs:
p = pairs[pair]
if p.niceNames.startswith('self-'):
obsTruePosASelf += p.truePosA
obsTruePosBSelf += p.truePosB
obsFalsePosSelf += p.falsePos
obsFalseNegSelf += p.falseNeg
else:
obsTruePosA += p.truePosA
obsTruePosB += p.truePosB
obsFalsePos += p.falsePos
obsFalseNeg += p.falseNeg
obsTruePosASelf += obsTruePosA
obsTruePosBSelf += obsTruePosB
obsFalsePosSelf += obsFalsePos
obsFalseNegSelf += obsFalseNeg
for obs, exp in [(obsTruePosA, truePosA), (obsTruePosB, truePosB),
(obsFalsePos, falsePos), (obsFalseNeg, falseNeg),
(obsTruePosASelf, truePosASelf), (obsTruePosBSelf, truePosBSelf),
(obsFalsePosSelf, falsePosSelf), (obsFalseNegSelf, falseNegSelf),]:
assert(obs == exp)
def isRegionMode(pairs):
""" Detects if a BED was used to restrict tests to a region
"""
for pair in pairs:
p = pairs[pair]
if p.truePosRegionA > 0 or p.truePosRegionB > 0 or p.falsePosRegion > 0 or p.falseNegRegion > 0:
            return True
    return False
def getItem(pairs, item, alignSelf):
ans = 0
for pair in pairs:
p = pairs[pair]
if not alignSelf:
if len(p.species) == 1:
continue
ans += p.__dict__[item]
return ans
def main():
usage = ('usage: %prog \n\n'
'%prog \n')
parser = OptionParser(usage)
initOptions(parser)
options, args = parser.parse_args()
checkOptions(options, args, parser)
summarize(options)
if __name__ == '__main__':
main()
| 1.765625 | 2 |
tests/test_blocks.py | regulusweb/wagtail-extensions | 3 | 12796861 | import datetime
import pytest
from unittest.mock import patch
from django.core.exceptions import ValidationError
from freezegun import freeze_time
from phonenumber_field.phonenumber import PhoneNumber
from wagtail.core.models import Page
from wagtail_extensions.blocks import (
DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock
)
@pytest.mark.django_db
@pytest.fixture
def page():
# Homepage is created by Wagtail's initial migrations
# But let's create our own child page for testing with.
homepage = Page.objects.get(url_path='/home/')
page = Page(title='A test page', slug="test")
homepage.add_child(instance=page)
return page
def test_department_block_clean_invalid():
department = DepartmentBlock()
with pytest.raises(ValidationError):
department.clean({})
def test_department_block_clean_valid_with_both():
department = DepartmentBlock()
department.clean({'name':'Test', 'email':'<EMAIL>', 'phones': ['+447528712345']})
def test_department_block_to_python_empty():
department = DepartmentBlock()
department.to_python({})
def test_department_block_to_python_strip_empty_phonenumbers():
department = DepartmentBlock()
value = department.get_prep_value({'phones': ['', '+447528712345', '']})
assert value['phones'] == ['+447528712345']
def test_link_block_with_url():
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == '/hello/'
def test_link_block_with_url_and_text():
block = LinkBlock()
value = block.to_python({
'text': 'Hello World',
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == 'Hello World'
def test_link_block_with_empty_string_text():
block = LinkBlock()
value = block.to_python({
'text': '',
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == '/hello/'
def test_link_block_with_missing_streamblock():
block = LinkBlock()
value = block.to_python({
'text': '',
'link': []
})
assert value.link_url == ''
assert value.link_text == ''
@pytest.mark.django_db
def test_link_block_with_page(page):
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'page', 'value': page.pk}]
})
assert value.link_url == page.url
assert value.link_text == page.title
@pytest.mark.django_db
def test_link_block_with_page_that_no_longer_exists(page):
"""
If a page referenced by a PageChooserBlock has been deleted, the block value will be None.
"""
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'page', 'value': None}]
})
assert value.link_url == ''
assert value.link_text == ''
@pytest.mark.django_db
def test_link_block_with_page_and_text(page):
block = LinkBlock()
value = block.to_python({
        'text': 'Hello World',
'link': [{'type': 'page', 'value': page.pk}]
})
assert value.link_url == page.url
assert value.link_text == 'Hello World'
def test_link_block_clean_for_required():
block = LinkBlock()
value = block.to_python({
'text': 'Hello World',
'link': [] # This must not be empty if the field is required
})
with pytest.raises(ValidationError):
block.clean(value)
def test_link_block_clean_for_not_required():
block = LinkBlock(required=False)
value = block.to_python({
        'text': 'Hello World',
'link': [] # Can be empty if the field is not required
})
# This should not raise an exception
block.clean(value)
@freeze_time("2017-01-01")
def test_openingtime_block_clean_date_in_past():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'date': '2016-01-01'})
def test_openingtime_block_clean_end_before_start():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'start': '20:00', 'end': '08:00', 'weekday': '1'})
def test_openingtime_block_clean_no_weekday_or_date():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'start': '20:00', 'end': '08:00'})
@freeze_time("2017-01-01")
def test_openingtime_block_clean_valid():
openingtime = OpeningTimeBlock()
openingtime.clean({'start': '08:00', 'end': '20:00', 'date': '2017-01-01'})
def test_openingtime_block_to_python_empty():
openingtime = OpeningTimeBlock()
openingtime.to_python({'label': '', 'date': None, 'closed': False, 'start': None, 'end': None, 'weekday': ''})
# Pass without error
def test_openingtime_block_to_python_cast_weekday():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({'weekday': '5'})
assert value['weekday'] == 5
def test_openingtime_block_to_python_public_label():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({'weekday': '7'})
assert value['label'] == OpeningTimeBlock.PUBLIC_LABEL
def test_openingtime_block_to_python_public_with_label():
openingtime = OpeningTimeBlock()
label = 'Easter sunday'
value = openingtime.to_python({'weekday': '7', 'label': label})
assert value['label'] == label
def test_openingtime_block_single_date_empty():
assert OpeningTimeBlock.single_date({}) == False
def test_openingtime_block_single_date_with_date():
assert OpeningTimeBlock.single_date({'date': 'some date'}) == True
def test_openingtime_block_single_date_public():
assert OpeningTimeBlock.single_date({'weekday': 7}) == True
def test_openingtime_block_next_date_empty():
assert OpeningTimeBlock.next_date({}) is None
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_today():
assert OpeningTimeBlock.next_date({'weekday': 2}) == datetime.date(2017, 12, 13)
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_sunday():
assert OpeningTimeBlock.next_date({'weekday': 6}) == datetime.date(2017, 12, 17)
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_public():
assert OpeningTimeBlock.next_date({'weekday': 7}) is None
def test_openingtimes_block_time_keyfunc_specific():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({})
with patch.object(openingtime, 'single_date', return_value=True):
out = OpeningTimesBlock.time_keyfunc(value)
assert out is value
def test_openingtimes_block_time_keyfunc_non_specific():
value = OpeningTimeBlock().to_python({'closed': False, 'start': '5:00', 'end': '10:00'})
out = OpeningTimesBlock.time_keyfunc(value)
assert out == (False, datetime.time(5), datetime.time(10))
@patch('wagtail_extensions.blocks.groupby')
def test_openingtimes_block_group_times(mocked_groupby):
value = {}
mocked_groupby.return_value = [('first', [1, 4, 5]), ('second', [7, 10])]
out = OpeningTimesBlock.group_times(value)
assert out == [[1, 4, 5], [7, 10]]
mocked_groupby.assert_called_once_with(value, OpeningTimesBlock.time_keyfunc)
def test_openingtimes_block_get_time_for_date_empty():
assert OpeningTimesBlock.get_time_for_date(None, None) is None
def test_openingtimes_block_get_time_for_date_no_times():
assert OpeningTimesBlock.get_time_for_date({}, datetime.date(2017, 12, 10)) is None
def test_openingtimes_block_get_time_for_date_times_date():
match = {'date': datetime.date(2017, 12, 10)}
value = {
'times': [
{'weekday': 4},
match,
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 10)) == match
def test_openingtimes_block_get_time_for_date_times_weekday():
match = {'weekday': 6}
value = {
'times': [
{'weekday': 4},
{'date': datetime.date(2017, 12, 10)},
match,
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 17)) == match
def test_openingtimes_block_get_time_for_date_times_no_match():
value = {
'times': [
{'weekday': 4},
{'date': datetime.date(2017, 12, 10)},
{'weekday': 2},
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 17)) is None
@freeze_time('2017-06-28')
def test_openingtimes_block_opening_today():
openingtimes = OpeningTimesBlock
with patch.object(openingtimes, 'get_time_for_date') as mocked_get:
value = 'test'
out = openingtimes.opening_today(value)
mocked_get.assert_called_once_with(value, datetime.date(2017, 6, 28))
assert out == mocked_get.return_value
def test_openingtimes_block_get_context():
openingtimes = OpeningTimesBlock()
value = {'times': [1, 5, 10]}
with patch.object(openingtimes, 'group_times') as mocked_group,\
patch.object(openingtimes, 'opening_today') as mocked_today:
ctx = openingtimes.get_context(value)
mocked_group.assert_called_once_with([1, 5, 10])
mocked_today.assert_called_once_with(value)
assert ctx['times'] == mocked_group.return_value
assert ctx['today'] == mocked_today.return_value
def test_phone_block_get_prep_value():
phone = PhoneBlock()
number = PhoneNumber.from_string('+447528712345')
number_str = phone.get_prep_value(number)
assert number_str == '+447528712345'
def test_phone_block_to_python():
phone = PhoneBlock()
number = phone.to_python('+447528712345')
assert number == PhoneNumber.from_string('+447528712345')
def test_phone_block_to_python_empty():
phone = PhoneBlock()
assert phone.to_python('') == ''
def test_images_block_get_context():
block = ImagesBlock()
assert block.get_context({'images': ['an image', 'another image', 'yet another image']})['column_width'] == 4
def test_images_block_get_context_empty_list():
block = ImagesBlock()
assert block.get_context({})['column_width'] == 12
| 2.140625 | 2 |
django_workflow_system/utils/response_schema_handlers/__init__.py | eikonomega/django-workflow-system | 2 | 12796862 | """Convenience imports."""
from .date_range_question import get_response_schema as date_range_question_schema
from .free_form_question import get_response_schema as free_form_question_schema
from .multiple_choice_question import (
get_response_schema as multiple_choice_question_schema,
)
from .numeric_range_question import get_response_schema as numeric_range_question_schema
from .single_choice_question import get_response_schema as single_choice_question_schema
from .true_false_question import get_response_schema as true_false_question_schema
| 1.0625 | 1 |
utils/start.py | shadowcompiler/Jo | 0 | 12796863 | <filename>utils/start.py
# coding:utf-8
# open start message file
start_file = open('ressources/start.txt', 'r', encoding='utf-8')
message = start_file.read()
start_file.close()
| 2.078125 | 2 |
bluebird/sim_client/bluesky/sim_client.py | rkm/bluebird | 8 | 12796864 | """
BlueSky simulation client class
"""
# TODO: Need to re-add the tests for string parsing/units from the old API tests
import os
from typing import List
from semver import VersionInfo
from .bluesky_aircraft_controls import BlueSkyAircraftControls
from .bluesky_simulator_controls import BlueSkySimulatorControls
from bluebird.settings import Settings
from bluebird.sim_client.bluesky.bluesky_client import BlueSkyClient
from bluebird.utils.abstract_sim_client import AbstractSimClient
from bluebird.utils.timer import Timer
_BS_MIN_VERSION = os.getenv("BS_MIN_VERSION")
if not _BS_MIN_VERSION:
raise ValueError("The BS_MIN_VERSION environment variable must be set")
MIN_SIM_VERSION = VersionInfo.parse(_BS_MIN_VERSION)
# TODO Check cases where we need this
def _assert_valid_args(args: list):
"""
Since BlueSky only accepts commands in the form of (variable-length) strings, we
need to check the arguments for each command string we construct before sending it
"""
# Probably a cleaner way of doing this...
assert all(
x and not x.isspace() and x != "None" for x in map(str, args)
), f"Invalid argument in : {args}"
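# Illustrative behaviour of the check above (added comments, not original code):
#   _assert_valid_args(["ALT", "KL123", "FL100"])   # passes silently
#   _assert_valid_args(["ALT", None, "FL100"])      # AssertionError: str(None) == "None"
#   _assert_valid_args(["ALT", " ", "FL100"])       # AssertionError: whitespace-only argument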
class SimClient(AbstractSimClient):
"""AbstractSimClient implementation for BlueSky"""
@property
def aircraft(self) -> BlueSkyAircraftControls:
return self._aircraft_controls
@property
def simulation(self) -> BlueSkySimulatorControls:
return self._sim_controls
@property
def sim_version(self) -> VersionInfo:
return self._client.host_version
def __init__(self, **kwargs):
self._client = BlueSkyClient()
self._aircraft_controls = BlueSkyAircraftControls(self._client)
self._sim_controls = BlueSkySimulatorControls(self._client)
def start_timers(self) -> List[Timer]:
return self._client.start_timers()
def connect(self, timeout=1) -> None:
self._client.connect(
Settings.SIM_HOST,
event_port=Settings.BS_EVENT_PORT,
stream_port=Settings.BS_STREAM_PORT,
timeout=timeout,
)
def shutdown(self, shutdown_sim: bool = False) -> bool:
self._client.stop()
return True
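# Hedged usage sketch (not from the original repository): talking to a running
# BlueSky simulator. Assumes BS_MIN_VERSION is exported and Settings points at a
# reachable simulator host and ports.
#
#   client = SimClient()
#   client.connect(timeout=5)
#   timers = client.start_timers()
#   ...
#   client.shutdown()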
| 2.546875 | 3 |
Stability.py | ahmadborzou/Study_DM_in_Galaxies-master | 0 | 12796865 | <reponame>ahmadborzou/Study_DM_in_Galaxies-master<gh_stars>0
"""
Created on May 19 2019
author : <NAME>, PhD
email : <EMAIL>
affiliation: Baylor University
"""
import numpy as np
from Galaxies import profile
import Constants as co
import Inputs as ins
import warnings
## a galaxy(DM mass,n0,T0)
gal = profile(ins.m,ins.n0,ins.T0)
def SolveStability():
## increments of xi
dxi = gal.xi(co.parsec*0.1)
## start xi from dxi rather than 0 to avoid singularity
xi = np.copy(dxi)
## temperature setup
y = ins.y(xi)
y_p = ins.y_p(xi)
## fugacity setup
lns = 0. ## s0=1 =>lns0 = 0
lns_p = 0.
## number of loops in the while loop
N = 0
## mass of galaxy
M = 0.
## for free falling speed
v02_vr2 = 0.
## To calculate the Gravitational Potential energy
temphi = 0.
## containers to store the outputs
xi_arr = []
r_arr = []
lns_arr = []
lnsp_arr = []
lnspp_arr = []
y_arr = []
yp_arr = []
ypp_arr = []
rho_arr = []
rhop_arr = []
P_arr = []
M_arr = []
v02_vr2_arr = []
temphi_arr = []
## append the initial values
xi_arr.append(xi)
r_arr.append(gal.r(xi))
lns_arr.append(lns)
lnsp_arr.append(lns_p)
lnspp_arr.append(0.)
y_arr.append(y)
yp_arr.append(y_p)
ypp_arr.append(0.)
rho_arr.append(gal.m*gal.n(lns,y))
rhop_arr.append(gal.m*gal.n_p(lns_p,lns,y_p,y))
P_arr.append(gal.P(lns,y))
M_arr.append(M)
v02_vr2_arr.append(v02_vr2)
temphi_arr.append(temphi)
## stop the loop when density is 1/1000 of the initial value
while gal.n(lns,y) > 1.e-3*gal.n0:
## second derivative of y and s
y_pp = ins.y_pp(xi)
lns_pp = gal.lnspp(lns_p,lns,y_pp,y_p,y,xi)
## set the temperature and its derivative
y = ins.y(xi)
y_p = ins.y_p(xi)
## determine s and y
if N == 0: ## Newton method in the first loop
lns += lns_p*dxi
lns_p += lns_pp*dxi
############
## adapt dxi
############
## tolerance
toler = 0.003
## at dxi/2. forward
lns_12 = lns_arr[-1] + lns_p*(dxi/2.)
lns_p_12 = lnsp_arr[-1] + lns_pp*(dxi/2.)
## at dxi
lns_ = lns_12 + lns_p_12*(dxi/2.)
lns_pp_12 = gal.lnspp(lns_p_12,lns_12,ins.y_pp(xi+dxi/2.),ins.y_p(xi+dxi/2.),ins.y(xi+dxi/2.),xi+dxi/2.)
lns_p_ = lns_p_12 + lns_pp_12*(dxi/2.)
## compare densities
n1 = gal.n(lns,y)
n2 = gal.n(lns_,y)
n0 = gal.n(lns_arr[-1],y_arr[-1])
error = abs(n1-n2)/(n0*dxi)
## suggested dxi
dxi_suggest = toler/error*dxi
## the smallest dxi
if dxi_suggest < gal.xi(co.parsec*0.01):
dxi = gal.xi(co.parsec*0.01)
## the largest dxi
elif dxi_suggest > gal.xi(co.parsec):
dxi = gal.xi(co.parsec)
## if in the range, accept the suggested dxi
else:
dxi = dxi_suggest
print("new dxi: %g (pc)"%(dxi/gal.xi(co.parsec)))
############
## end adapt
############
else: ## Verlet method
lns = -lns_arr[-2] + 2.*lns + lns_pp*dxi**2
lns_p = lnsp_arr[-2] + 2.*lns_pp*dxi
## for unrealistic temperature profiles temperature can turn negative
## break the loop and inform the user
if y < 0.:
print ("***\n***\nNegative temperature. Breaking ...\n***\n***")
raise(Exception("Negative temperature. Unacceptable solution"))
break
## determine xi
xi += dxi
## mass at radius r in SI units using M = M + dM
M += 4.*np.pi*(gal.r(xi))**2*gal.m*gal.n(lns,y)*gal.r(dxi)
v02_vr2 +=2.*co.G*M*gal.r(dxi)/(gal.r(xi))**2
temphi += gal.r(xi)*gal.m*gal.n(lns,y)*gal.r(dxi)
## break the loop and inform the user if derivative of density is infinite
if gal.n_p(lns_p,lns,y_p,y) == np.inf:
print ("***\n***\nderivative of number density is infinite.\nnumber density will sharply fall to zero.\nthis is usually the edge of the system.\nBreaking the while loop ...\n***\n***")
break
## break the loop and inform the user if fugacity or its derivatives are infinite
if lns == np.inf:
print ("***\n***\nlog of z/z0 is infinite.\nThis is full degeneracy limit. We can't handle this at this point.\nBreaking the loop ...\n***\n***")
break
if lns_p == np.inf:
print ("***\n***\n1st derivative of log of z/z0 is infinite.\nWe can't handle this at this point.\nBreaking the loop ...\n***\n***")
break
if lns_pp == np.inf:
print ("***\n***\n2nd derivative of log of z/z0 is infinite.\nWe can't handle this at this point.\nBreaking the loop ...\n***\n***")
break
## add the calculated quantities into the containers
xi_arr.append(xi)
r_arr.append(gal.r(xi))
lns_arr.append(lns)
lnsp_arr.append(lns_p)
lnspp_arr.append(lns_pp)
y_arr.append(y)
yp_arr.append(y_p)
ypp_arr.append(y_pp)
rho_arr.append(gal.m*gal.n(lns,y))
rhop_arr.append(gal.m*gal.n_p(lns_p,lns,y_p,y))
P_arr.append(gal.P(lns,y))
M_arr.append(M)
v02_vr2_arr.append(v02_vr2)
temphi_arr.append(temphi)
try:
if N%1000 == 0 and N>0:
print ("r: %1.1e (kpc) xi: %1.1e "%(gal.r(xi)/(co.parsec*1000.),xi))
print ("lnz: %1.1e lns: %1.1e lnsp: %1.1e lnspp: %1.1e"%(gal.lnz0+lns,lns,lns_p,lns_pp))
print ("T: %1.1e (Kelvin) y: %1.1e yp: %1.1e ypp: %1.1e"%(gal.T0*y,y,y_p,y_pp))
print ("n: %1.1e (1/m^3) rho: %1.1e (kg/m^3)"%(gal.n(lns,y),gal.n(lns,y)*gal.m))
print ("M: %1.1e (sun)"%(M/co.MSun))
print ("--")
except:
## this is just a print. no action needed
pass
## change the counter
N+=1
###################################
## convert the lists to numpy array
y_arr = np.array(y_arr)
yp_arr = np.array(yp_arr)
ypp_arr = np.array(ypp_arr)
lns_arr = np.array(lns_arr)
lnsp_arr = np.array(lnsp_arr)
r_arr = np.array(r_arr)
rho_arr = np.array(rho_arr)
P_arr = np.array(P_arr)
M_arr = np.array(M_arr)
v02_vr2_arr = np.array(v02_vr2_arr)
temphi_arr = np.array(temphi_arr)
## return the containers
return lns_arr, lnsp_arr, lnspp_arr,\
y_arr, yp_arr, ypp_arr,\
r_arr, rho_arr, P_arr,\
M_arr, v02_vr2_arr, temphi_arr
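# Minimal usage sketch, added for illustration (requires the Galaxies, Constants
# and Inputs modules imported above; not part of the original script):
if __name__ == "__main__":
    (lns_a, lnsp_a, lnspp_a,
     y_a, yp_a, ypp_a,
     r_a, rho_a, P_a,
     M_a, v02_vr2_a, temphi_a) = SolveStability()
    print("integrated %d radial steps out to r = %g kpc"
          % (len(r_a), r_a[-1]/(co.parsec*1000.)))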
| 2.265625 | 2 |
gerateData_RuleClass.py | rafael-veiga/Classification-algorithm-of-Congenital-Zika-Syndrome-characterizations-diagnosis-and-validation | 0 | 12796866 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 08:49:21 2020
@author: rafae
"""
import pandas as pd
import numpy as np
def gerarbanco():
banco = pd.read_stata("Microcefalia MS analysis 20160609.dta")
circ = pd.read_csv("circumference.csv",sep=";",index_col='sem')
banco.index = range(len(banco.index))
bancoNovo = pd.DataFrame()
banco.NV_USG_RESULT.replace( ['ALTERADO','NORMAL'],['Alterado','Normal'],inplace=True)
# rule classification
sexo = banco.TP_SEXO
classFinal = pd.Series([np.nan]*len(sexo))
classFinal[banco.NV_CMV=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_HCV=='Reagente'] = 'Discarded'
classFinal[banco.NV_RUBEOLA=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_TOXO=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_RM_RESULT=='Normal'] = 'Discarded'
classFinal[banco.NV_TC_RESULT=='Normal'] = 'Discarded'
classFinal[banco.NV_ZIKA=='Positivo'] = 'Definite'
# organize database
tamanhoCabe = banco.headcirc
bancoNovo['sexo'] = list(sexo)
bancoNovo['tamanhoCabe'] = list(tamanhoCabe)
bancoNovo['classFeto'] = list(banco.TP_CLASSIFICACAO_FETO_RN)
semanaGes = banco.SINASC_SEMAGESTAC
missing = pd.Series([np.nan]*len(sexo))
missing[bancoNovo.tamanhoCabe.isnull()]=1
missing[sexo.isnull()]=1
missing[bancoNovo.classFeto.isnull()]=1
missing[semanaGes.isnull()]=1
micro = pd.Series([np.nan]*len(sexo))
for i in range(len(sexo)):
if missing[i]!=1:
if semanaGes[i]<=42 and semanaGes[i]>=14:
ref1 =0
if sexo[i]=='Masculino':
ref1 = circ.boy_min[semanaGes[i]]
else:
ref1 = circ.girl_min[semanaGes[i]]
if tamanhoCabe[i]<ref1:
micro[i]=1
else:
micro[i]=0
bancoNovo['micro'] = list(micro)
banco['micro']=micro
bancoNovo['NV_TC_MICRO'] = list(banco.NV_TC_MICRO)
#sorologia
bancoNovo['NV_Storch'] =list(banco.lab_STORCH)
bancoNovo['NV_sifilis'] = list(banco.NV_SIFILIS)
bancoNovo['NV_TOXO'] = list(banco.NV_TOXO.replace(['IgG reagente','IgM reagente'],['Reagente','Reagente']))
bancoNovo['NV_CMV'] =list( banco.NV_CMV)
bancoNovo['NV_DENGUE']=list(banco.NV_DENGUE.replace(['IgG reagente','IgM reagente'],['Reagente','Reagente']))
bancoNovo['NV_CHIK']=list(banco.NV_CHIK)
count_storch = pd.Series([np.nan]*len(sexo))
for i in range(len(sexo)):
if len(bancoNovo.NV_sifilis[i].strip())>1:
count_storch[i]=1
if len(bancoNovo.NV_CMV[i].strip())>1:
if count_storch.isnull()[i]:
count_storch[i]=1
else:
count_storch[i]=count_storch[i]+1
if len(bancoNovo.NV_TOXO[i].strip())>1:
if count_storch.isnull()[i]:
count_storch[i]=1
else:
count_storch[i]=count_storch[i]+1
banco['count_storch'] = count_storch
bancoNovo['count_storch'] = list(count_storch)
#exames
bancoNovo['NV_USG_MICRO']=list(banco.NV_USG_MICRO)
bancoNovo['NV_TC_MICRO']=list(banco.NV_TC_MICRO)
bancoNovo['NV_RM_MICRO']=list(banco.NV_RM_MICRO)
bancoNovo['NV_USG_RESULT']=list(banco.NV_USG_RESULT)
bancoNovo['NV_TC_RESULT']=list(banco.NV_TC_RESULT)
bancoNovo['NV_RM_RESULT']=list(banco.NV_TC_RESULT)
texto = banco.NV_RM_CALC
texto = texto + ' ' + banco.NV_USG_CALC_DESC
texto = texto + ' ' + banco.NV_RM_CALC
texto = texto + ' ' + banco.NV_TC_CALC
texto = texto + ' ' + banco.NV_USG_OUTRO
texto = texto + ' ' + banco.NV_TC_OUTRO
texto = texto + ' ' + banco.NV_RM_OUTRO
texto = texto + ' ' + banco.NV_USG_VENTR
texto = texto + ' ' + banco.NV_TC_VENTR
texto = texto + ' ' + banco.NV_RM_VENTR
missImagem = pd.Series([np.nan]*len(sexo))
for i in range(len(sexo)):
if len(banco.NV_USG_RESULT[i].strip())<2 and len(banco.NV_TC_RESULT[i].strip())<2 and len(banco.NV_RM_RESULT[i].strip())<2 and len(texto[i].strip())<2:
missImagem[i] = 1
else:
missImagem[i] = 0
texto = texto + ' ' + banco.DS_OBSERVACOES_GERAIS
for i in range(len(texto)):
texto[i] = texto[i].strip().replace('.',' ').replace(';',' ').replace(',',' ').replace('?',' ').replace("'",' ').replace('=','').replace('-',' ').replace('+',' ').replace('/',' ').replace('(',' ').replace(')',' ').replace('<',' ').replace('>',' ').replace(':',' ').replace('&',' ').replace('¿',' ').replace('%',' ').replace('\n',' ').replace('"',' ').lower()
bancoNovo['missImagem'] = list(missImagem)
bancoNovo['casegr'] = list(banco.casegr)
bancoNovo['classFinal']=list(classFinal)
return texto,bancoNovo
texto,bancoNovo = gerarbanco()
bancoNovo['texto'] = list(texto)
# type class and save
typeClass= pd.Series([np.nan]*len(bancoNovo))
typeClass[bancoNovo.classFinal.isnull()==False]='rule'
typeClass[(typeClass.isnull()) & (bancoNovo.texto.str.strip()!='')]='group2'
typeClass[typeClass.isnull()]='group1'
bancoNovo['typeClass']=list(typeClass)
bancoNovo.to_csv('banco_total.csv')
 | 2.53125 | 3 |
itertools_ex.py | byaka/functionsEx_new | 0 | 12796867 | <gh_stars>0
# -*- coding: utf-8 -*-
import types, collections
try:
   from itertools import zip_longest # Python 3
except ImportError: # Python 2 fallback
   from itertools import izip_longest as zip_longest
"""
Collection of methods for working with Generators, which respects `yield .. send ..` ability.
"""
def gExtend(prepend, g, append=None):
_gType=types.GeneratorType
_iType=collections.Iterable
if not isinstance(g, _gType):
raise TypeError('Wrong type, second arg must be Generator')
if isinstance(prepend, _gType): gPrepend=prepend
elif isinstance(prepend, _iType): gPrepend=g
else:
raise TypeError('Wrong type, first arg must be Generator or Iterable')
if append is None: gAppend=None
elif isinstance(append, _gType): gAppend=append
elif isinstance(append, _iType): gAppend=None
else:
raise TypeError('Wrong type, arg `append` must be Generator or Iterable')
for v in prepend:
extCmd=(yield v)
if extCmd is not None:
yield # this allows to use our generator inside `for .. in ..` without skipping on `send`
gPrepend.send(extCmd)
#
for v in g:
extCmd=(yield v)
if extCmd is not None:
yield # this allows to use our generator inside `for .. in ..` without skipping on `send`
g.send(extCmd)
#
if append is not None:
      for v in append:
extCmd=(yield v)
if extCmd is not None:
yield # this allows to use our generator inside `for .. in ..` without skipping on `send`
if gAppend is not None:
gAppend.send(extCmd)
def gCheck(g):
try:
return gExtend((next(g),), g)
except StopIteration:
return ()
def gChain(*gens):
for g in gens:
isGen=isinstance(g, types.GeneratorType)
for o in g:
extCmd=(yield o)
if extCmd is not None:
yield # this allows to use our generator inside `for .. in ..` without skipping on `send`
if isGen:
g.send(extCmd)
def grouper(n, obj, fill=None):
   # group items by n (ABCDEFG --> ABC DEF Gxx if n=3)
   args=[iter(obj)]*n
   return zip_longest(*args, fillvalue=fill)
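if __name__ == '__main__':
   # Small usage sketch added for illustration; not part of the original module.
   print(list(grouper(3, 'ABCDEFG', fill='x'))) # [('A','B','C'), ('D','E','F'), ('G','x','x')]
   def _gen():
      for i in range(3):
         yield i
   print(list(gExtend(['a', 'b'], _gen()))) # ['a', 'b', 0, 1, 2]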
| 2.859375 | 3 |
iseq/gencode.py | EBI-Metagenomics/iseq | 0 | 12796868 | <filename>iseq/gencode.py
# NCBI genetic code table version 4.6
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
__all__ = ["GeneticCode"]
genetic_codes: List[Tuple[str, str, int]] = []
names: Dict[str, int] = {}
alt_names: Dict[str, int] = {}
ids: Dict[int, int] = {}
@dataclass
class GeneticCode:
name: str
alt_name: str
id: int
def __init__(
self,
name: Optional[str] = None,
alt_name: Optional[str] = None,
id: Optional[int] = None,
):
n = sum([name is None, alt_name is None, id is None])
if n != 2:
raise ValueError("You must use one, and only one, parameter.")
if name is not None:
if name in names:
self.name, self.alt_name, self.id = genetic_codes[names[name]]
return
raise ValueError(f"Unknown name {name}.")
if alt_name is not None:
if alt_name in alt_names:
self.name, self.alt_name, self.id = genetic_codes[alt_names[alt_name]]
return
raise ValueError(f"Unknown alternative name {alt_name}.")
assert id is not None
self.name, self.alt_name, self.id = genetic_codes[ids[id]]
def register_ncbi_genetic_code(name: str, alt_name: str, id: int):
names[name] = len(genetic_codes)
if alt_name != "":
alt_names[alt_name] = len(genetic_codes)
ids[id] = len(genetic_codes)
genetic_codes.append((name, alt_name, id))
register_ncbi_genetic_code(
"Standard",
"SGC0",
1,
)
register_ncbi_genetic_code(
"Vertebrate Mitochondrial",
"SGC1",
2,
)
register_ncbi_genetic_code(
"Yeast Mitochondrial",
"SGC2",
3,
)
register_ncbi_genetic_code(
"Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate "
"Mitochondrial; Mycoplasma; Spiroplasma",
"SGC3",
4,
)
register_ncbi_genetic_code(
"Invertebrate Mitochondrial",
"SGC4",
5,
)
register_ncbi_genetic_code(
"Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear",
"SGC5",
6,
)
register_ncbi_genetic_code(
"Echinoderm Mitochondrial; Flatworm Mitochondrial",
"SGC8",
9,
)
register_ncbi_genetic_code(
"Euplotid Nuclear",
"SGC9",
10,
)
register_ncbi_genetic_code(
"Bacterial, Archaeal and Plant Plastid",
"",
11,
)
register_ncbi_genetic_code(
"Alternative Yeast Nuclear",
"",
12,
)
register_ncbi_genetic_code(
"Ascidian Mitochondrial",
"",
13,
)
register_ncbi_genetic_code(
"Alternative Flatworm Mitochondrial",
"",
14,
)
register_ncbi_genetic_code(
"Blepharisma Macronuclear",
"",
15,
)
register_ncbi_genetic_code(
"Chlorophycean Mitochondrial",
"",
16,
)
register_ncbi_genetic_code(
"Trematode Mitochondrial",
"",
21,
)
register_ncbi_genetic_code(
"Scenedesmus obliquus Mitochondrial",
"",
22,
)
register_ncbi_genetic_code(
"Thraustochytrium Mitochondrial",
"",
23,
)
register_ncbi_genetic_code(
"Rhabdopleuridae Mitochondrial",
"",
24,
)
register_ncbi_genetic_code(
"Candidate Division SR1 and Gracilibacteria",
"",
25,
)
register_ncbi_genetic_code(
"Pachysolen tannophilus Nuclear",
"",
26,
)
register_ncbi_genetic_code(
"Karyorelict Nuclear",
"",
27,
)
register_ncbi_genetic_code(
"Condylostoma Nuclear",
"",
28,
)
register_ncbi_genetic_code(
"Mesodinium Nuclear",
"",
29,
)
register_ncbi_genetic_code(
"Peritrich Nuclear",
"",
30,
)
register_ncbi_genetic_code(
"Blastocrithidia Nuclear",
"",
31,
)
register_ncbi_genetic_code(
"Balanophoraceae Plastid",
"",
32,
)
register_ncbi_genetic_code(
"Cephalodiscidae Mitochondrial",
"",
33,
)
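# Brief usage sketch (illustrative, not part of the original module): the three
# keyword constructors are mutually exclusive and resolve to the same table.
if __name__ == "__main__":
    assert GeneticCode(id=1).name == "Standard"
    assert GeneticCode(alt_name="SGC1").id == 2
    assert GeneticCode(name="Euplotid Nuclear").id == 10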
| 2.484375 | 2 |
questions/permutations/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 12796869 | '''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def generate_permutation(nums, ret, curr, visited):
if len(curr) == len(nums):
ret.append(list(curr))
return
for num in nums:
if num in visited:
continue
visited.add(num)
curr.append(num)
generate_permutation(nums, ret, curr, visited)
curr.pop()
visited.remove(num)
ret = []
curr = []
visited = set()
generate_permutation(nums, ret, curr, visited)
return ret
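if __name__ == "__main__":
    # Illustrative check, not part of the original LeetCode submission.
    print(Solution().permute([1, 2, 3]))
    # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]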
| 3.8125 | 4 |
Redis/sentinel.py | Oliver-Yan-2019/algorithm | 1 | 12796870 | from redis import StrictRedis
class SentinelClient(object):
def __init__(self, url, db=None, name='default', health_check_interval=30):
"""create a redis client.
Args:
url: redis server url.
db: redis database, default 0.
name: client name, default 'default'.
health_check_interval: how many seconds to check whether the redis server is healthy.
"""
self.client = StrictRedis.from_url(
url=url,
db=db,
client_name=name,
health_check_interval=health_check_interval,
decode_responses=True
)
if __name__ == '__main__':
_client = SentinelClient(url='redis://localhost:26379')
_pub_sub = _client.client.pubsub()
_pub_sub.psubscribe('*')
for i in _pub_sub.listen():
print(i)
| 2.90625 | 3 |
pymaster/field.py | LSSTDESC/NaMaster | 37 | 12796871 | <reponame>LSSTDESC/NaMaster
from pymaster import nmtlib as lib
import numpy as np
from pymaster.utils import NmtWCSTranslator
class NmtField(object):
"""
An NmtField object contains all the information describing the fields to
correlate, including their observed maps, masks and contaminant templates.
:param mask: array containing a map corresponding to the field's mask. \
Should be 1-dimensional for a HEALPix map or 2-dimensional for a map \
with rectangular pixelization.
:param maps: array containing the observed maps for this field. Should be \
at least 2-dimensional. The first dimension corresponds to the number \
of maps, which should be 1 for a spin-0 field and 2 otherwise. \
The other dimensions should be [npix] for HEALPix maps or \
[ny,nx] for maps with rectangular pixels. For a spin>0 field, the two \
maps to pass should be the usual Q/U Stokes parameters for \
polarization, or e1/e2 (gamma1/gamma2 etc.) in the case of cosmic \
shear. It is important to note that NaMaster uses the same \
polarization convention as HEALPix (i.e. with the x-coordinate \
growing with increasing colatitude theta). It is however more common \
for galaxy ellipticities to be provided using the IAU convention \
(i.e. x grows with declination). In this case, the sign of the \
e2/gamma2 map should be swapped before using it to create an \
NmtField. See more \
`here <https://healpix.jpl.nasa.gov/html/intronode12.htm>`_ . \
If `None`, this field will only contain a mask but no maps. The field \
can then be used to compute a mode-coupling matrix, for instance, \
but not actual power spectra.
:param spin: field's spin. If `None` it will be set to 0 if there is
a single map on input, and will default to 2 if there are 2 maps.
:param templates: array containing a set of contaminant templates for \
this field. This array should have shape [ntemp][nmap]..., where \
ntemp is the number of templates, nmap should be 1 for spin-0 fields \
and 2 otherwise. The other dimensions should be [npix] for \
HEALPix maps or [ny,nx] for maps with rectangular pixels. The \
best-fit contribution from each contaminant is automatically removed \
from the maps unless templates=None.
:param beam: spherical harmonic transform of the instrumental beam \
(assumed to be rotationally symmetric - i.e. no m dependence). If \
None, no beam will be corrected for. Otherwise, this array should \
have at least as many elements as the maximum multipole sampled by \
the maps + 1 (e.g. if a HEALPix map, it should contain 3*nside \
elements, corresponding to multipoles from 0 to 3*nside-1).
:param purify_e: use pure E-modes?
:param purify_b: use pure B-modes?
:param n_iter_mask_purify: number of iterations used to compute an \
accurate SHT of the mask when using E/B purification.
:param tol_pinv: when computing the pseudo-inverse of the contaminant \
covariance matrix, all eigenvalues below tol_pinv * max_eval will be \
treated as singular values, where max_eval is the largest eigenvalue. \
Only relevant if passing contaminant templates that are likely to be \
highly correlated.
:param wcs: a WCS object if using rectangular pixels (see \
http://docs.astropy.org/en/stable/wcs/index.html).
:param n_iter: number of iterations when computing a_lms.
:param lmax_sht: maximum multipole up to which map power spectra will be \
computed. If negative or zero, the maximum multipole given the map \
resolution will be used (e.g. 3 * nside - 1 for HEALPix maps).
:param masked_on_input: set to `True` if input maps and templates are \
already multiplied by the masks. Note that this is not advisable \
if you're using purification.
:param lite: set to `True` if you want to only store the bare minimum \
necessary to run a standard pseudo-Cl with deprojection and \
purification, but you don't care about deprojection bias. This \
will reduce the memory taken up by the resulting object.
"""
def __init__(self, mask, maps, spin=None, templates=None, beam=None,
purify_e=False, purify_b=False, n_iter_mask_purify=3,
tol_pinv=1E-10, wcs=None, n_iter=3, lmax_sht=-1,
masked_on_input=False, lite=False):
self.fl = None
pure_e = 0
if purify_e:
pure_e = 1
pure_b = 0
if purify_b:
pure_b = 1
masked_input = 0
if masked_on_input:
masked_input = 1
wt = NmtWCSTranslator(wcs, mask.shape)
if wt.is_healpix == 0:
if wt.flip_th:
mask = mask[::-1, :]
if wt.flip_ph:
mask = mask[:, ::-1]
mask = mask.reshape(wt.npix)
if maps is None:
mask_only = True
if spin is None:
raise ValueError("Please supply field spin")
lite = True
else:
mask_only = False
if (len(maps) != 1) and (len(maps) != 2):
raise ValueError("Must supply 1 or 2 maps per field")
if spin is None:
if len(maps) == 1:
spin = 0
else:
spin = 2
else:
if (((spin != 0) and len(maps) == 1) or
((spin == 0) and len(maps) != 1)):
raise ValueError("Spin-zero fields are "
"associated with a single map")
if wt.is_healpix and (len(maps[0]) != len(mask)):
raise ValueError("All maps must have the same resolution")
if (pure_e or pure_b) and spin != 2:
raise ValueError("Purification only implemented for spin-2 fields")
# Flatten if 2D maps
if (not mask_only) and (wt.is_healpix == 0):
try:
maps = np.array(maps)
if wt.flip_th:
maps = maps[:, ::-1, :]
if wt.flip_ph:
maps = maps[:, :, ::-1]
maps = maps.reshape([len(maps), wt.npix])
except (IndexError, ValueError):
raise ValueError("Input maps have the wrong shape")
if isinstance(templates, (list, tuple, np.ndarray)):
ntemp = len(templates)
if (len(templates[0]) != 1) and (len(templates[0]) != 2):
raise ValueError("Must supply 1 or 2 maps per field")
if wt.is_healpix == 0: # Flatten if 2D maps
try:
templates = np.array(templates)
if wt.flip_th:
templates = templates[:, :, ::-1, :]
if wt.flip_ph:
templates = templates[:, :, :, ::-1]
templates = templates.reshape([ntemp, len(maps), wt.npix])
except (IndexError, ValueError):
raise ValueError("Input templates have the wrong shape")
if len(templates[0][0]) != len(mask):
raise ValueError("All maps must have the same resolution")
else:
if templates is not None:
raise ValueError("Input templates can only be an array "
"or None\n")
if lmax_sht > 0:
lmax = lmax_sht
else:
lmax = wt.get_lmax()
if isinstance(beam, (list, tuple, np.ndarray)):
if len(beam) <= lmax:
raise ValueError("Input beam must have at least %d elements "
"given the input map resolution" % (lmax))
beam_use = beam
else:
if beam is None:
beam_use = np.ones(lmax+1)
else:
raise ValueError("Input beam can only be an array or None\n")
if mask_only:
self.fl = lib.field_alloc_empty(wt.is_healpix, wt.nside, lmax_sht,
wt.nx, wt.ny,
wt.d_phi, wt.d_theta,
wt.phi0, wt.theta_max, spin,
mask, beam_use, pure_e, pure_b,
n_iter_mask_purify)
else:
if isinstance(templates, (list, tuple, np.ndarray)):
self.fl = lib.field_alloc_new(wt.is_healpix, wt.nside,
lmax_sht, wt.nx, wt.ny,
wt.d_phi, wt.d_theta,
wt.phi0, wt.theta_max, spin,
mask, maps, templates, beam_use,
pure_e, pure_b,
n_iter_mask_purify,
tol_pinv, n_iter, masked_input,
int(lite))
else:
self.fl = lib.field_alloc_new_notemp(
wt.is_healpix, wt.nside, lmax_sht, wt.nx, wt.ny, wt.d_phi,
wt.d_theta, wt.phi0, wt.theta_max, spin,
mask, maps, beam_use, pure_e, pure_b, n_iter_mask_purify,
n_iter, masked_input, int(lite))
self.lite = lite
def __del__(self):
if self.fl is not None:
if lib.field_free is not None:
lib.field_free(self.fl)
self.fl = None
def get_mask(self):
"""
Returns this field's mask as a 1D array.
:return: mask
"""
return lib.get_mask(self.fl, int(self.fl.npix))
def get_maps(self):
"""
Returns a 2D array ([nmap][npix]) corresponding to the observed maps \
for this field. If the field was initialized with contaminant \
templates, the maps returned by this function have their best-fit \
contribution from these contaminants removed.
:return: 2D array of maps
"""
if self.lite:
raise ValueError("Input maps unavailable for lightweight fields")
        maps = np.zeros([self.fl.nmaps, self.fl.npix])
        for imap in range(self.fl.nmaps):
            maps[imap, :] = lib.get_map(self.fl, imap, int(self.fl.npix))
        return maps
def get_alms(self):
"""
Returns a 2D array ([nmap][nlm]) corresponding to the observed \
harmonic coefficients of this field.
:return: 2D array of alms
"""
if self.lite:
raise ValueError("Alms unavailable for lightweight fields")
alms = []
for imap in range(self.fl.nmaps):
alms.append(lib.get_alms(self.fl, imap, int(2*self.fl.nalms)))
alms = np.array(alms).reshape([self.fl.nmaps, self.fl.nalms, 2])
alms = alms[:, :, 0] + 1j * alms[:, :, 1]
return alms
def get_templates(self):
"""
Returns a 3D array ([ntemp][nmap][npix]) corresponding to the \
contaminant templates passed when initializing this field.
:return: 3D array of maps
"""
if self.lite:
raise ValueError("Input maps unavailable for lightweight fields")
        temp = np.zeros([self.fl.ntemp, self.fl.nmaps, self.fl.npix])
        for itemp in range(self.fl.ntemp):
            for imap in range(self.fl.nmaps):
                temp[itemp, imap, :] = lib.get_temp(
                    self.fl, itemp, imap, int(self.fl.npix)
                )
        return temp
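# Hedged usage sketch (comments only, not part of pymaster): building a spin-0
# field from a full-sky HEALPix mask and map. healpy, nside and the random map
# are illustrative assumptions.
#
#   import healpy as hp
#   nside = 64
#   npix = hp.nside2npix(nside)
#   mask = np.ones(npix)
#   mp = np.random.randn(npix)
#   f0 = NmtField(mask, [mp])       # single map -> spin 0
#   f2 = NmtField(mask, [mp, mp])   # two maps -> spin 2 by default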
class NmtFieldFlat(object):
"""
An NmtFieldFlat object contains all the information describing the \
flat-sky fields to correlate, including their observed maps, masks \
and contaminant templates.
:param float lx,ly: size of the patch in the x and y directions (in \
radians)
:param mask: 2D array (nx,ny) containing a HEALPix map corresponding \
to the field's mask.
:param maps: 2 2D arrays (nmaps,nx,ny) containing the observed maps \
for this field. The first dimension corresponds to the number of \
maps, which should be 1 for a spin-0 field and 2 otherwise. \
If `None`, this field will only contain a mask but no maps. The field \
can then be used to compute a mode-coupling matrix, for instance, \
but not actual power spectra.
:param spin: field's spin. If `None` it will be set to 0 if there is
a single map on input, and will default to 2 if there are 2 maps.
:param templates: array of maps (ntemp,nmaps,nx,ny) containing a set \
of contaminant templates for this field. This array should have \
shape [ntemp][nmap][nx][ny], where ntemp is the number of \
templates, nmap should be 1 for spin-0 fields and 2 for spin-2 \
fields, and nx,ny define the patch. The best-fit contribution \
from each contaminant is automatically removed from the maps \
unless templates=None
:param beam: 2D array (2,nl) defining the FT of the instrumental beam \
(assumed to be rotationally symmetric). beam[0] should contain \
the values of l for which de beam is defined, with beam[1] \
containing the beam values. If None, no beam will be corrected \
for.
:param purify_e: use pure E-modes?
:param purify_b: use pure B-modes?
:param tol_pinv: when computing the pseudo-inverse of the contaminant \
covariance matrix, all eigenvalues below tol_pinv * max_eval will \
be treated as singular values, where max_eval is the largest \
eigenvalue. Only relevant if passing contaminant templates that \
are likely to be highly correlated.
:param masked_on_input: set to `True` if input maps and templates are
already multiplied by the masks. Note that this is not advisable
if you're using purification.
:param lite: set to `True` if you want to only store the bare minimum \
necessary to run a standard pseudo-Cl with deprojection and \
purification, but you don't care about deprojection bias. This \
will reduce the memory taken up by the resulting object.
"""
def __init__(self, lx, ly, mask, maps, spin=None, templates=None,
beam=None, purify_e=False, purify_b=False,
tol_pinv=1E-10, masked_on_input=False, lite=False):
self.fl = None
pure_e = 0
if purify_e:
pure_e = 1
pure_b = 0
if purify_b:
pure_b = 1
masked_input = 0
if masked_on_input:
masked_input = 1
if (lx < 0) or (ly < 0):
raise ValueError("Must supply sensible dimensions for "
"flat-sky field")
# Flatten arrays and check dimensions
shape_2D = np.shape(mask)
self.ny = shape_2D[0]
self.nx = shape_2D[1]
if maps is None:
mask_only = True
if spin is None:
raise ValueError("Please supply field spin")
lite = True
else:
mask_only = False
nmaps = len(maps)
if (nmaps != 1) and (nmaps != 2):
raise ValueError("Must supply 1 or 2 maps per field")
if spin is None:
if nmaps == 1:
spin = 0
else:
spin = 2
else:
if (((spin != 0) and nmaps == 1) or
((spin == 0) and nmaps != 1)):
raise ValueError("Spin-zero fields are "
"associated with a single map")
if (pure_e or pure_b) and spin != 2:
raise ValueError("Purification only implemented for spin-2 fields")
# Flatten mask
msk = (mask.astype(np.float64)).flatten()
if (not mask_only):
# Flatten maps
mps = []
for m in maps:
if np.shape(m) != shape_2D:
raise ValueError("Mask and maps don't have the same shape")
mps.append((m.astype(np.float64)).flatten())
mps = np.array(mps)
# Flatten templates
if isinstance(templates, (list, tuple, np.ndarray)):
tmps = []
for t in templates:
tmp = []
if len(t) != nmaps:
raise ValueError("Maps and templates should have the "
"same number of maps")
for m in t:
if np.shape(m) != shape_2D:
raise ValueError("Mask and templates don't have "
"the same shape")
tmp.append((m.astype(np.float64)).flatten())
tmps.append(tmp)
tmps = np.array(tmps)
else:
if templates is not None:
raise ValueError("Input templates can only be an array "
"or None")
# Form beam
if isinstance(beam, (list, tuple, np.ndarray)):
beam_use = beam
else:
if beam is None:
beam_use = np.array([[-1.], [-1.]])
else:
raise ValueError("Input beam can only be an array or "
"None")
if mask_only:
self.fl = lib.field_alloc_empty_flat(self.nx, self.ny,
lx, ly, spin,
msk, beam_use,
pure_e, pure_b)
else:
# Generate field
if isinstance(templates, (list, tuple, np.ndarray)):
self.fl = lib.field_alloc_new_flat(self.nx, self.ny, lx, ly,
spin, msk, mps, tmps,
beam_use, pure_e, pure_b,
tol_pinv, masked_input,
int(lite))
else:
self.fl = lib.field_alloc_new_notemp_flat(self.nx, self.ny,
lx, ly, spin,
msk, mps,
beam_use, pure_e,
pure_b, masked_input,
int(lite))
self.lite = lite
def __del__(self):
if self.fl is not None:
if lib.field_flat_free is not None:
lib.field_flat_free(self.fl)
self.fl = None
def get_mask(self):
"""
Returns this field's mask as a 2D array ([ny][nx]).
:return: 2D mask.
"""
msk = lib.get_mask_flat(self.fl,
int(self.fl.npix)).reshape([self.ny,
self.nx])
return msk
def get_maps(self):
"""
Returns a 3D array ([nmap][ny][nx]) corresponding to the observed \
maps for this field. If the field was initialized with contaminant \
templates, the maps returned by this function have their best-fit \
contribution from these contaminants removed.
:return: 3D array of flat-sky maps
"""
if self.lite:
raise ValueError("Input maps unavailable for lightweight fields. "
"To use this function, create an `NmtFieldFlat` "
"object with `lite=False`.")
maps = np.zeros([self.fl.nmaps, self.fl.npix])
for imap in range(self.fl.nmaps):
maps[imap, :] = lib.get_map_flat(self.fl, imap, int(self.fl.npix))
mps = maps.reshape([self.fl.nmaps, self.ny, self.nx])
return mps
def get_templates(self):
"""
Returns a 4D array ([ntemp][nmap][ny][nx]) corresponding to the \
contaminant templates passed when initializing this field.
:return: 4D array of flat-sky maps
"""
if self.lite:
raise ValueError("Input maps unavailable for lightweight fields. "
"To use this function, create an `NmtFieldFlat` "
"object with `lite=False`.")
temp = np.zeros([self.fl.ntemp, self.fl.nmaps, self.fl.npix])
for itemp in range(self.fl.ntemp):
for imap in range(self.fl.nmaps):
temp[itemp, imap, :] = lib.get_temp_flat(
self.fl, itemp, imap, int(self.fl.npix)
)
tmps = temp.reshape([self.fl.ntemp, self.fl.nmaps, self.ny, self.nx])
return tmps
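# Analogous flat-sky sketch (illustrative assumption): a 5 x 5 degree patch
# sampled on a 100 x 100 grid with a trivial mask.
#
#   lx = ly = np.radians(5.)
#   mask = np.ones([100, 100])
#   mp = np.random.randn(100, 100)
#   f0_flat = NmtFieldFlat(lx, ly, mask, [mp])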
| 2.515625 | 3 |
data-structures/p482.py | sajjadt/competitive-programming | 10 | 12796872 | <filename>data-structures/p482.py
from os import linesep
num_tests = int(input())
for i in range(num_tests):
input()
perms = list(map(int, input().split()))
nums = list(map(str, input().split()))
out = [0] * len(perms)
for j in range(len(perms)):
out[perms[j]-1] = str(nums[j])
print(linesep.join(out))
if i != num_tests - 1:
print()
| 3.109375 | 3 |
gem/constants.py | praekeltfoundation/Molo-Merge-CMS-Scratchpad | 1 | 12796873 | from django.utils.translation import ugettext_lazy as _
MALE = "m"
FEMALE = "f"
UNSPECIFIED = "-"
GENDERS = {(MALE, _("male")),
(FEMALE, _("female")),
(UNSPECIFIED, _("don't want to answer"))}
| 1.9375 | 2 |
BaseTools/PythonLibrary/Uefi/Capsule/CatGenerator_test.py | StefMa/mu_basecore | 0 | 12796874 | import os
import logging
import unittest
from Uefi.Capsule.CatGenerator import *
#must run from build env or set PYTHONPATH env variable to point to the PythonLibrary folder
class CatGeneratorTest(unittest.TestCase):
def test_win10_OS(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.OperatingSystem, "10")
def test_10_OS(self):
o = CatGenerator("x64", "10")
self.assertEqual(o.OperatingSystem, "10")
def test_win10Server_OS(self):
o = CatGenerator("x64", "Server10")
self.assertEqual(o.OperatingSystem, "Server10")
def test_invalid_OS(self):
with self.assertRaises(ValueError):
CatGenerator("x64", "Invalid Junk")
def test_x64_arch(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.Arch, "X64")
def test_amd64_arch(self):
o = CatGenerator("amd64", "win10")
self.assertEqual(o.Arch, "X64")
def test_arm_arch(self):
o = CatGenerator("arm", "win10")
self.assertEqual(o.Arch, "ARM")
def test_arm64_arch(self):
o = CatGenerator("arm64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_aarch64_arch(self):
o = CatGenerator("aarch64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_invalid_arch(self):
with self.assertRaises(ValueError):
CatGenerator("Invalid Arch", "win10")
def test_invalid_pathtotool(self):
o = CatGenerator("amd64", "10")
with self.assertRaises(Exception) as cm:
o.MakeCat("garbage", os.path.join("c:", "test", "badpath", "inf2cat.exe"))
self.assertTrue(str(cm.exception).startswith("Can't find Inf2Cat on this machine."))
| 2.5 | 2 |
src/goa/utils.py | deeplego/goa | 0 | 12796875 | import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Union, TypeVar, Iterable, Dict
from goa import problems
T = TypeVar("T")
def plot_population(
problem: problems.BaseProblem,
X: Union[T, Iterable[T]],
ax: plt.Axes = None,
c: str = "darkblue",
linestyle: str = ":",
marker: str = "X",
markersize: int = 6,
markevery: int = 2,
antialiased: bool = True,
figsize: Tuple[float, float] = (12, 8),
kwargs: Dict = None,
) -> plt.Axes:
knobs = dict()
if kwargs is not None:
knobs.update(kwargs)
if not ax:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
if X.shape == (2,):
X = [X]
for x, y in X:
ax.plot(
[x, x],
[y, y],
[problem(np.asarray([x, y])), 0],
c=c,
linestyle=linestyle,
marker=marker,
markersize=markersize,
markevery=markevery,
antialiased=antialiased,
**knobs
)
return ax
def root_mean_squared_error(
x: Union[float, np.ndarray], y: Union[float, np.ndarray]
) -> float:
return np.sqrt(np.mean(np.power(np.subtract(x, y), 2)))
def custom_init_view_function(
y: float = 20, x: float = 120, a: float = 30, b: float = 15
) -> Tuple[float, float]:
return a - np.cos(y) * b, x
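if __name__ == "__main__":
    # Small self-check added for illustration; not part of the original module.
    assert abs(root_mean_squared_error(np.array([1.0, 2.0]),
                                       np.array([1.0, 3.0])) - np.sqrt(0.5)) < 1e-12
    print(custom_init_view_function())  # default elevation/azimuth pair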
| 2.890625 | 3 |
generate_crypto_key.py | erictapia/cryptography | 0 | 12796876 | # Standard library
import os
from pathlib import Path
# Third party
from cryptography.fernet import Fernet
from dotenv import load_dotenv
# Constants
CRYPTO_KEY_ENV_VAR = 'CRYPTO_KEY'
ENV_VAR_EXIST = f'The {CRYPTO_KEY_ENV_VAR} environment variable already exist. Cannot continue as the crypto key may still be in use.'
ENV_VAR_PATH = Path.home() / '.my_python_env'
# Load virtual environmental variables
load_dotenv(dotenv_path=ENV_VAR_PATH)
def environment_var_exist():
return os.environ.get(CRYPTO_KEY_ENV_VAR)
def generate_key():
return Fernet.generate_key()
def write_key_env_var(crypto_key):
# Only write if environmental variable does not exist.
# Otherwise raise an exception - environment variable already exists.
if not environment_var_exist():
with ENV_VAR_PATH.open(mode='w') as file:
file.write(f'{CRYPTO_KEY_ENV_VAR}={crypto_key}')
else:
raise Exception(ENV_VAR_EXIST)
if __name__ == '__main__':
crypto_key = generate_key().decode()
write_key_env_var(crypto_key)
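    # Illustrative follow-up (an assumption, not in the original script): the key
    # can be used directly with Fernet for symmetric encryption, e.g.
    #   f = Fernet(crypto_key.encode())
    #   token = f.encrypt(b'secret message')
    #   assert f.decrypt(token) == b'secret message'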
| 2.578125 | 3 |
getelhid.py | dlindem/ahotsak-wikibase | 0 | 12796877 | import csv
import config
import awb
infilename = config.datafolder+'wikidata/wdlid_ElhId.csv'
resourceitem = "Q19" #Q19: Elhuyar
#positem = "Q8" # Q7: substantibo, Q8: aditza
with open(infilename, encoding="utf-8") as csvfile:
sourcedict = csv.DictReader(csvfile)
lex_elh = {}
for row in sourcedict:
lex_elh[row['lexemeId'].replace("http://www.wikidata.org/entity/","")] = row['ElhId']
for awbid in awb.wdmappings:
wdid = awb.wdmappings[awbid]
if awbid.startswith('L') and wdid in lex_elh:
wdstatement = awb.updateclaim(awbid, "P1", wdid, "string")
quali = awb.setqualifier(awbid, "P1", wdstatement, "P7", lex_elh[wdid], "string")
| 2.484375 | 2 |
tests/test_calculations.py | godzilla-but-nicer/cellularautomata | 0 | 12796878 | import numpy as np
from casim.calculations import word_entropy
def test_word_entropy():
test_arr = np.array([1, 0, 0, 1, 1, 0, 1, 0])
assert np.round(word_entropy(test_arr, 3), decimals=1) == 2.5
| 2.484375 | 2 |
mtp_api/apps/security/migrations/0018_auto_20181003_1117.py | ministryofjustice/mtp-api | 5 | 12796879 | <reponame>ministryofjustice/mtp-api
# Generated by Django 2.0.8 on 2018-10-03 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prison', '0016_auto_20171121_1110'),
('security', '0017_auto_20180914_1613'),
]
operations = [
migrations.AddField(
model_name='prisonerprofile',
name='disbursement_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='prisonerprofile',
name='disbursement_total',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='prisonerprofile',
name='recipients',
field=models.ManyToManyField(related_name='prisoners', to='security.RecipientProfile'),
),
migrations.AddField(
model_name='recipientprofile',
name='prisons',
field=models.ManyToManyField(related_name='recipients', to='prison.Prison'),
),
migrations.AlterField(
model_name='prisonerprofile',
name='prisoner_dob',
field=models.DateField(blank=True, null=True),
),
]
| 1.585938 | 2 |
main.py | LCRT215/Conways-Game-of-Life | 0 | 12796880 | <filename>main.py
import pygame
import sys
from game_window_class import *
WIDTH, HEIGHT = 800, 800
BACKGROUND = (33, 47, 60)
#initial game setup
def get_events():
global running
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
def update():
game_window.update()
def draw():
window.fill(BACKGROUND)
game_window.draw()
pygame.init()
window = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
game_window = Game_window(window, 150, 180)
running = True
while running:
get_events()
update()
draw()
pygame.display.update()
clock.tick()
pygame.quit()
sys.exit()
| 3.25 | 3 |
mp_calc/app/serverlibrary.py | seancze/d2w_mini_project_mp_sort | 0 | 12796881 | <gh_stars>0
def mergesort(array, byfunc=None):
if len(array) > 1:
start, end = 0, len(array)
mid = len(array) // 2
left = array[start:mid]
right = array[mid:end]
i, j, k = 0, 0, 0
# Sort recursively (I.e. Starting from 2 elements)
mergesort(left, byfunc=byfunc)
mergesort(right, byfunc=byfunc)
# Add to array in-place
while i < len(left) and j < len(right):
if byfunc(left[i]) <= byfunc(right[j]):
array[k] = left[i]
i += 1
else:
array[k] = right[j]
j += 1
k += 1
# Remaining elements
while i < len(left):
array[k] = left[i]
i += 1
k += 1
while j < len(right):
array[k] = right[j]
j += 1
k += 1
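# Usage sketch (illustrative, not part of the original library): mergesort sorts
# in place and expects a key function via byfunc.
#
#   numbers = [5, 2, 9, 1]
#   mergesort(numbers, byfunc=lambda x: x)
#   # numbers is now [1, 2, 5, 9]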
class Stack:
def __init__(self):
self._items = []
def push(self, item):
self._items.append(item)
def pop(self):
if len(self._items):
return self._items.pop()
def peek(self):
if len(self._items):
return self._items[-1]
@property
def is_empty(self):
return len(self._items) == 0
@property
def size(self):
return len(self._items)
class EvaluateExpression:
valid_char = '0123456789+-*/() '
valid_num = '0123456789'
valid_operator = '+-*/()'
def __init__(self, string=""):
self.expr = string
@property
def expression(self):
return self.expr
@expression.setter
def expression(self, new_expr):
for char in new_expr:
if char not in EvaluateExpression.valid_char:
self.expr = ""
return
self.expr = new_expr
def insert_space(self):
s = ""
for char in self.expr:
if char in EvaluateExpression.valid_operator:
s += f" {char} "
else:
s += char
return s
def process_operator(self, operand_stack, operator_stack):
a = operand_stack.pop()
operator = operator_stack.pop()
b = operand_stack.pop()
if operator == "+":
ans = b + a
elif operator == "-":
ans = b - a
elif operator == "*":
ans = b * a
elif operator == "/":
ans = b // a
operand_stack.push(ans)
def evaluate(self):
operand_stack = Stack()
operator_stack = Stack()
expression = self.insert_space()
tokens = expression.split()
for el in tokens:
try:
# Note: "10.5" will NOT be able to be converted into an int
my_int = int(el)
operand_stack.push(my_int)
except:
if el == "+" or el == "-":
while not operator_stack.is_empty and operator_stack.peek() not in "()":
self.process_operator(operand_stack, operator_stack)
operator_stack.push(el)
elif el == "*" or el == "/":
                    while not operator_stack.is_empty and operator_stack.peek() in "*/":
self.process_operator(operand_stack, operator_stack)
operator_stack.push(el)
elif el == "(":
operator_stack.push(el)
elif el == ")":
while operator_stack.peek() != "(":
self.process_operator(operand_stack, operator_stack)
while operator_stack.size > 0:
if operator_stack.peek() in "()":
operator_stack.pop()
continue
self.process_operator(operand_stack, operator_stack)
return operand_stack.peek()
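# Illustrative example (not part of the original module): integer-only infix
# evaluation using the two stacks above.
#
#   expr = EvaluateExpression("(1 + 2) * 3")
#   expr.evaluate()   # returns 9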
def get_smallest_three(challenge):
records = challenge.records
times = [r for r in records]
mergesort(times, lambda x: x.elapsed_time)
return times[:3]
| 3.71875 | 4 |
seqseqpan/modifier.py | sekang2/seq-seq-pan | 0 | 12796882 | <reponame>sekang2/seq-seq-pan<gh_stars>0
import collections
import itertools
from operator import itemgetter
import math
from Bio import pairwise2
from seqseqpan.base import *
class Separator:
def separate_lcbs(self, alignment, length):
separated = Alignment(alignment.xmfa_file)
for nr, genome in alignment.genomes.items():
separated.add_genome(genome, nr)
for lcb in alignment.lcbs:
if lcb.length <= length:
for entry in lcb.entries:
seq = entry.sequence.replace("-", "")
new_entry = SequenceEntry(entry.genome_nr, entry.start, entry.end, entry.strand, seq)
separated.add_lcb_entries(new_entry)
else:
separated.add_lcb(lcb)
return separated
class Merger:
def merge_lcbs(self, alignment, consensus_genome_nr, new_genome_nr, block_length):
if len(alignment.genomes) > 2:
raise ConsensusXMFAInputError()
lcbs = alignment.get_sorted_lcbs(new_genome_nr)
# do not create small (less bp than blocklength) LCBs by splitting, but append/prepend sequence
merged_split_lcbs = []
for i in range(len(lcbs)):
lcb = lcbs[i]
new_entry = lcb.get_entry(new_genome_nr)
consensus_entry = lcb.get_entry(consensus_genome_nr)
prepend = False
append = False
use_prev = False
use_next = False
prev_gap = False
next_gap = False
to_reverse_complement = False
next_new_entry = None
prev_new_entry = None
# check if new entry is small and only created by splitting or aligning of new genome (consensus is None)
if consensus_entry is None and new_entry is not None and len(new_entry.sequence) <= block_length:
nr_gaps = len(new_entry.sequence)
# check if can be appended to previous entry and if that ends with a gap
if len(merged_split_lcbs) > 0:
prev_lcb = merged_split_lcbs[-1]
prev_new_entry = prev_lcb.get_entry(new_genome_nr)
if prev_new_entry is not None and (new_entry.start - prev_new_entry.end) == 1:
use_prev = True
if (prev_new_entry.strand == "+" and prev_new_entry.sequence[-1] == "-") \
or ( prev_new_entry.strand == "-" and prev_new_entry.sequence[0] == "-"):
prev_gap = True
# check if can be prepended to next entry and if that starts with a gap
if len(lcbs) > (i+1):
next_lcb = lcbs[i + 1]
next_new_entry = next_lcb.get_entry(new_genome_nr)
if next_new_entry is not None and (next_new_entry.start - new_entry.end) == 1:
use_next = True
if (next_new_entry.strand == "+" and next_new_entry.sequence[0] == "-")\
or (next_new_entry.strand == "-" and next_new_entry.sequence[-1] == "-"):
next_gap = True
neighbour_new_entry = None
# if both, choose the one with gap at start or end
if (not next_gap) and use_prev:
neighbour_new_entry = prev_new_entry
neighbour_lcb = prev_lcb
if neighbour_new_entry.strand == "+":
append = True
else:
prepend = True
elif use_next:
neighbour_new_entry = next_new_entry
neighbour_lcb = next_lcb
if neighbour_new_entry.strand == "+":
prepend = True
else:
append = True
if neighbour_new_entry is not None:
if new_entry.strand != neighbour_new_entry.strand:
to_reverse_complement = True
if append or prepend:
if to_reverse_complement:
new_entry.reverse_complement()
sequence = neighbour_new_entry.sequence
neighbour_consensus_entry = neighbour_lcb.get_entry(consensus_genome_nr)
neighbour_lcb.length += nr_gaps
neighbour_new_entry.end = max(new_entry.end, neighbour_new_entry.end)
neighbour_new_entry.start = min(new_entry.start, neighbour_new_entry.start)
if append:
neighbour_new_entry.sequence = sequence + new_entry.sequence
if neighbour_consensus_entry is not None:
neighbour_consensus_entry.sequence += ("-" * nr_gaps)
elif prepend:
neighbour_new_entry.sequence = new_entry.sequence + sequence
if neighbour_consensus_entry is not None:
neighbour_consensus_entry.sequence = ("-" * nr_gaps) + neighbour_consensus_entry.sequence
# entry should not be merged or could be neither appended nor prepended
# add LCBs to alignment as it is
else:
merged_split_lcbs.append(lcb)
merged = Alignment(alignment.xmfa_file)
for nr, genome in alignment.genomes.items():
merged.add_genome(genome, nr)
for lcb in merged_split_lcbs:
merged.add_lcb(lcb)
return merged
class Realigner:
# local realignment around overlapping or consecutive gaps in two sequences
# if border_aln_length is not None align only sequences at block border up to given length
def realign(self, alignment, processor, border_aln_length=0):
if len(alignment.genomes) > 2:
raise ConsensusXMFAInputError()
realigned = Alignment(alignment.xmfa_file)
for nr, genome in alignment.genomes.items():
realigned.add_genome(genome, nr)
# go through lcbs, skip one-entry ones
for lcb in alignment.get_sorted_lcbs(0):
if len(lcb.entries) == 1:
realigned.add_lcb(lcb)
else:
entry_one = lcb.entries[0]
entry_two = lcb.entries[1]
# get regions to realign
one_first_two_second = self._get_realign_regions(entry_one.gaps, entry_two.gaps)
if len(one_first_two_second) > 0:
seq_one, seq_two = self._realign(entry_one.sequence, entry_two.sequence, one_first_two_second,
processor, border_aln_length)
entry_one.sequence = seq_one
entry_two.sequence = seq_two
# get regions to realign for updated entries with second entry first
two_first_one_second = self._get_realign_regions(entry_two.gaps, entry_one.gaps)
if len(two_first_one_second) > 0:
seq_two, seq_one = self._realign(entry_two.sequence, entry_one.sequence, two_first_one_second,
processor, border_aln_length)
entry_one.sequence = seq_one
entry_two.sequence = seq_two
new_lcb = LCB()
new_lcb.add_entries([entry_one, entry_two])
realigned.add_lcb(new_lcb)
return realigned
def _get_realign_regions(self, gaps_for_start, gaps_for_end):
ends_dict = {end: start for start, end in gaps_for_end.items()}
location_list = [start for start, end in gaps_for_start.items()] + list(ends_dict.keys())
region_starts = [item for item, count in collections.Counter(location_list).items() if count > 1]
regions = []
for start in region_starts:
regions.append([(start, gaps_for_start[start]), (ends_dict[start], start)])
return regions
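    # Example (added for illustration): with gaps {5: 8} in the first entry and
    # {2: 5} in the second, position 5 is both a gap start and a gap end, so the
    # single region [(5, 8), (2, 5)] is returned for realignment.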
def _realign(self, seq_one, seq_two, realign_regions, processor, border_aln_length):
# if border_aln_length is not None align only sequences at block border up to given length
realign_regions = sorted(realign_regions)
index_offset = 0
for interval in realign_regions:
max_index, max_seq_length = max(
enumerate([interval[0][1] - interval[0][0], interval[1][1] - interval[1][0]]), key=lambda p: p[1])
# get length of gaps at start or end of block
short_border_intervals = [(i[1] - i[0]) <= border_aln_length for i in interval # check border length
if i[0] == 0 # start of block
or ((i[1] - index_offset) == len(seq_one)) # end of block
or ((i[1] - index_offset) == len(seq_two))] # end of block
interval_start = interval[max_index][0] - index_offset
interval_end = interval[max_index][1] - index_offset
# border_aln_length not set OR small sequence at start or end of block
if border_aln_length == 0 or any(short_border_intervals):
# check if interval only 'N' - if yes: do not realign
n_stretch = 'N' * max_seq_length
if not (seq_one[interval_start:interval_end] == n_stretch or
seq_two[interval_start:interval_end] == n_stretch):
max_seq_length = math.ceil(max_seq_length * 1.5)
# get surrounding sequences
seq_start = interval_start - max_seq_length
seq_end = interval_end + max_seq_length
# do not go over boundaries of sequences!
seq_start = max(seq_start, 0)
min_orig_seq_length = min(len(seq_one), len(seq_two))
seq_end = min(seq_end, min_orig_seq_length)
# N-stretches in sequences
# find N-stretch between start and interval and start sub-sequence after nearest stretch
n_stretch_length = 10
n_stretch = 'N' * n_stretch_length
n_stretch_idx = seq_one.rfind(n_stretch, seq_start, interval_start)
if n_stretch_idx > -1:
seq_start = max(seq_start, (n_stretch_idx + n_stretch_length))
n_stretch_idx = seq_two.rfind(n_stretch, seq_start, interval_start)
if n_stretch_idx > -1:
seq_start = max(seq_start, (n_stretch_idx + n_stretch_length))
# find N-stretch between interval and end and end sub-sequence before nearest stretch
n_stretch_idx_one = seq_one.find(n_stretch, interval_end, seq_end)
n_stretch_idx_two = seq_two.find(n_stretch, interval_end, seq_end)
seq_end = min(seq_end,
(n_stretch_idx_one if n_stretch_idx_one > -1 else seq_end),
(n_stretch_idx_two if n_stretch_idx_two > -1 else seq_end)
)
#if border_aln_length > 0:
# print(seq_one[seq_start:seq_end])
# print(seq_two[seq_start:seq_end])
seq_one_nogap = seq_one[seq_start:seq_end].replace("-", "")
seq_two_nogap = seq_two[seq_start:seq_end].replace("-", "")
if not (seq_one_nogap == '' or seq_two_nogap == ''): # else: do nothing for current interval
if (seq_end - seq_start) < 1000: #https://github.com/biopython/biopython/pull/782
alignments = pairwise2.align.globalxs(seq_one_nogap.upper(),
seq_two_nogap.upper(),
-0.5, -0.1, #default
one_alignment_only=True)
if len(alignments) > 0:
max_score = max([x[2] for x in alignments])
alignments = (lambda max_score=max_score: [item for item in alignments if item[2] == max_score])()
max_length = min([x[4] for x in alignments])
alignments = (lambda max_length=max_length: [item for item in alignments if item[4] == max_length])()
aln_seq_one = alignments[0][0]
aln_seq_two = alignments[0][1]
else:
# no alignment, do nothing for current interval
break
else:
# do external blat alignment
aln_seq_one, aln_seq_two = self._external_blat(seq_one_nogap, seq_two_nogap, processor)
if aln_seq_one is not None:
max_length = len(aln_seq_one)
else:
# no alignment, do nothing for current interval
break
if max_length > 0 and max_length < ( seq_end - seq_start ): # only use new alignment if better than old one
#if border_aln_length > 0:
# print(aln_seq_one)
# print(aln_seq_two)
seq_one = aln_seq_one.join([seq_one[:seq_start], seq_one[seq_end:]])
seq_two = aln_seq_two.join([seq_two[:seq_start], seq_two[seq_end:]])
index_offset += ((seq_end - seq_start) - max_length)
return seq_one, seq_two
def _external_blat(self, seq_one, seq_two, processor):
def _check_tuple_overlap(tuple_list):
num_tuple = len(tuple_list)
last_ok_end = -1
exclude_idx = []
#overlap_detected = False
for idx in range(num_tuple):
if tuple_list[idx][0] < last_ok_end: #current start smaller than last end -> exclude
exclude_idx.append(idx)
else:
last_ok_end = tuple_list[idx][1] # current tuple is ok -> store end
return exclude_idx
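        # Illustrative example (hypothetical values): for tuple_list
        # [(0, 10), (8, 20), (25, 30)] the second tuple starts at 8, before the
        # previously accepted end 10, so the excluded indices are [1].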
align_result = processor.external_blat(seq_one, seq_two)
if align_result is not None:
# seq_one equals hit seq_two equals query
# only one query and first hit has highest score per definition of blat
match = align_result[0][0]
# add starting sequences, in case query or hit do not start at "0"
seq_one_list = []
seq_two_list = []
if match.hit_start > 0:
seq_one_list.append(seq_one[0:match.hit_start])
seq_two_list.append("-" * match.hit_start)
if match.query_start > 0:
seq_one_list.append("-" * match.query_start)
seq_two_list.append(seq_two[0:match.query_start])
if match.is_fragmented:
# in really rare cases fragments are overlapping!
exclude_keys = _check_tuple_overlap(match.hit_range_all)
exclude_keys = exclude_keys + _check_tuple_overlap(match.query_range_all)
hspfrag_keys = list(set(range(len(match.hit_range_all))) - set(exclude_keys))
hspfrag_key_num = len(hspfrag_keys)
for hspfrag_idx in range(hspfrag_key_num):
hspfrag_key = hspfrag_keys[hspfrag_idx]
hspfrag = match[hspfrag_key]
seq_one_list.append(str(hspfrag.hit.seq))
seq_two_list.append(str(hspfrag.query.seq))
# add sequences between aligned intervals to sequences
if hspfrag_idx < (hspfrag_key_num - 1):
next_hspfrag = match[hspfrag_keys[hspfrag_idx + 1]]
inter_hit_len = next_hspfrag.hit_start - hspfrag.hit_end
if inter_hit_len > 0:
seq_one_list.append(seq_one[hspfrag.hit_end:next_hspfrag.hit_start])
seq_two_list.append("-" * inter_hit_len)
inter_query_len = next_hspfrag.query_start - hspfrag.query_end
if inter_query_len > 0:
seq_one_list.append("-" * inter_query_len)
seq_two_list.append(seq_two[hspfrag.query_end:next_hspfrag.query_start])
else:
seq_rec_one = match.aln[0]
if seq_rec_one.id == "seq1":
seq_one_list.append(str(match.aln[0].seq))
seq_two_list.append(str(match.aln[1].seq))
else:
seq_one_list.append(str(match.aln[1].seq))
seq_two_list.append(str(match.aln[0].seq))
# add last sequence parts if hit or query do not include sequence ends
if match.hit_end < len(seq_one):
seq_len = len(seq_one) - match.hit_end
seq_one_list.append(seq_one[match.hit_end:])
seq_two_list.append("-" * seq_len)
if match.query_end < len(seq_two):
seq_len = len(seq_two) - match.query_end
seq_one_list.append("-" * seq_len)
seq_two_list.append(seq_two[match.query_end:])
return "".join(seq_one_list).upper(), "".join(seq_two_list).upper()
else:
return None, None
class SingletonAligner:
def genome_count_split(self, alignment):
if len(alignment.genomes) > 2:
raise ConsensusXMFAInputError()
single_alignment_1 = Alignment(alignment.xmfa_file)
single_alignment_2 = Alignment(alignment.xmfa_file)
single_alignment_1.genomes[1] = alignment.genomes[1]
single_alignment_2.genomes[2] = alignment.genomes[2]
pairlcbs = []
for lcb in alignment.lcbs:
if len(lcb.entries) == 1:
if lcb.entries[0].strand == "-":
lcb.reverse_complement_entries()
if lcb.entries[0].genome_nr == 1:
single_alignment_1.add_lcb(lcb)
elif lcb.entries[0].genome_nr == 2:
single_alignment_2.add_lcb(lcb)
else:
pairlcbs.append(lcb)
alignment.lcbs = pairlcbs
return alignment, single_alignment_1, single_alignment_2
def join(self, alignment, alignment_two):
if len(alignment.genomes) > 2:
print("ERROR: I don't want to think about cases with more than 2 genomes now.")
genome_names_one = [genome.file_path for genome in alignment.genomes.values()]
genome_names_two = [genome.file_path for genome in alignment_two.genomes.values()]
        if sorted(genome_names_one) != sorted(genome_names_two):
print("ERROR: Can only join alignments from same set of genomes.")
if alignment.genomes[1].file_path != alignment_two.genomes[1].file_path:
genome_nr_dict = {1: 2, 2: 1}
else:
genome_nr_dict = {1: 1, 2: 2}
for lcb in alignment_two.lcbs:
for entry in lcb.entries:
entry.genome_nr = genome_nr_dict[entry.genome_nr]
alignment.add_lcb(lcb)
return alignment
class Remover:
def remove(self, alignment, rm_genome):
if len(alignment.genomes) >= rm_genome > -1:
for lcb in alignment.lcbs:
entries = [entry for entry in lcb.entries if entry.genome_nr != rm_genome]
# did LCB include entry of genome to remove?
if len(entries) < len(lcb.entries):
# are there any entries left in LCB?
if len(entries) > 1:
# if more than one entry left search for gaps that are present in all remaining entries
rm_gaps = set(itertools.chain.from_iterable(
[list(range(k, entries[0].gaps[k])) for k in entries[0].gaps]))
for entry in entries[1:]:
rm_gaps &= set(
itertools.chain.from_iterable([list(range(k, entry.gaps[k])) for k in entry.gaps]))
rm_gaps = sorted(list(rm_gaps))
# make intervals of consecutive gap positions for faster join()
gap_ranges = []
for k, g in itertools.groupby(enumerate(rm_gaps), lambda x: x[0] - x[1]):
group = list(map(itemgetter(1), g))
gap_ranges.append((group[0], group[-1])) # tuples with intervals
if len(gap_ranges) > 0:
if gap_ranges[0][0] != 0:
gap_ranges = [(-1, -1)] + gap_ranges
                            if gap_ranges[-1][1] != lcb.length:
gap_ranges = gap_ranges + [(lcb.length, lcb.length)]
for entry in entries:
entry.sequence = ''.join(
[entry.sequence[(gap_ranges[i][1] + 1):gap_ranges[i + 1][0]] for i in
range(len(gap_ranges) - 1)])
if entry.genome_nr > rm_genome:
entry.genome_nr -= 1
# if no gaps found only reduce genome nr (avoid looping through entries twice if gaps present)
else:
for entry in entries:
if entry.genome_nr > rm_genome:
entry.genome_nr -= 1
elif len(entries) == 1: # if only one entry left replace all gaps in sequence
entries[0].sequence = entries[0].sequence.replace("-", "")
if entries[0].genome_nr > rm_genome:
entries[0].genome_nr -= 1
else:
for entry in entries:
if entry.genome_nr > rm_genome:
entry.genome_nr -= 1
lcb.entries[:] = entries
alignment.lcbs[:] = [lcb for lcb in alignment.lcbs if len(lcb.entries) > 0]
max_genome = len(alignment.genomes)
for nr in range(rm_genome + 1, max_genome + 1):
alignment.genomes[nr - 1] = alignment.genomes[nr]
del alignment.genomes[max_genome]
return alignment
else:
raise ParameterError("remove_genome", rm_genome,
"between 0 and " + str(len(alignment.genomes)) + " (number of genomes in XMFA)")
def merge(self, alignment):
for lcb in alignment.lcbs:
lcb.entries = sorted(lcb.entries, key=lambda entry: entry.genome_nr)
new_alignment = Alignment(alignment.xmfa_file)
for nr, genome in alignment.genomes.items():
new_alignment.add_genome(genome, nr)
for order in alignment.genomes:
if len(alignment.lcbs) > 1: # if more than one lcb is unchecked try to merge
alignment.lcbs = alignment.get_sorted_lcbs(order)
j = 0
if alignment.lcbs[0].get_entry(order) is not None:
new_alignment.add_lcb(alignment.lcbs[0])
for lcb in range(1, len(alignment.lcbs)):
j += 1
if alignment.lcbs[lcb].get_entry(order) is not None:
i = 0
if len(alignment.lcbs[lcb].entries) == len(alignment.lcbs[lcb - 1].entries):
strand = alignment.lcbs[lcb].entries[0].strand == alignment.lcbs[lcb - 1].entries[0].strand
for entry in range(0, len(alignment.lcbs[lcb].entries)):
if alignment.lcbs[lcb].entries[entry].genome_nr != alignment.lcbs[lcb - 1].entries[entry].genome_nr \
or alignment.lcbs[lcb].entries[entry].start - alignment.lcbs[lcb - 1].entries[entry].end != 1 \
or (alignment.lcbs[lcb].entries[entry].strand == alignment.lcbs[lcb - 1].entries[entry].strand) != strand:
# if an entry does not fulfill all conditions stop and do not merge this lcb
new_alignment.add_lcb(alignment.lcbs[lcb])
break
else:
i += 1
if i == len(alignment.lcbs[lcb].entries):
if not strand: # if all entries have an unequal strand reverse complement this lcb
alignment.lcbs[lcb].reverse_complement_entries()
new_alignment.lcbs[-1].length += alignment.lcbs[lcb].length
for pos in range(0, len(new_alignment.lcbs[-1].entries)):
new_alignment.lcbs[-1].entries[pos].sequence += alignment.lcbs[lcb].entries[pos].sequence
new_alignment.lcbs[-1].entries[pos].end = alignment.lcbs[lcb].entries[pos].end
else:
new_alignment.add_lcb(alignment.lcbs[lcb])
else:
break
alignment.lcbs[:] = alignment.lcbs[j:len(alignment.lcbs)] # continue with unchecked lcbs
# if only one lcb left check whether it is already checked or not
elif len(alignment.lcbs) == 1 and new_alignment.lcbs[-1].entries[0].genome_nr != alignment.lcbs[0].entries[0].genome_nr:
new_alignment.add_lcb(alignment.lcbs[0]) # if not checked yet add it as last lcb and finish
break
else:
break
return new_alignment
| 2.6875 | 3 |
generator/txmultivariablegenerator.py | zarppy/MUREIL_2014 | 0 | 12796883 | <filename>generator/txmultivariablegenerator.py
#
#
# Copyright (C) University of Melbourne 2013
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""Module for a variable generator using the txmultigeneratormultisite base class.
"""
from tools import configurablebase, mureilexception
from generator import txmultigeneratormultisite
from master import interfacesflowmaster
import copy
import numpy
import string
class TxMultiVariableGeneratorBase(txmultigeneratormultisite.TxMultiGeneratorMultiSite,
interfacesflowmaster.InterfaceSemiScheduledDispatch):
"""A simple implementation of a variable generator, providing
per-unit capital costs.
"""
def get_details(self):
"""Return a list of flags indicating the properties of the generator.
"""
flags = txmultigeneratormultisite.TxMultiGeneratorMultiSite.get_details(self)
flags['technology'] = self.config['tech_type']
flags['dispatch'] = 'semischeduled'
return flags
def get_config_spec(self):
"""Return a list of tuples of format (name, conversion function, default),
e.g. ('capex', float, 2.0). Put None if no conversion required, or if no
default value, e.g. ('name', None, None)
Configuration:
as for txmultigenerator.TxMultiGeneratorBase, plus:
tech_type: string - the generic technology type, to report in get_details() as technology.
detail_type: string - a specific name, e.g. 'onshore_wind_vic', for printing in an output string
data_name: string - the name of the data array holding the timeseries capacity factor data, e.g. ts_wind.
data_map_name: string - the name of the data array e.g. ts_wind_map which holds an n x 2 array
where n is the number of site indices mapped. The first in each pair is the site index and the second
the index into the data table. If this is not provided, a 1:1 is assumed.
data_ts_length: the length of the data timeseries, typically provided globally.
"""
return txmultigeneratormultisite.TxMultiGeneratorMultiSite.get_config_spec(self) + [
('tech_type', None, 'generic_variable'),
('detail_type', None, 'generic_variable'),
('data_name', None, None),
('data_map_name', None, ''),
('data_ts_length', int, None)
]
def get_data_types(self):
"""Return a list of keys for each type of
data required, for example ts_wind, ts_demand.
Outputs:
data_type: list of strings - each a key name
describing the data required for this generator.
"""
data_types = txmultigeneratormultisite.TxMultiGeneratorMultiSite.get_data_types(self)
data_types.append(self.config['data_name'])
if len(self.config['data_map_name']) > 0:
data_types.append(self.config['data_map_name'])
return data_types
def set_data(self, data):
"""Set the data dict with the data series required
for the generator.
Inputs:
data: dict - with keys matching those requested by
get_data_types.
"""
txmultigeneratormultisite.TxMultiGeneratorMultiSite.set_data(self, data)
self.data = data[self.config['data_name']]
self.site_to_data = {}
if len(self.config['data_map_name']) > 0:
map_data = data[self.config['data_map_name']]
for i in range(0, map_data.shape[0]):
self.site_to_data[map_data[i, 0]] = map_data[i, 1]
if len(self.params_to_site) == 0:
# We have a data map, but no params to site, so assume
# all data are used and map 1:1 to this.
self.params_to_site = map_data[:,0]
elif len(self.params_to_site) > 0:
# No data map is provided, but params_to_site is. If the
# lengths agree, map the site index list to the data 1:1.
if not (len(self.params_to_site) == self.data.shape[1]):
raise mureilexception.ConfigException('In model ' + self.config['section'] +
', no data map is provided, the data is width ' + str(self.data.shape[1]) +
' and the provided params_to_site list is ' + str(len(self.params_to_site)) +
' so no automatic mapping is possible.', {})
for i in range(len(self.params_to_site)):
self.site_to_data[self.params_to_site[i]] = i
else:
# No list of sites is provided. Just map to ordinal numbers.
self.params_to_site = range(self.data.shape[1])
for i in range(0, self.data.shape[1]):
self.site_to_data[i] = i
# Check that all of the values in site_to_data are within the
# self.data matrix.
max_data = self.data.shape[1]
for data_index in self.site_to_data.itervalues():
if (data_index < 0) or (data_index >= max_data):
raise mureilexception.ConfigException('data_index ' + str(data_index) +
' was requested by the model in section ' + self.config['section'] +
' but the maximum index in the data array is ' + str(max_data), {})
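    # Illustrative example (hypothetical values): if the data map array is
    # [[12, 0], [27, 1]], site 12 reads column 0 and site 27 reads column 1 of
    # the capacity-factor timeseries in self.data.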
def calculate_dispatch_offer(self, period, param=None):
"""Calculate the dispatch offer as the SRMC. This is the VOM for variable generators.
"""
return self.period_configs[period]['vom']
def get_offers_semischeduled(self, state_handle, ts_length):
"""Get offers for this semi-scheduled generator.
Outputs:
site_indices: the identifying indices of each site with active capacity. All lists of
sites below will correspond with this list.
offer_price: the offer price, one per site (interpreted as same for all timesteps)
quantity: the offer quantity, one timeseries per site, in MW.
"""
offer_price = self.calculate_dispatch_offer(state_handle['curr_period'])
site_indices, quantity = self.calculate_outputs(state_handle, ts_length)
return site_indices, offer_price, quantity
def calculate_outputs(self, state_handle, ts_length):
"""Calculate the maximum outputs, before scheduling.
Inputs:
state_handle
ts_length: an integer - the length of the timeseries
Outputs:
site_indices: the list of sites with active capacity
output: a set of timeseries, corresponding to site_indices
"""
cap_list = state_handle['capacity']
site_indices = self.get_site_indices(state_handle)
num_sites = len(site_indices)
output = numpy.zeros((num_sites, ts_length))
for i in range(num_sites):
site = site_indices[i]
total_cap = sum([tup[0] for tup in cap_list[site]])
data_index = self.site_to_data[site]
### TODO - it may be expensive to have self.data this way -
### would it help to transpose it when it's read in, so
### the memory lines up better?
output[i,:] = self.data[:,data_index] * total_cap
return site_indices, output
def calculate_variable_costs(self, state_handle, site_indices, schedule):
"""Calculate variable costs and carbon based on schedule.
Inputs:
state_handle
site_indices
schedule: The scheduled output, a set of timeseries
Outputs:
variable_cost, carbon, other
"""
vom = self.period_configs[state_handle['curr_period']]['vom'] * 1e-6
num_sites = len(site_indices)
vble_cost = numpy.zeros(num_sites)
carbon = numpy.zeros(num_sites)
for i in range(num_sites):
vble_cost[i] = numpy.sum(schedule[i,:]) * vom
return vble_cost, carbon, {}
def calculate_outputs_and_costs(self, state_handle, supply_request, max_supply=[], price=[]):
"""Implement calculate_outputs_and_costs as defined in TxMultiGeneratorBase, for the
variable generators.
"""
site_indices, supply = self.calculate_outputs(state_handle, len(supply_request))
vble_cost, carbon, other = self.calculate_variable_costs(state_handle, site_indices, supply)
return supply, vble_cost, carbon, other
def get_simple_desc_string(self, results, state_handle):
"""Implement get_simple_desc_string as defined in TxMultiGeneratorBase, for the
variable generator.
"""
return self.config['detail_type'] + ' with site capacities (MW): ' + (
string.join(map('({:d}: {:.2f}) '.format, results['site_indices'],
results['capacity'])))
def get_full_desc_string(self, results, state_handle):
"""Implement get_full_desc_string as defined in TxMultiGeneratorBase, for the
variable generator.
"""
return self.get_simple_desc_string(results, state_handle)
| 2.078125 | 2 |
backend/app/controllers/interval_controller.py | codestrange/calendario-matcom | 8 | 12796884 | from flask import jsonify
from . import api
from ..database import Interval
@api.route('/intervals')
def get_intervals():
intervals = Interval.query.all()
return jsonify([{
'id': interval.id,
'name': interval.name,
'start': str(interval.start),
'end': str(interval.end)
} for interval in intervals])
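# Illustrative response shape (hypothetical values):
# GET /intervals -> [{"id": 1, "name": "Morning", "start": "08:00:00", "end": "09:40:00"}]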
| 2.640625 | 3 |
ImgEdit.py | yujiecong/PyQt-Digit-Image-Process | 3 | 12796885 | class Edit:
@staticmethod
| 1.25 | 1 |
tests/unit/asynchronous/test_expand_dict.py | GQMai/mbed-cloud-sdk-python | 12 | 12796886 | from mbed_cloud.subscribe.subscribe import expand_dict_as_keys
from tests.common import BaseCase
class Test(BaseCase):
def test_empty(self):
self.assertEqual(expand_dict_as_keys(dict()), [
tuple()
])
def test_static(self):
self.assertEqual(expand_dict_as_keys(dict(a=1, b=2)), [
tuple([('a', 1), ('b', 2)])
])
def test_static_sorted(self):
self.assertEqual(expand_dict_as_keys(dict(c=1, b=2)), [
tuple([('b', 2), ('c', 1)])
])
def test_expand(self):
self.assertEqual(expand_dict_as_keys(dict(a=1, b=[2, 3])), [
tuple([('a', 1), ('b', 2)]),
tuple([('a', 1), ('b', 3)]),
])
def test_product(self):
self.assertEqual(expand_dict_as_keys(dict(a=1, b=[2, 3], c=[4, 5])), [
tuple([('a', 1), ('b', 2), ('c', 4)]),
tuple([('a', 1), ('b', 2), ('c', 5)]),
tuple([('a', 1), ('b', 3), ('c', 4)]),
tuple([('a', 1), ('b', 3), ('c', 5)]),
])
def test_usage(self):
keys = set(expand_dict_as_keys(dict(a=1, b=[2, 3], c=[4, 5])))
self.assertIn(expand_dict_as_keys(dict(a=1, b=2, c=5))[0], keys)
self.assertNotIn(expand_dict_as_keys(dict(a=1, b=2, c=6))[0], keys)
| 2.296875 | 2 |
other/py/dmopc14c5p4.py | tylertian123/CompSciSolutions | 0 | 12796887 | n, m = input().split()
n = int(n)
m = int(m)
good = []
bad = []
for _ in range(n):
s, p = input().split()
if p == "1":
bad.append(int(s))
else:
good.append(int(s))
good.sort()
bad.sort()
prot = 0
space = 0
gi = 0
bi = 0
while space < m:
g = good[gi] if gi < len(good) else None
b = bad[bi] if bi < len(bad) else None
# Both good and bad left
if g is not None and b is not None:
# Can put good
if space + g <= m:
# Can't put bad or good is more efficient
            if space + b > m or g / 2 < b:
prot += 2
space += g
gi += 1
# Can put bad and bad is more efficient
else:
prot += 1
space += b
bi += 1
# Can't put good but can put bad
elif space + b <= m:
prot += 1
space += b
bi += 1
# Can't put either one
else:
break
# Only good left
elif g is not None:
# Still room left
if space + g <= m:
prot += 2
space += g
gi += 1
else:
break
# Only bad left
elif b is not None:
if space + b <= m:
prot += 1
space += b
bi += 1
else:
break
# Nothing left
else:
break
print(prot)
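# Illustrative trace (hypothetical input): with m = 10, one good item of size 6
# (worth 2 protection) and one bad item of size 4 (worth 1), the good item is
# taken first (6/2 = 3 < 4), the bad item still fits, and prot = 3 is printed.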
| 3.28125 | 3 |
src/components/kankeiforms/__init__.py | BigJerBD/Kankei-Backend | 0 | 12796888 | <reponame>BigJerBD/Kankei-Backend<gh_stars>0
from components.kankeiforms.kankeiform import KankeiForm
def init(config):
KankeiForm.timeout = config.DB_TIMEOUT_SEC
from . import exploration
from . import comparison
from . import random
def get_kankeiforms(config):
init(config)
return KankeiForm.registry
def get_kankeiforms_dict(config):
"""
    note:: currently this simply forwards the query config, which is not ideal
    since we want to present information about `querying`, not the config itself
:return:
"""
init(config)
return {
grp: {name: content.asdict() for name, content in content.items()}
for grp, content in KankeiForm.registry.items()
}
| 2.3125 | 2 |
Roller.py | AndrewTruett/histogrammer | 0 | 12796889 | <gh_stars>0
from Roll import Roll
class Roller:
def __init__(self, user):
self._rolls = []
self._user = user
def add_roll(self, roll):
self._rolls.append(roll)
def clear_rolls(self):
self._rolls = []
@property
def rolls(self):
return self._rolls
@property
def user(self):
return self._user
| 2.75 | 3 |
openmm/plot_osmp.py | aakognole/osmp-calc | 0 | 12796890 | import numpy as np
import matplotlib.pyplot as plt
import glob
from sys import argv
from os.path import exists as file_exists
methods = ['drude', 'c36']
mol1, mol2 = str(argv[1]), str(argv[2])
sysname = mol1+'_'+mol2
def blockavg(x,nblocks=30):
lblock = int(len(x)/nblocks)
m = []
for i in range(nblocks):
start = i*lblock
end = (i+1)*lblock
m.append(np.mean(x[start:end]))
m = np.array(m)
return np.mean(m), np.std(m)
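# Illustrative example (hypothetical values): blockavg(np.arange(30), nblocks=3)
# averages the blocks [0..9], [10..19], [20..29] to 4.5, 14.5, 24.5 and returns
# their mean (14.5) and population standard deviation (~8.16).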
for method in methods:
dirs = sorted(glob.glob('%s_at_*'%(method)))
if len(dirs) == 0:
continue
print(method.upper(),':',mol1.upper(),'-',mol2.upper())
osmp = []
f = open('OSMP_%s_%s_%s.dat'%(mol1,mol2,method), 'w')
f.write('# %8s %10s %10s\n'%('Conc (M)','OsmP (bar)','Error'))
print('# %8s %10s %10s'%('Conc (M)','OsmP (bar)','Error'))
for d in dirs:
c = d.split("_")[2]
r1 = np.loadtxt('%s/osmp.%s_%s_%s.1.dat'%(d,mol1,mol2,c))
r2 = np.loadtxt('%s/osmp.%s_%s_%s.2.dat'%(d,mol1,mol2,c))
r3 = np.loadtxt('%s/osmp.%s_%s_%s.3.dat'%(d,mol1,mol2,c))
r = np.concatenate((r1,r2,r3))/100000.0
m,s = blockavg(r[:,1])
print("%10.1f %10.3f %10.3f"%(float(c),m,s))
f.write("%10.1f %10.3f %10.3f\n"%(float(c),m,s))
osmp.append((float(c),m,s))
osmp = np.array(osmp)
f.close()
# plot
plt.figure()
plt.title(method.upper()+': '+mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp[:,0],osmp[:,1],yerr=osmp[:,2],marker='o',markersize=5,capsize=3)
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.tight_layout()
plt.savefig('OSMP_%s_%s_%s.png'%(mol1,mol2,method))
plt.close()
if file_exists('OSMP_%s_%s_drude.dat'%(mol1,mol2)) and file_exists('OSMP_%s_%s_c36.dat'%(mol1,mol2)):
osmp_drude = np.loadtxt('OSMP_%s_%s_drude.dat'%(mol1,mol2))
osmp_c36 = np.loadtxt('OSMP_%s_%s_c36.dat'%(mol1,mol2))
plt.figure()
plt.title(mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp_drude[:,0],osmp_drude[:,1],yerr=osmp_drude[:,2],marker='o',markersize=5,capsize=3,label='drude')
plt.errorbar(osmp_c36[:,0],osmp_c36[:,1],yerr=osmp_c36[:,2],marker='o',markersize=5,capsize=3,label='c36')
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.legend()
plt.tight_layout()
plt.savefig('OSMP_%s_%s_both.png'%(mol1,mol2))
plt.close()
| 2.375 | 2 |
training/data_lib.py | vsewall/frame-interpolation | 521 | 12796891 | # Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset creation for frame interpolation."""
from typing import Callable, Dict, List, Optional
from absl import logging
import gin.tf
import tensorflow as tf
def _create_feature_map() -> Dict[str, tf.io.FixedLenFeature]:
"""Creates the feature map for extracting the frame triplet."""
feature_map = {
'frame_0/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_0/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_0/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_0/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_1/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_1/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_2/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_2/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'path':
tf.io.FixedLenFeature((), tf.string, default_value=''),
}
return feature_map
def _parse_example(sample):
"""Parses a serialized sample.
Args:
sample: A serialized tf.Example to be parsed.
Returns:
dictionary containing the following:
encoded_image
image_height
image_width
"""
feature_map = _create_feature_map()
features = tf.io.parse_single_example(sample, feature_map)
output_dict = {
'x0': tf.io.decode_image(features['frame_0/encoded'], dtype=tf.float32),
'x1': tf.io.decode_image(features['frame_2/encoded'], dtype=tf.float32),
'y': tf.io.decode_image(features['frame_1/encoded'], dtype=tf.float32),
# The fractional time value of frame_1 is not included in our tfrecords,
      # but is always at 0.5. The model will expect this to be specified, so
# we insert it here.
'time': 0.5,
# Store the original mid frame filepath for identifying examples.
'path': features['path'],
}
return output_dict
def _random_crop_images(crop_size: int, images: tf.Tensor,
total_channel_size: int) -> tf.Tensor:
"""Crops the tensor with random offset to the given size."""
if crop_size > 0:
crop_shape = tf.constant([crop_size, crop_size, total_channel_size])
images = tf.image.random_crop(images, crop_shape)
return images
def crop_example(example: tf.Tensor, crop_size: int,
crop_keys: Optional[List[str]] = None):
"""Random crops selected images in the example to given size and keys.
Args:
example: Input tensor representing images to be cropped.
crop_size: The size to crop images to. This value is used for both
height and width.
crop_keys: The images in the input example to crop.
Returns:
Example with cropping applied to selected images.
"""
if crop_keys is None:
crop_keys = ['x0', 'x1', 'y']
channels = [3, 3, 3]
# Stack images along channel axis, and perform a random crop once.
image_to_crop = [example[key] for key in crop_keys]
stacked_images = tf.concat(image_to_crop, axis=-1)
cropped_images = _random_crop_images(crop_size, stacked_images, sum(channels))
cropped_images = tf.split(
cropped_images, num_or_size_splits=channels, axis=-1)
for key, cropped_image in zip(crop_keys, cropped_images):
example[key] = cropped_image
return example
def apply_data_augmentation(
augmentation_fns: Dict[str, Callable[..., tf.Tensor]],
example: tf.Tensor,
augmentation_keys: Optional[List[str]] = None) -> tf.Tensor:
"""Applies random augmentation in succession to selected image keys.
Args:
augmentation_fns: A Dict of Callables to data augmentation functions.
example: Input tensor representing images to be augmented.
augmentation_keys: The images in the input example to augment.
Returns:
Example with augmentation applied to selected images.
"""
if augmentation_keys is None:
    augmentation_keys = ['x0', 'x1', 'y']
# Apply each augmentation in sequence
augmented_images = {key: example[key] for key in augmentation_keys}
for augmentation_function in augmentation_fns.values():
augmented_images = augmentation_function(augmented_images)
for key in augmentation_keys:
example[key] = augmented_images[key]
return example
def _create_from_tfrecord(batch_size, file, augmentation_fns,
crop_size) -> tf.data.Dataset:
"""Creates a dataset from TFRecord."""
dataset = tf.data.TFRecordDataset(file)
dataset = dataset.map(
_parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Perform data_augmentation before cropping and batching
if augmentation_fns is not None:
dataset = dataset.map(
lambda x: apply_data_augmentation(augmentation_fns, x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if crop_size > 0:
dataset = dataset.map(
lambda x: crop_example(x, crop_size=crop_size),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
def _generate_sharded_filenames(filename: str) -> List[str]:
"""Generates filenames of the each file in the sharded filepath.
Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py.
Args:
filename: The sharded filepath.
Returns:
A list of filepaths for each file in the shard.
"""
base, count = filename.split('@')
count = int(count)
return ['{}-{:05d}-of-{:05d}'.format(base, i, count) for i in range(count)]
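# Illustrative example (hypothetical path): _generate_sharded_filenames('data@3')
# returns ['data-00000-of-00003', 'data-00001-of-00003', 'data-00002-of-00003'].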
def _create_from_sharded_tfrecord(batch_size,
train_mode,
file,
augmentation_fns,
crop_size,
max_examples=-1) -> tf.data.Dataset:
"""Creates a dataset from a sharded tfrecord."""
dataset = tf.data.Dataset.from_tensor_slices(
_generate_sharded_filenames(file))
# pylint: disable=g-long-lambda
dataset = dataset.interleave(
lambda x: _create_from_tfrecord(
batch_size,
file=x,
augmentation_fns=augmentation_fns,
crop_size=crop_size),
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=not train_mode)
# pylint: enable=g-long-lambda
dataset = dataset.prefetch(buffer_size=2)
if max_examples > 0:
return dataset.take(max_examples)
return dataset
@gin.configurable('training_dataset')
def create_training_dataset(
batch_size: int,
file: Optional[str] = None,
files: Optional[List[str]] = None,
crop_size: int = -1,
crop_sizes: Optional[List[int]] = None,
augmentation_fns: Optional[Dict[str, Callable[..., tf.Tensor]]] = None
) -> tf.data.Dataset:
"""Creates the training dataset.
The given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
Args:
batch_size: The number of images to batch per example.
file: (deprecated) A path to a sharded tfrecord in <tfrecord>@N format.
Deprecated. Use 'files' instead.
files: A list of paths to sharded tfrecords in <tfrecord>@N format.
crop_size: (deprecated) If > 0, images are cropped to crop_size x crop_size
using tensorflow's random cropping. Deprecated: use 'files' and
'crop_sizes' instead.
crop_sizes: List of crop sizes. If > 0, images are cropped to
crop_size x crop_size using tensorflow's random cropping.
augmentation_fns: A Dict of Callables to data augmentation functions.
Returns:
A tensorflow dataset for accessing examples that contain the input images
'x0', 'x1', ground truth 'y' and time of the ground truth 'time'=[0,1] in a
dictionary of tensors.
"""
if file:
logging.warning('gin-configurable training_dataset.file is deprecated. '
'Use training_dataset.files instead.')
return _create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size)
else:
if not crop_sizes or len(crop_sizes) != len(files):
raise ValueError('Please pass crop_sizes[] with training_dataset.files.')
if crop_size > 0:
raise ValueError(
'crop_size should not be used with files[], use crop_sizes[] instead.'
)
tables = []
for file, crop_size in zip(files, crop_sizes):
tables.append(
_create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size))
return tf.data.experimental.sample_from_datasets(tables)
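# Sketch of matching gin bindings (hypothetical shard names and sizes):
#   training_dataset.files = ['train_a@10', 'train_b@5']
#   training_dataset.crop_sizes = [256, 256]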
@gin.configurable('eval_datasets')
def create_eval_datasets(batch_size: int,
files: List[str],
names: List[str],
crop_size: int = -1,
max_examples: int = -1) -> Dict[str, tf.data.Dataset]:
"""Creates the evaluation datasets.
As opposed to create_training_dataset this function makes sure that the
examples for each dataset are always read in a deterministic (same) order.
Each given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
The (batch_size, crop_size, max_examples) are specified for all eval datasets.
Args:
batch_size: The number of images to batch per example.
files: List of paths to a sharded tfrecord in <tfrecord>@N format.
names: List of names of eval datasets.
crop_size: If > 0, images are cropped to crop_size x crop_size using
tensorflow's random cropping.
max_examples: If > 0, truncate the dataset to 'max_examples' in length. This
can be useful for speeding up evaluation loop in case the tfrecord for the
evaluation set is very large.
Returns:
A dict of name to tensorflow dataset for accessing examples that contain the
input images 'x0', 'x1', ground truth 'y' and time of the ground truth
'time'=[0,1] in a dictionary of tensors.
"""
return {
name: _create_from_sharded_tfrecord(batch_size, False, file, None,
crop_size, max_examples)
for name, file in zip(names, files)
}
| 2.078125 | 2 |
7 kyu/Consecutive letters.py | mwk0408/codewars_solutions | 6 | 12796892 | def solve(s):
s=list(s)
s.sort()
s=''.join(s)
if len(s)==1:
return True
for i in range(1,len(s),1):
if ord(s[i])-ord(s[i-1])!=1:
return False
    return True
| 3.125 | 3 |
Homework/BAB_homework_task2.py | kc9jud/ShellCorretta | 0 | 12796893 | import matplotlib.pyplot as plt
import numpy as np
#returns the binding energy predicted by nuclear liquid drop model
def BE_liquidDrop(N,Z): #N=num of neutrons, Z=num of protons
#num of nucleons
A = N+Z
#physical constants (from Alex's notes, in MeV)
a1 = 15.49
a2 = 17.23
a3 = 0.697
a4 = 22.6
#nuclear liquid drop model
    return a1*A - a2*A**(2./3) - a3*(Z**2)/(A**(1./3)) - a4*(N-Z)**2/A
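# i.e. BE(N,Z) = a1*A - a2*A^(2/3) - a3*Z^2/A^(1/3) - a4*(N-Z)^2/A
# (volume, surface, Coulomb and asymmetry terms of the semi-empirical mass formula)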
#finds the neutron dripline
def findDripLine(Z):
#test statement for finding dripline
check = False
#start with symmetric nucleus
N=Z
#iterative search for dripline
while (check == False):
BE_i = BE_liquidDrop(N+1,Z)
BE_f = BE_liquidDrop(N,Z)
Q = BE_f-BE_i
if (Q>0):
return N
else:
N = N+1
def makeMatCores(Zrange):
Nstart = 0
Nrange = int(2.3*Zrange)
Zstart = 1
mat = np.zeros((Zrange-Zstart,Nrange-Nstart))
for Z in range(Zstart,Zrange):
for N in range(Nstart,Nrange):
            # Q > 0 would mean the (N+1)-th neutron is unbound, mirroring findDripLine
            BE_i = BE_liquidDrop(N+1,Z)
            BE_f = BE_liquidDrop(N,Z)
            Q = BE_f-BE_i
            if (Q<0):
                mat[Z-Zstart, N-Nstart] = 1
            else:
                mat[Z-Zstart, N-Nstart] = 0
return mat
#plt.matshow(makeMatCores(100,100))
#define range of Z's
Z_low = 2
Z_top = 150
mat = makeMatCores(Z_top)
img2 = plt.imshow(mat,interpolation='nearest',
origin='lower')
plt.show()
#interested in finding the neutron drip line for the range Z=36-44
#Z = range(Z_low, Z_top+1)
#N = []
#
#for z in Z:
# dripline = findDripLine(z)
# print "For", z,"protons, the neutron dripline is",dripline, "neutrons"
# N.append(dripline)
#
#mat = np.zeros((max(Z)+1,max(N)+1))
#
#for i in range(0,len(Z)):
# mat[Z[i],N[i]] = 1
#plt.matshow(mat)
#plt.show()
| 2.859375 | 3 |
src/DNAAnimBuilding.py | Toonerz/libdna | 1 | 12796894 | <reponame>Toonerz/libdna<filename>src/DNAAnimBuilding.py
from pandac.PandaModules import *
from panda3d.core import *
from DNAInteractiveProp import DNAInteractiveProp
class DNAAnimBuilding(DNAInteractiveProp):
def __init__(self):
DNAInteractiveProp.__init__(self)
self.anim = ''
def setAnim(self, anim):
self.anim = anim
def getAnim(self):
        return self.anim
| 1.75 | 2 |
tests/__init__.py | ymber/surface | 5 | 12796895 | """
Surface control station tests.
"""
| 0.890625 | 1 |
gcraft/application/wx.py | ddomurad/gcraft | 0 | 12796896 | <gh_stars>0
from OpenGL.GLUT import *
from gcraft.core.app import GCraftApp
from gcraft.core.input_event import InputEvent
import wx
from wx import glcanvas
class GCraftCanvas(wx.glcanvas.GLCanvas):
def __init__(self, parent: wx.Window, gc_app: GCraftApp):
wx.glcanvas.GLCanvas.__init__(self, parent, -1)
self._renderer = gc_app
self._renderer.swap_buffers = self.on_swap_buffers
self._renderer_inited = False
self._last_mouse_pos = None
self._context = wx.glcanvas.GLContext(self)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background_event)
self.Bind(wx.EVT_SIZE, self.on_resize_event)
self.Bind(wx.EVT_PAINT, self.on_paint_event)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down_event)
self.Bind(wx.EVT_KEY_UP, self.on_key_up_event)
def init(self):
glutInit()
self._renderer_inited = True
self._renderer.on_init()
self.resize()
def resize(self):
if self._renderer_inited:
size = self.GetClientSize()
self.SetCurrent(self._context)
self._renderer.on_reshape(size.width, size.height)
def render(self):
self._renderer.on_render()
self._renderer.input_state.clear_mouse_movement()
def on_swap_buffers(self):
self.SwapBuffers()
def on_erase_background_event(self, event):
pass # Do nothing, to avoid flashing on MSW.
def on_resize_event(self, event):
wx.CallAfter(self.resize)
event.Skip()
def on_paint_event(self, event):
self.SetCurrent(self._context)
if not self._renderer_inited:
self.init()
self.render()
self.Refresh(False)
def on_mouse_event(self, event):
if event.GetEventType() == wx.wxEVT_LEFT_DOWN:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=0, state=True)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_LEFT_UP:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=0, state=False)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
if event.GetEventType() == wx.wxEVT_RIGHT_DOWN:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=1, state=True)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_RIGHT_UP:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=1, state=False)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_MOTION:
if self._last_mouse_pos is None:
self._last_mouse_pos = [event.X, event.Y]
input_event = InputEvent(InputEvent.IE_MOUSE_MOVE, mouse_x=event.X, mouse_y=event.Y,
mouse_dx=self._last_mouse_pos[0] - event.X,
mouse_dy=self._last_mouse_pos[1] - event.Y)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
self._last_mouse_pos[0] = event.X
self._last_mouse_pos[1] = event.Y
def on_key_down_event(self, event):
input_event = InputEvent(InputEvent.IE_KEY_DOWN, mouse_x=event.Y, mouse_y=event.Y, key=event.GetKeyCode())
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
def on_key_up_event(self, event):
input_event = InputEvent(InputEvent.IE_KEY_UP, mouse_x=event.Y, mouse_y=event.Y, key=event.GetKeyCode())
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
class GCraftContinuousRenderer(wx.Timer):
def __init__(self, canvas: GCraftCanvas):
wx.Timer.__init__(self)
self.canvas = canvas
def start(self):
wx.Timer.Start(self, 10)
def stop(self):
wx.Timer.Stop(self)
def Notify(self):
self.canvas.Refresh(False)
class GCraftContinuousCanvas(GCraftCanvas):
def __init__(self, parent: wx.Window, gc_app: GCraftApp):
GCraftCanvas.__init__(self, parent, gc_app)
self.renderer_timer = GCraftContinuousRenderer(self)
def start(self):
self.renderer_timer.start()
def stop(self):
self.renderer_timer.stop()
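# Usage sketch (hypothetical GCraftApp subclass MyApp implementing
# on_init/on_reshape/on_render/on_input):
#   app = wx.App()
#   frame = wx.Frame(None, title="gcraft")
#   canvas = GCraftContinuousCanvas(frame, MyApp())
#   canvas.start()
#   frame.Show()
#   app.MainLoop()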
| 1.984375 | 2 |
lr.py | xyk2000/LogisticsRegression | 1 | 12796897 | <reponame>xyk2000/LogisticsRegression<filename>lr.py
import numpy as np
np.random.seed(10)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
class LogisticsReg():
def __init__(self,learn_rate=1e-3,niter=100000):
self.lr=learn_rate
self.iter=int(niter)
def fit(self,x,y):
x=np.insert(x, 0, values=1, axis=1)
w=np.random.uniform(size=(x.shape[1]))
for i in range(self.iter):
p = sigmoid(x.dot(w))
w -= self.lr * x.T.dot(p - y)
self.w=w
def predict(self,x):
x=np.insert(x, 0, values=1, axis=1)
return sigmoid(x.dot(self.w))
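# Note: fit() runs plain gradient descent on the cross-entropy loss, whose
# gradient w.r.t. w is X^T (sigmoid(Xw) - y); predict() returns P(y=1 | x).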
if __name__=='__main__':
data=np.loadtxt('data.csv',delimiter=',',skiprows=1)
np.random.shuffle(data)
x=data[:600,:-1]
y=data[:600,-1]
mean=np.mean(x,axis=0)
std=np.std(x,axis=0)
x=(x-mean)/std
x_val=data[600:,:-1]
y_val=data[600:,-1]
x_val=(x_val-mean)/std
model=LogisticsReg()
model.fit(x,y)
import sklearn.metrics
pred=model.predict(x_val)
print(sklearn.metrics.roc_auc_score(y_val,pred))
'''
with open('acc_thres.csv','w') as f:
for i in range(100):
pred=model.predict(x_val)>(i/100)
acc=sklearn.metrics.accuracy_score(y_val,pred)
f.write('%s,%s\n'%(i/100,acc))
'''
with open('acc_lr.csv','w') as f:
for i in range(-10,1):
model=LogisticsReg(10**i)
model.fit(x,y)
pred=model.predict(x_val)>0.5
acc=sklearn.metrics.accuracy_score(y_val,pred)
f.write('%s,%s\n'%(10**i,acc))
| 3.015625 | 3 |
kalamine/layout.py | qwerty-fr/kalamine | 0 | 12796898 | <reponame>qwerty-fr/kalamine
#!/usr/bin/env python3
import datetime
import os
import re
import sys
import yaml
from .template import xkb_keymap, \
osx_keymap, osx_actions, osx_terminators, \
klc_keymap, klc_deadkeys, klc_dk_index, \
web_keymap, web_deadkeys
from .utils import open_local_file, load_data, text_to_lines, lines_to_text, \
DEFAULT_DEAD_KEYS, ODK_ID
###
# Helpers
#
def upper_key(letter):
if len(letter) != 1: # dead key?
return ' '
customAlpha = {
'\u00df': '\u1e9e', # ß ẞ
'\u007c': '\u00a6', # | ¦
'\u003c': '\u2264', # < ≤
'\u003e': '\u2265', # > ≥
'\u2020': '\u2021', # † ‡
'\u2190': '\u21d0', # ← ⇐
'\u2191': '\u21d1', # ↑ ⇑
'\u2192': '\u21d2', # → ⇒
'\u2193': '\u21d3', # ↓ ⇓
'\u00b5': ' ', # µ (to avoid getting `Μ` as uppercase)
}
if letter in customAlpha:
return customAlpha[letter]
elif letter.upper() != letter.lower():
return letter.upper()
else:
return ' '
def substitute_lines(text, variable, lines):
prefix = 'KALAMINE::'
exp = re.compile('.*' + prefix + variable + '.*')
indent = ''
for line in text.split('\n'):
m = exp.match(line)
if m:
indent = m.group().split(prefix)[0]
break
return exp.sub(lines_to_text(lines, indent), text)
def substitute_token(text, token, value):
exp = re.compile('\\$\\{' + token + '(=[^\\}]*){0,1}\\}')
return exp.sub(value, text)
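# Illustrative example (hypothetical strings): substitute_token('v${version=0.1}',
# 'version', '1.2.3') returns 'v1.2.3'; an optional '=default' part is consumed too.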
def load_tpl(layout, ext):
tpl = 'base'
if layout.has_altgr:
tpl = 'full'
if layout.has_1dk and ext.startswith('.xkb'):
tpl = 'full_1dk'
out = open_local_file(os.path.join('tpl', tpl + ext)).read()
out = substitute_lines(out, 'GEOMETRY_base', layout.base)
out = substitute_lines(out, 'GEOMETRY_full', layout.full)
out = substitute_lines(out, 'GEOMETRY_altgr', layout.altgr)
for key, value in layout.meta.items():
out = substitute_token(out, key, value)
return out
###
# Constants
#
CONFIG = {
'author': '<NAME>',
'license': 'WTFPL - Do What The Fuck You Want Public License',
'geometry': 'ISO'
}
SPACEBAR = {
'shift': " ", # U+0020 SPACE
'altgr': " ", # U+0020 SPACE
'altgr_shift': " ", # U+0020 SPACE
'1dk': "'", # U+0027 APOSTROPHE
'1dk_shift': "'" # U+0027 APOSTROPHE
}
GEOMETRY = load_data('geometry.yaml')
###
# Main
#
class KeyboardLayout:
""" Lafayette-style keyboard layout: base + 1dk + altgr layers. """
def __init__(self, filepath):
""" Import a keyboard layout to instanciate the object. """
# initialize a blank layout
self.layers = [{}, {}, {}, {}, {}, {}]
self.dead_keys = {} # dictionary subset of DEAD_KEYS
self.dk_index = [] # ordered keys of the above dictionary
self.meta = CONFIG.copy() # default parameters, hardcoded
self.has_altgr = False
self.has_1dk = False
        # load the YAML data (and its ancestor, if any)
try:
cfg = yaml.load(open(filepath), Loader=yaml.SafeLoader)
if 'extends' in cfg:
path = os.path.join(os.path.dirname(filepath), cfg['extends'])
ext = yaml.load(open(path), Loader=yaml.SafeLoader)
ext.update(cfg)
cfg = ext
except Exception as e:
print('File could not be parsed.')
print('Error: {}.'.format(e))
sys.exit(1)
# metadata: self.meta
for k in cfg:
if k not in ['base', 'full', 'altgr', 'spacebar', 'deadkeys']:
self.meta[k] = cfg[k]
filename = os.path.splitext(os.path.basename(filepath))[0]
self.meta['name'] = cfg['name'] if 'name' in cfg else filename
self.meta['name8'] = cfg['name8'] if 'name8' in cfg \
else self.meta['name'][0:8]
self.meta['fileName'] = self.meta['name8'].lower()
self.meta['lastChange'] = datetime.date.today().isoformat()
dead_keys = {}
if 'deadkeys' in cfg:
dead_keys = cfg['deadkeys']
for dk in DEFAULT_DEAD_KEYS:
found = any((d for d in dead_keys if d['char'] == dk['char']))
if not found:
dead_keys.append(dk)
# keyboard layers: self.layers & self.dead_keys
rows = GEOMETRY[self.meta['geometry']]['rows']
if 'full' in cfg:
full = text_to_lines(cfg['full'])
self._parse_template(full, dead_keys, rows, 0)
self._parse_template(full, dead_keys, rows, 4)
self.has_altgr = True
else:
base = text_to_lines(cfg['base'])
self._parse_template(base, dead_keys, rows, 0)
self._parse_template(base, dead_keys, rows, 2)
if 'altgr' in cfg:
self.has_altgr = True
self._parse_template(text_to_lines(cfg['altgr']), dead_keys, rows, 4)
# space bar
spc = SPACEBAR.copy()
if 'spacebar' in cfg:
for k in cfg['spacebar']:
spc[k] = cfg['spacebar'][k]
self.layers[0]['spce'] = ' '
self.layers[1]['spce'] = spc['shift']
self.layers[2]['spce'] = spc['1dk']
self.layers[3]['spce'] = spc['shift_1dk'] if 'shift_1dk' in spc \
else spc['1dk']
if self.has_altgr:
self.layers[4]['spce'] = spc['altgr']
self.layers[5]['spce'] = spc['altgr_shift']
# active dead keys: self.dk_index
for dk in dead_keys:
if dk['char'] in self.dead_keys:
self.dk_index.append(dk['char'])
# remove unused characters in self.dead_keys[].{base,alt}
def layer_has_char(char, layer_index):
for id in self.layers[layer_index]:
if self.layers[layer_index][id] == char:
return True
return False
for dk_id in self.dead_keys:
base = self.dead_keys[dk_id]['base']
alt = self.dead_keys[dk_id]['alt']
used_base = ''
used_alt = ''
for i in range(len(base)):
if layer_has_char(base[i], 0) or layer_has_char(base[i], 1):
used_base += base[i]
used_alt += alt[i]
self.dead_keys[dk_id]['base'] = used_base
self.dead_keys[dk_id]['alt'] = used_alt
# 1dk behavior
if ODK_ID in self.dead_keys:
self.has_1dk = True
odk = self.dead_keys[ODK_ID]
# alt_self (double-press), alt_space (1dk+space)
odk['alt_space'] = spc['1dk']
for key in self.layers[0]:
if self.layers[0][key] == ODK_ID:
odk['alt_self'] = self.layers[2][key]
break
# copy the 2nd and 3rd layers to the dead key
for i in [0, 1]:
for (name, alt_char) in self.layers[i + 2].items():
base_char = self.layers[i][name]
if name != 'spce' and base_char != ODK_ID:
odk['base'] += base_char
odk['alt'] += alt_char
def _parse_template(self, template, dead_keys, rows, layerNumber):
""" Extract a keyboard layer from a template. """
if layerNumber == 0: # base layer
colOffset = 0
else: # AltGr or 1dk
colOffset = 2
j = 0
for row in rows:
i = row['offset'] + colOffset
keys = row['keys']
base = list(template[2 + j * 3])
shift = list(template[1 + j * 3])
for key in keys:
baseKey = ('*' if base[i - 1] == '*' else '') + base[i]
shiftKey = ('*' if shift[i - 1] == '*' else '') + shift[i]
if layerNumber == 0 and baseKey == ' ': # 'shift' prevails
baseKey = shiftKey.lower()
if layerNumber != 0 and shiftKey == ' ':
shiftKey = upper_key(baseKey)
if baseKey != ' ':
self.layers[layerNumber + 0][key] = baseKey
if shiftKey != ' ':
self.layers[layerNumber + 1][key] = shiftKey
for dk in dead_keys:
if baseKey == dk['char']:
self.dead_keys[baseKey] = dk.copy()
if shiftKey == dk['char']:
self.dead_keys[shiftKey] = dk.copy()
i += 6
j += 1
###
# Geometry: base, full, altgr
#
def _fill_template(self, template, rows, layerNumber):
""" Fill a template with a keyboard layer. """
if layerNumber == 0: # base layer
colOffset = 0
shiftPrevails = True
else: # AltGr or 1dk
colOffset = 2
shiftPrevails = False
j = 0
for row in rows:
i = row['offset'] + colOffset
keys = row['keys']
base = list(template[2 + j * 3])
shift = list(template[1 + j * 3])
for key in keys:
baseKey = ' '
if key in self.layers[layerNumber]:
baseKey = self.layers[layerNumber][key]
shiftKey = ' '
if key in self.layers[layerNumber + 1]:
shiftKey = self.layers[layerNumber + 1][key]
dead_base = len(baseKey) == 2 and baseKey[0] == '*'
dead_shift = len(shiftKey) == 2 and shiftKey[0] == '*'
if shiftPrevails:
shift[i] = shiftKey[-1]
if dead_shift:
shift[i-1] = '*'
if upper_key(baseKey) != shiftKey:
base[i] = baseKey[-1]
if dead_base:
base[i-1] = '*'
else:
base[i] = baseKey[-1]
if dead_base:
base[i-1] = '*'
if upper_key(baseKey) != shiftKey:
shift[i] = shiftKey[-1]
if dead_shift:
shift[i-1] = '*'
i += 6
template[2 + j * 3] = ''.join(base)
template[1 + j * 3] = ''.join(shift)
j += 1
return template
def _get_geometry(self, layers=[0], name='ISO'):
""" `geometry` view of the requested layers. """
rows = GEOMETRY[name]['rows']
template = GEOMETRY[name]['template'].split('\n')[:-1]
for i in layers:
template = self._fill_template(template, rows, i)
return template
@property
def base(self):
return self._get_geometry([0, 2]) # base + 1dk
@property
def full(self):
return self._get_geometry([0, 4]) # base + altgr
@property
def altgr(self):
return self._get_geometry([4]) # altgr only
###
# OS-specific drivers: keylayout, klc, xkb, xkb_patch
#
@property
def keylayout(self):
""" Mac OSX driver """
out = load_tpl(self, '.keylayout')
for i, layer in enumerate(osx_keymap(self)):
out = substitute_lines(out, 'LAYER_' + str(i), layer)
out = substitute_lines(out, 'ACTIONS', osx_actions(self))
out = substitute_lines(out, 'TERMINATORS', osx_terminators(self))
return out
@property
def klc(self):
""" Windows driver (warning: requires CR/LF + UTF16LE encoding) """
out = load_tpl(self, '.klc')
out = substitute_lines(out, 'LAYOUT', klc_keymap(self))
out = substitute_lines(out, 'DEAD_KEYS', klc_deadkeys(self))
out = substitute_lines(out, 'DEAD_KEY_INDEX', klc_dk_index(self))
out = substitute_token(out, 'encoding', 'utf-16le')
return out
@property
def xkb(self): # will not work with Wayland
""" GNU/Linux driver (standalone / user-space) """
out = load_tpl(self, '.xkb')
out = substitute_lines(out, 'LAYOUT', xkb_keymap(self, False))
return out
@property
def xkb_patch(self):
""" GNU/Linux driver (system patch) """
out = load_tpl(self, '.xkb_patch')
out = substitute_lines(out, 'LAYOUT', xkb_keymap(self, True))
return out
###
# JSON output: keymap (base+altgr layers) and dead keys
#
@property
def json(self):
return {
'name': self.meta['name'],
'description': self.meta['description'],
'geometry': self.meta['geometry'].lower(),
'keymap': web_keymap(self),
'deadkeys': web_deadkeys(self),
'altgr': self.has_altgr
}
| 2.21875 | 2 |
src/core.py | MKuranowski/AdventOfCode2021 | 0 | 12796899 | <gh_stars>0
from typing import Callable, Generator, Iterable, TypeVar
_T = TypeVar("_T")
_K = TypeVar("_K")
def empty_str(t: str) -> bool:
return t == ""
def split_on(seq: Iterable[_T], pred: Callable[[_T], bool]) -> list[list[_T]]:
after_split: list[list[_T]] = []
current: list[_T] = []
for elem in seq:
if pred(elem):
# Split on this element
if current:
after_split.append(current)
current = []
else:
current.append(elem)
if current:
after_split.append(current)
return after_split
def aggregate_by(iterable: Iterable[_T], key: Callable[[_T], _K]) -> dict[_K, list[_T]]:
"""Groups elements from an iterable by key(elem).
Analogous to itertools.group_by; however this function doesn't care about the order
of the keys.
"""
d: dict[_K, list[_T]] = {}
for elem in iterable:
d.setdefault(key(elem), []).append(elem)
return d
def iterate(f: Callable[[_T], _T], x: _T = 0) -> Generator[_T, None, None]:
"""Iterate produces an infinite sequence of x, f(x), f(f(x)), ...
See Clojure's iterate.
"""
while True:
yield x
x = f(x)
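# Minimal usage sketch (hypothetical data), runnable as a script:
if __name__ == "__main__":
    from itertools import islice

    # split_on drops the separator elements themselves
    assert split_on(["a", "", "b", "c", ""], empty_str) == [["a"], ["b", "c"]]
    # aggregate_by groups elements without requiring the input to be sorted
    assert aggregate_by([1, 2, 3, 4, 5], key=lambda n: n % 2) == {1: [1, 3, 5], 0: [2, 4]}
    # iterate is infinite, so take a finite slice
    assert list(islice(iterate(lambda n: n * 2, 1), 5)) == [1, 2, 4, 8, 16]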
| 2.859375 | 3 |
rl/algorithms/mfrl/mfrl.py | RamiSketcher/AMMI-RL | 0 | 12796900 | import gym
from gym.spaces import Box
import numpy as np
import torch as T
import rl.environments
from rl.data.buffer import TrajBuffer, ReplayBuffer
# from rl.data.buffer import TrajBuffer, ReplayBufferNP
# def make_env(env_id, seed, idx, capture_video, run_name):
# def thunk():
# print('in thunk')
# env = gym.make(env_id)
# env = gym.wrappers.RecordEpisodeStatistics(env)
# if capture_video:
# if idx == 0:
# env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
# env = gym.wrappers.ClipAction(env)
# env = gym.wrappers.NormalizeObservation(env)
# env = gym.wrappers.TransformObservation(env, lambda obs: np.clip(obs, -10, 10))
# env = gym.wrappers.NormalizeReward(env)
# env = gym.wrappers.TransformReward(env, lambda reward: np.clip(reward, -10, 10))
# env.seed(seed)
# env.action_space.seed(seed)
# env.observation_space.seed(seed)
# return env
#
# return thunk
class MFRL:
"""
Model-Free Reinforcement Learning
"""
def __init__(self, exp_prefix, configs, seed, device):
# super(MFRL, self).__init__(configs, seed)
# print('init MBRL!')
self.exp_prefix = exp_prefix
self.configs = configs
self.seed = seed
self._device_ = device
def _build(self):
self._set_env()
self._set_buffer()
def _set_env(self):
name = self.configs['environment']['name']
evaluate = self.configs['algorithm']['evaluation']
# Inintialize Learning environment
self.learn_env = gym.make(name)
self._seed_env(self.learn_env)
assert isinstance (self.learn_env.action_space, Box), "Works only with continuous action space"
if evaluate:
# Ininialize Evaluation environment
self.eval_env = gym.make(name)
self._seed_env(self.eval_env)
else:
self.eval_env = None
# Spaces dimensions
self.obs_dim = self.learn_env.observation_space.shape[0]
self.act_dim = self.learn_env.action_space.shape[0]
self.act_up_lim = self.learn_env.action_space.high
self.act_low_lim = self.learn_env.action_space.low
def _seed_env(self, env):
env.seed(self.seed)
env.action_space.seed(self.seed)
env.observation_space.seed(self.seed)
def _set_buffer(self):
max_size = self.configs['data']['buffer_size']
device = self._device_
if self.configs['algorithm']['on-policy']:
max_size = self.configs['data']['batch_size']
num_traj = max_size//20
horizon = 1000
self.buffer = TrajBuffer(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.seed, device)
else:
self.buffer = ReplayBuffer(self.obs_dim, self.act_dim, max_size, self.seed, device)
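    # Sketch of the configs dict this class expects (keys inferred from the
    # accesses in this file; concrete values here are hypothetical):
    #   {'environment': {'name': 'Hopper-v2', 'horizon': 1000, 'type': '...'},
    #    'algorithm': {'on-policy': True, 'evaluation': {'eval_episodes': 5},
    #                  'learning': {'epoch_steps': 1000, 'expl_epochs': 10}},
    #    'data': {'buffer_size': 1000000, 'batch_size': 4000}}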
def initialize_buffer(self, num_traj=400):
# print('Initialize a New Buffer..')
seed = self.seed
device = self._device_
if self.configs['algorithm']['on-policy']:
# num_traj = 40
horizon = 1000
max_size = self.configs['data']['batch_size']
self.buffer = TrajBuffer(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.seed, device)
def initialize_learning(self, NT, Ni):
max_el = self.configs['environment']['horizon']
o, Z, el, t = self.learn_env.reset(), 0, 0, 0
if Ni < 1: return o, Z, el, t
        print(f'[ Initial exploration ] Starting')
for ni in range(1, Ni+1):
            print(f'[ Initial exploration ] Epoch {ni}')
nt = 0
while nt < NT:
# Random actions
a = self.learn_env.action_space.sample()
o_next, r, d, info = self.learn_env.step(a)
d = True if el == max_el else d # Ignore artificial termination
self.buffer.store_transition(o, a, r, o_next, d)
o = o_next
Z += r
el +=1
t +=1
if d or (el == max_el): o, Z, el = self.learn_env.reset(), 0, 0
nt += 1
return o, Z, el, t
def internact_op(self, n, o, d, Z, el, t):
Nt = self.configs['algorithm']['learning']['epoch_steps']
max_el = self.configs['environment']['horizon']
# a = self.actor_critic.get_action_np(o)
with T.no_grad(): a, log_pi, v = self.actor_critic.get_a_and_v_np(T.Tensor(o))
# print('log_pi: ', log_pi)
o_next, r, d_next, _ = self.learn_env.step(a)
Z += r
el += 1
t += 1
self.buffer.store_transition(o, a, r, d, v, log_pi, el)
if d_next or (el == max_el):
# o_next, Z, el = self.learn_env.reset(), 0, 0
with T.no_grad(): v_next = self.actor_critic.get_v(T.Tensor(o_next)).cpu()
self.buffer.traj_tail(d_next, v_next, el)
# print(f'termination: t={t} | el={el} | total_size={self.buffer.total_size()}')
o_next, d_next, Z, el = self.learn_env.reset(), 0, 0, 0
o, d = o_next, d_next
return o, d, Z, el, t
def internact_opB(self, n, o, Z, el, t):
Nt = self.configs['algorithm']['learning']['epoch_steps']
max_el = self.configs['environment']['horizon']
# a = self.actor_critic.get_action_np(o)
with T.no_grad(): a, log_pi, v = self.actor_critic.get_a_and_v_np(T.Tensor(o))
# print('log_pi: ', log_pi)
o_next, r, d, _ = self.learn_env.step(a)
Z += r
el += 1
t += 1
self.buffer.store(o, a, r, o_next, v, log_pi, el)
o = o_next
if d or (el == max_el):
if el == max_el:
with T.no_grad(): v = self.actor_critic.get_v(T.Tensor(o)).cpu()
else:
# print('v=0')
v = T.Tensor([0.0])
self.buffer.finish_path(el, v)
# print(f'termination: t={t} | el={el} | total_size={self.buffer.total_size()}')
o, Z, el = self.learn_env.reset(), 0, 0
return o, Z, el, t
def internact(self, n, o, Z, el, t):
Nx = self.configs['algorithm']['learning']['expl_epochs']
max_el = self.configs['environment']['horizon']
if n > Nx:
            a = self.actor_critic.get_action_np(o) # Stochastic action | No reparameterization
else:
a = self.learn_env.action_space.sample()
o_next, r, d, _ = self.learn_env.step(a)
d = False if el == max_el else d # Ignore artificial termination
self.buffer.store_transition(o, a, r, o_next, d)
o = o_next
Z += r
el +=1
t +=1
if d or (el == max_el): o, Z, el = self.learn_env.reset(), 0, 0
return o, Z, el, t
def evaluate_op(self):
evaluate = self.configs['algorithm']['evaluation']
if evaluate:
print('[ Evaluation ]')
EE = self.configs['algorithm']['evaluation']['eval_episodes']
max_el = self.configs['environment']['horizon']
EZ = [] # Evaluation episodic return
ES = [] # Evaluation episodic score
EL = [] # Evaluation episodic length
for ee in range(1, EE+1):
print(f' [ Agent Evaluation ] Episode: {ee} ', end='\r')
o, d, Z, S, el = self.eval_env.reset(), False, 0, 0, 0
while not(d or (el == max_el)):
# with T.no_grad(): a, _, _ = self.actor_critic.get_pi(T.Tensor(o))
a = self.actor_critic.get_action_np(o, deterministic=True)
# a = self.actor_critic.get_action_np(o, deterministic=True)
o, r, d, info = self.eval_env.step(a)
Z += r
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': S += info['score']
el += 1
EZ.append(Z)
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': ES.append(S/el)
EL.append(el)
# if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':
# for i in range(len(ES)):
# ES[i] /= EL[i]
return EZ, ES, EL
def evaluate(self):
evaluate = self.configs['algorithm']['evaluation']
if evaluate:
print('[ Evaluation ]')
EE = self.configs['algorithm']['evaluation']['eval_episodes']
max_el = self.configs['environment']['horizon']
EZ = [] # Evaluation episodic return
ES = [] # Evaluation episodic score
EL = [] # Evaluation episodic length
for ee in range(1, EE+1):
print(f' [ Agent Evaluation ] Episode: {ee} ', end='\r')
o, d, Z, S, el = self.eval_env.reset(), False, 0, 0, 0
while not(d or (el == max_el)):
# Take deterministic actions at evaluation time
a = self.actor_critic.get_action_np(o, deterministic=True) # Deterministic action | No reparameterization
o, r, d, info = self.eval_env.step(a)
Z += r
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': S += info['score']
el += 1
EZ.append(Z)
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': ES.append(S/el)
EL.append(el)
# if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':
# for i in range(len(ES)):
# ES[i] /= EL[i]
return EZ, ES, EL
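# Sketch of how this class is typically specialized (illustrative only, not part of the
# original file): a concrete algorithm subclasses MFRL, builds the env/buffer via _build(),
# and attaches its own `actor_critic`, which the internact*/evaluate* helpers assume exists.
#
#   class MyAgent(MFRL):
#       def __init__(self, exp_prefix, configs, seed, device):
#           super().__init__(exp_prefix, configs, seed, device)
#           self._build()              # sets self.learn_env, self.eval_env, self.buffer
#           # self.actor_critic = ...  # assumed to be supplied by the concrete algorithm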
| 2.078125 | 2 |
twitter/getStatus.py | cuete/py-cat | 0 | 12796901 | #!/usr/bin/env python
#Update your .twconfig file in this same directory
#with your own api keys and secrets
#Get them signing up at https://apps.twitter.com
#Install required modules with
#'pip install -r requirements.txt'
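#Example .twconfig layout (hypothetical values; the section/option names are
#the ones read via configparser below):
#[keys]
#consumer_key = YOUR_CONSUMER_KEY
#consumer_secret = YOUR_CONSUMER_SECRET
#access_key = YOUR_ACCESS_TOKEN
#access_secret = YOUR_ACCESS_TOKEN_SECRET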
import configparser
import os
import sys
import json
import twitter
def jdefault(o):
return o.__dict__
#usage: print(json.dumps(string, default=jdefault))
def main():
#Twitter status id to fetch
statusId = '973464578708316161'
try:
sys.stdout.write('reading config file... ')
config = configparser.RawConfigParser()
config.read('.twconfig')
print('success!')
except:
print('failed to read config file!')
exit()
try:
sys.stdout.write('connecting to api... ')
api = twitter.Api(consumer_key=config.get('keys', 'consumer_key'),
consumer_secret=config.get('keys', 'consumer_secret'),
access_token_key=config.get('keys', 'access_key'),
access_token_secret=config.get('keys', 'access_secret'))
print('success!')
except Exception as e:
print('failed to connect to twitter api!')
print(e)
exit()
try:
sys.stdout.write('fetching status %s... ' % statusId )
status = api.GetStatus(statusId)
print('success!')
except:
print('failed to get status!')
exit()
try:
        print('writing to file %s.txt... ' % statusId)
with open(statusId + '.txt', 'w') as outfile:
statusparsed = json.loads(str(status).encode())
outfile.write(json.dumps(status, default=jdefault) + '\n')
sys.stdout.write('Created at: ' + statusparsed['created_at'])
outfile.closed
except:
print('failed writing to file!')
exit()
if __name__ == "__main__":
main()
| 2.640625 | 3 |
schoolbooks/schoolbook_even_medium.py | Dia-B/polymul-z2mx-m4 | 6 | 12796902 | <reponame>Dia-B/polymul-z2mx-m4<filename>schoolbooks/schoolbook_even_medium.py
from math import ceil
from .common import schoolbook_inner, Registers, schoolbook_postprocess
import sys
def schoolbook_medium(SRC1, SRC2, DEST, n):
parts = ceil(n / 12)
npart = ceil(n / parts)
instructions = schoolbook_even_medium(SRC1, SRC2, DEST, npart * parts)
yield from schoolbook_postprocess(SRC1, SRC2, DEST, instructions, n,
stack_src=True)
def schoolbook_even_medium(SRC1, SRC2, DEST, n):
# the idea here is that we need to divide neatly into schoolbook blocks
# for n <= 12, it's simply one schoolbook
# for 12 < n <= 24, we divide into 2x2 schoolbooks (so require even n)
# for 24 < n <= 36, we divide into 3x3 schoolbooks (so require n % 3 == 0)
# etc.
assert n % ceil(n / 12) == 0, "Can only handle n that divide into schoolbooks"
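    # Worked example of the blocking arithmetic above (illustrative):
    #   n = 24 -> parts = ceil(24 / 12) = 2, npart = 24 // 2 = 12  (a 2x2 grid of 12x12 schoolbooks)
    #   n = 36 -> parts = 3, npart = 12                            (a 3x3 grid)
    #   n = 26 -> parts = 3, but 26 % 3 != 0, so the assert above rejects it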
# reverse to prioritize low registers (as these have 16-bit loads)
regs = Registers(reversed([f'r{i}' for i in range(0, 13)] + ["r14"]))
# consider SRC1, SRC2 and DEST allocated
regs.alloc(SRC1)
regs.alloc(SRC2)
regs.alloc(DEST)
# these can be flexible, do not need to be r0 and r1 after first re-alloc
src1 = SRC1
src2 = SRC2
parts = ceil(n / 12)
npart = n // parts
    # note that these offsets shouldn't exceed 4096 (i.e. n shouldn't be huge)
dest_offset = 0
a_offset = 0
b_offset = 0
def blockload(addr, regs, n, offset=0):
for i, r in enumerate(regs):
# if it's the last coefficient for odd n, load only one halfword
if i == n // 2 and n & 1:
yield f"ldrh {r}, [{addr}, #{i * 4 + offset}]"
else:
yield f"ldr {r}, [{addr}, #{i * 4 + offset}]"
# allocate registers for a and b
a = [regs.alloc() for _ in range(ceil(npart / 2))]
yield from blockload(src1, a, npart, a_offset)
if npart >= 11:
# we cannot afford to keep these pointers around
yield f"push {{{src1}}}"
regs.free(src1)
b = [regs.alloc() for _ in range(ceil(npart / 2))]
yield from blockload(src2, b, npart, b_offset)
if npart >= 11:
yield f"push {{{src2}}}"
regs.free(src2)
initialized = set()
for col in range(parts):
for row in (range(parts - 1, -1, -1) if col & 1 else range(parts)):
lastrow = not (col & 1) and row == parts-1 or (col & 1) and row == 0
yield from schoolbook_inner(npart, a, b, DEST, regs, initialized,
restore_b=not lastrow, dest_offset=dest_offset)
if col & 1: # for odd columns, go 'back up' instead of down
dest_offset -= 2 * npart
else:
dest_offset += 2 * npart
# if it's the last part in this col, don't load new src1 inputs
if lastrow:
if row == 0: # if we just finished a back-and-forth
dest_offset += 2 * 2 * npart
continue
if npart >= 11:
src1 = regs.alloc()
yield f"ldr {src1}, [sp, #4]"
if col & 1: # for odd columns, go 'back up' instead of down
a_offset -= 2 * npart
else:
a_offset += 2 * npart
yield from blockload(src1, a, npart, a_offset)
if npart >= 11:
yield f"str {src1}, [sp, #4]"
regs.free(src1)
if col == parts-1:
# if it's the last part, don't load new src2 inputs
continue
if npart >= 11:
src2 = regs.alloc()
yield f"ldr {src2}, [sp, #0]"
b_offset += 2 * npart
regs.free(*set(b)) # free; for some n there's one extra after repacking
b = [regs.alloc() for _ in range(ceil(npart / 2))]
yield from blockload(src2, b, npart, b_offset)
if npart >= 11:
yield f"str {src2}, [sp, #0]"
regs.free(src2)
regs.free(*set(a))
regs.free(*set(b))
if npart >= 11:
yield f"pop {{{SRC2}}}"
yield f"pop {{{SRC1}}}"
| 2.59375 | 3 |
examples/rec/tf_ncf.py | initzhang/Hetu | 82 | 12796903 | import tensorflow as tf
def neural_mf(user_input, item_input, y_, num_users, num_items, embed_partitioner=None):
embed_dim = 8
layers = [64, 32, 16, 8]
learning_rate = 0.01
with tf.compat.v1.variable_scope('nmf', dtype=tf.float32):
with tf.device('/cpu:0'):
User_Embedding = tf.compat.v1.get_variable(name="user_embed", shape=(
num_users, embed_dim + layers[0] // 2), initializer=tf.random_normal_initializer(stddev=0.01), partitioner=embed_partitioner)
Item_Embedding = tf.compat.v1.get_variable(name="item_embed", shape=(
num_items, embed_dim + layers[0] // 2), initializer=tf.random_normal_initializer(stddev=0.01), partitioner=embed_partitioner)
user_latent = tf.nn.embedding_lookup(User_Embedding, user_input)
item_latent = tf.nn.embedding_lookup(Item_Embedding, item_input)
W1 = tf.compat.v1.get_variable(name='W1', shape=(
layers[0], layers[1]), initializer=tf.random_normal_initializer(stddev=0.1))
W2 = tf.compat.v1.get_variable(name='W2', shape=(
layers[1], layers[2]), initializer=tf.random_normal_initializer(stddev=0.1))
W3 = tf.compat.v1.get_variable(name='W3', shape=(
layers[2], layers[3]), initializer=tf.random_normal_initializer(stddev=0.1))
W4 = tf.compat.v1.get_variable(name='W4', shape=(
embed_dim + layers[3], 1), initializer=tf.random_normal_initializer(stddev=0.1))
with tf.device('/gpu:0'):
mf_user_latent, mlp_user_latent = tf.split(
user_latent, [embed_dim, layers[0] // 2], 1)
mf_item_latent, mlp_item_latent = tf.split(
item_latent, [embed_dim, layers[0] // 2], 1)
mf_vector = tf.multiply(mf_user_latent, mf_item_latent)
mlp_vector = tf.concat((mlp_user_latent, mlp_item_latent), 1)
fc1 = tf.matmul(mlp_vector, W1)
relu1 = tf.nn.relu(fc1)
fc2 = tf.matmul(relu1, W2)
relu2 = tf.nn.relu(fc2)
fc3 = tf.matmul(relu2, W3)
relu3 = tf.nn.relu(fc3)
concat_vector = tf.concat((mf_vector, relu3), 1)
y = tf.reshape(tf.matmul(concat_vector, W4), (-1,))
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=y_)
loss = tf.reduce_mean(loss)
y = tf.sigmoid(y)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate)
return loss, y, optimizer
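# Minimal usage sketch (illustrative only; the placeholder names are assumptions and the
# session/feed_dict training loop is omitted):
#
#   user_ph = tf.compat.v1.placeholder(tf.int32, shape=(None,))
#   item_ph = tf.compat.v1.placeholder(tf.int32, shape=(None,))
#   label_ph = tf.compat.v1.placeholder(tf.float32, shape=(None,))
#   loss, y, optimizer = neural_mf(user_ph, item_ph, label_ph, num_users, num_items)
#   train_op = optimizer.minimize(loss)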
| 2.40625 | 2 |
tests/graph/test_ppatterns.py | saezlab/squidpy | 0 | 12796904 | import pytest
from anndata import AnnData
from pandas.testing import assert_frame_equal
import numpy as np
from squidpy.gr import moran, ripley_k, co_occurrence
MORAN_K = "moranI"
def test_ripley_k(adata: AnnData):
"""Check ripley score and shape."""
ripley_k(adata, cluster_key="leiden")
# assert ripley in adata.uns
assert "ripley_k_leiden" in adata.uns.keys()
# assert clusters intersection
cat_ripley = set(adata.uns["ripley_k_leiden"]["leiden"].unique())
cat_adata = set(adata.obs["leiden"].cat.categories)
assert cat_ripley.isdisjoint(cat_adata) is False
def test_moran_seq_par(dummy_adata: AnnData):
"""Check whether moran results are the same for seq. and parallel computation."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
df = moran(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)
df_parallel = moran(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)
idx_df = df.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
with pytest.raises(AssertionError, match=r'.*\(column name="pval_sim"\) are different.*'):
# because the seeds will be different, we don't expect the pval_sim values to be the same
assert_frame_equal(df, df_parallel)
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_moran_reproducibility(dummy_adata: AnnData, n_jobs: int):
"""Check moran reproducibility results."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
# seed will work only when multiprocessing/loky
df_1 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
df_2 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
idx_df = df_1.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
# assert fdr correction in adata.uns
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df_1.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
assert_frame_equal(df_1, df_2)
def test_co_occurrence(adata: AnnData):
"""
check ripley score and shape
"""
co_occurrence(adata, cluster_key="leiden")
# assert occurrence in adata.uns
assert "leiden_co_occurrence" in adata.uns.keys()
assert "occ" in adata.uns["leiden_co_occurrence"].keys()
assert "interval" in adata.uns["leiden_co_occurrence"].keys()
# assert shapes
arr = adata.uns["leiden_co_occurrence"]["occ"]
assert arr.ndim == 3
assert arr.shape[2] == 49
assert arr.shape[1] == arr.shape[0] == adata.obs["leiden"].unique().shape[0]
# @pytest.mark.parametrize(("ys", "xs"), [(10, 10), (None, None), (10, 20)])
@pytest.mark.parametrize(("n_jobs", "n_splits"), [(1, 2), (2, 2)])
def test_co_occurrence_reproducibility(adata: AnnData, n_jobs: int, n_splits: int):
"""Check co_occurrence reproducibility results."""
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
np.testing.assert_array_equal(sorted(interval_1), sorted(interval_2))
np.testing.assert_allclose(arr_1, arr_2)
| 2.3125 | 2 |
tests/test_characteristic.py | bdrydyk/homecontroller | 0 | 12796905 | """
Tests for pyhap.characteristic
"""
import uuid
from unittest import mock
import pytest
import pyhap.characteristic as characteristic
from pyhap.characteristic import Characteristic
PROPERTIES = {
"Format": characteristic.HAP_FORMAT.INT,
"Permissions": [characteristic.HAP_PERMISSIONS.READ]
}
def get_char(props, valid=None, min_value=None, max_value=None):
if valid is not None:
props["ValidValues"] = valid
if min_value is not None:
props["minValue"] = min_value
if max_value is not None:
props["maxValue"] = max_value
c = Characteristic(display_name="Test Char",
type_id=uuid.uuid1(),
properties=props)
return c
def test_default_value():
char = get_char(PROPERTIES.copy())
assert (characteristic.HAP_FORMAT.DEFAULT[PROPERTIES["Format"]]
== char.value)
def test_default_valid_value():
valid_values = {"foo": 2, "bar": 3}
char = get_char(PROPERTIES.copy(), valid=valid_values)
assert char.value in valid_values.values()
def test_set_value():
char = get_char(PROPERTIES.copy())
new_value = 3
char.set_value(new_value)
assert char.value == new_value
def test_set_value_valid_values():
valid_values = {"foo": 2, "bar": 3, }
char = get_char(PROPERTIES.copy(), valid=valid_values)
with pytest.raises(ValueError):
char.set_value(4)
def test_set_value_callback_toggle():
char = get_char(PROPERTIES.copy())
char.setter_callback = mock.Mock()
char.set_value(3, should_callback=False)
assert not char.setter_callback.called
char.set_value(3, should_callback=True)
assert char.setter_callback.called
def test_override_properties_properties():
new_properties = {'minValue': 10, 'maxValue': 20, 'step': 1}
char = get_char(PROPERTIES.copy(), min_value=0, max_value=1)
char.override_properties(properties=new_properties)
assert char.properties['minValue'] == new_properties['minValue']
assert char.properties['maxValue'] == new_properties['maxValue']
assert char.properties['step'] == new_properties['step']
def test_override_properties_valid_values():
new_valid_values = {'foo2': 2, 'bar2': 3}
char = get_char(PROPERTIES.copy(), valid={'foo': 1, 'bar': 2})
char.override_properties(valid_values=new_valid_values)
assert char.properties['ValidValues'] == new_valid_values
def test_get_hap_value():
max_value = 5
raw_value = 6
char = get_char(PROPERTIES.copy(), max_value=max_value)
char.set_value(raw_value, should_notify=False)
assert char.value == raw_value
assert char.get_hap_value() == max_value
def test_notify():
char = get_char(PROPERTIES.copy())
broker_mock = mock.Mock()
char.broker = broker_mock
notify_value = 3
expected = {
"type_id": char.type_id,
"value": notify_value,
}
char.value = notify_value
char.notify()
assert broker_mock.publish.called
broker_mock.publish.assert_called_with(expected, char)
def test_notify_except_no_broker():
char = get_char(PROPERTIES.copy())
with pytest.raises(characteristic.NotConfiguredError):
char.notify()
| 2.53125 | 3 |
example.py | julianspaeth/RandomSurvialForest | 40 | 12796906 | from random_survival_forest import RandomSurvivalForest, concordance_index
from lifelines import datasets
from sklearn.model_selection import train_test_split
import time
rossi = datasets.load_rossi()
# Attention: duration column must be index 0, event column index 1 in y
y = rossi.loc[:, ["arrest", "week"]]
X = rossi.drop(["arrest", "week"], axis=1)
X, X_test, y, y_test = train_test_split(X, y, test_size=0.25, random_state=10)
print("RSF")
start_time = time.time()
rsf = RandomSurvivalForest(n_estimators=20, n_jobs=-1, min_leaf=10)
rsf = rsf.fit(X, y)
print("--- %s seconds ---" % (time.time() - start_time))
y_pred = rsf.predict(X_test)
c_val = concordance_index(y_time=y_test["week"], y_pred=y_pred, y_event=y_test["arrest"])
print("C-index", round(c_val, 3))
| 2.46875 | 2 |
sysinv/sysinv/sysinv/sysinv/puppet/device.py | etaivan/stx-config | 0 | 12796907 | <reponame>etaivan/stx-config<filename>sysinv/sysinv/sysinv/sysinv/puppet/device.py
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import collections
from sysinv.common import constants
from sysinv.puppet import base
class DevicePuppet(base.BasePuppet):
"""Class to encapsulate puppet operations for device configuration"""
def _get_device_id_index(self, host):
"""
Builds a dictionary of device lists indexed by device id.
"""
devices = collections.defaultdict(list)
for device in self.dbapi.pci_device_get_all(hostid=host.id):
devices[device.pdevice_id].append(device)
return devices
def _get_host_qat_device_config(self, pci_device_list):
"""
Builds a config dictionary for QAT devices to be used by the platform
devices (compute) puppet resource.
"""
device_config = {}
qat_c62x_devices = pci_device_list[constants.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE]
if len(qat_c62x_devices) != 0:
for idx, device in enumerate(qat_c62x_devices):
name = 'pci-%s' % device.pciaddr
dev = {
'qat_idx': idx,
"device_id": "c62x",
}
device_config.update({name: dev})
qat_dh895xcc_devices = pci_device_list[constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE]
if len(qat_dh895xcc_devices) != 0:
for idx, device in enumerate(qat_dh895xcc_devices):
name = 'pci-%s' % device.pciaddr
dev = {
'qat_idx': idx,
"device_id": "dh895xcc",
}
device_config.update({name: dev})
if len(device_config) == 0:
return {}
return {
'platform::devices::qat::device_config': device_config,
'platform::devices::qat::service_enabled': True,
}
def get_host_config(self, host):
if constants.WORKER not in host.subfunctions:
# configuration only required for compute hosts
return {}
devices = self._get_device_id_index(host)
if len(devices) == 0:
# no pci devices on the system
return {}
device_config = {}
qat_devices = self._get_host_qat_device_config(devices)
if qat_devices:
device_config.update(qat_devices)
return device_config
| 1.976563 | 2 |
Code/search_algorithms/binary_search/binarySearch.py | Kevinjadia/Hacktoberfest_DSA_2021 | 4 | 12796908 | target = int(input("enter search target: "))
def binarySearch(list, target):
    maximum = len(list) - 1
    minimum = 0
    while minimum <= maximum:
        m = (maximum + minimum) // 2
        midPoint = list[m]
        if target > midPoint:
            minimum = m + 1
        elif target < midPoint:
            maximum = m - 1
        else:
            return m
    return None  # target not in the list; the original version looped forever here
print(binarySearch([1,2,3,4,5,6,7,8,9,10],target)) | 4.0625 | 4 |
accounts/urls.py | rattletat/homework-server | 1 | 12796909 | <filename>accounts/urls.py<gh_stars>1-10
from django.contrib.auth.views import LogoutView
from django.urls import path
from accounts.views import login, send_login_email
app_name = 'accounts'
urlpatterns = [
path("send_login_email", send_login_email, name="send_login_email"),
path("login", login, name="login"),
path('logout', LogoutView.as_view(), name='logout'),
]
| 1.9375 | 2 |
lib/python3.6/token.py | trikyas/django-project | 1 | 12796910 | <gh_stars>1-10
/anaconda3/lib/python3.6/token.py | 1.0625 | 1 |
Machine-Learning-with-Python- From-LM-to-DL/Midterm/04_midterm_exercise.py | andresdelarosa1887/Public-Projects | 1 | 12796911 | <gh_stars>1-10
import numpy as np
classification_vector= np.array([-1,-1,-1,-1,-1,1,1,1,1,1])
data_vector= np.array([[0,0], [2,0],[3,0], [0,2],[2,2],[5,1],[5,2],[2,4],[4,4],[5,5]])
def quadratic_kernel(data_vector):
return np.array((1 + np.dot(data_vector, data_vector.T))**2)
def perceptron_quadratic_kernel(feature_matrix, labels, T):
# Your code here
theta_0 = 0
theta= np.zeros(len(feature_matrix[0]))
for epoch in range(T):
for i, x in enumerate(feature_matrix):
kernelized_vector= quadratic_kernel(feature_matrix[i])
if (labels[i]*(np.dot(kernelized_vector, theta) + theta_0))[1] <= 0:
theta= theta + (np.dot(labels[i], kernelized_vector))
theta_0= (theta_0 + labels[i])
print(theta)
else:
theta_0= theta_0
theta= theta
return((theta, theta_0))
| 2.765625 | 3 |
Simulation/main.py | MKamyab1991/quadcopter_ppo | 2 | 12796912 | import gym
import numpy as np
import torch
import torch.optim as optim
from utils_main import make_env, save_files
from neural_network import ActorCritic
from ppo_method import ppo
from common.multiprocessing_env import SubprocVecEnv
from itertools import count
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
num_envs = 2
env_name = "CustomEnv-v0"
envs = [make_env(env_name) for i in range(num_envs)]
envs = SubprocVecEnv(envs)
num_inputs = envs.observation_space.shape[0]
num_outputs = envs.action_space.shape[0]
# Hyper params:
hidden_size = 400
lr = 3e-6
num_steps = 20
mini_batch_size = 5
ppo_epochs = 4
threshold_reward = -0.01
model = ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
env = gym.make(env_name)
my_ppo = ppo(model, env)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_frames = 1_500_0000
frame_idx = 0
test_rewards = []
save_iteration = 1000
model_save_iteration = 1000
state = envs.reset()
early_stop = False
def trch_ft_device(input, device):
output = torch.FloatTensor(input).to(device)
return output
saver_model = save_files()
while frame_idx < max_frames and not early_stop:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = trch_ft_device(state, device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
# appending
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
# next iteration init.
state = next_state
frame_idx += 1
if frame_idx % save_iteration == 0:
test_reward = np.mean([my_ppo.test_env() for _ in range(num_envs)])
test_rewards.append(test_reward)
# plot(frame_idx, test_rewards)
if test_reward > threshold_reward:
early_stop = True
if frame_idx % model_save_iteration == 0:
saver_model.model_save(model)
next_state = trch_ft_device(next_state, device)
_, next_value = model(next_state)
returns = my_ppo.compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
my_ppo.ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage, optimizer)
max_expert_num = 50000
num_steps = 0
expert_traj = []
# building an episode based on the current model.
for i_episode in count():
state = env.reset()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
action = dist.sample().cpu().numpy()[0]
next_state, reward, done, _ = env.step(action)
state = next_state
total_reward += reward
expert_traj.append(np.hstack([state, action]))
num_steps += 1
print("episode:", i_episode, "reward:", total_reward)
if num_steps >= max_expert_num:
break
expert_traj = np.stack(expert_traj)
print()
print(expert_traj.shape)
print()
np.save("expert_traj.npy", expert_traj)
| 2.28125 | 2 |
contentstore/migrations/0011_message_metadata.py | praekeltfoundation/seed-stage-based-messaging | 1 | 12796913 | # Generated by Django 2.1.2 on 2019-02-20 15:47
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("contentstore", "0010_auto_20181126_1104")]
operations = [
migrations.AddField(
model_name="message",
name="metadata",
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
)
]
| 1.703125 | 2 |
tests/test_all.py | pawelmhm/recenseo.es | 0 | 12796914 | <reponame>pawelmhm/recenseo.es
"""
Integration tests for the app.
"""
import os,sys
import unittest
from contextlib import closing
from datetime import datetime
import time
import StringIO
import logging
from flask import url_for
from src import flaskr
from src import modele
from src.config import TestConfig
from utilities import manipulate_db
from tests import test_login
timestamp = datetime.fromtimestamp(time.time())
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class BaseTestCase(unittest.TestCase):
def setUp(self):
"""Before each test, set up a blank database"""
self.app = flaskr.app.test_client()
self.login("Hugo", "secret")
def tearDown(self):
self.logout()
def login(self,username,password):
return self.app.post('/login', data=dict(
username=username,
password=password), follow_redirects=True)
def logout (self):
return self.app.get('/logout', follow_redirects=True)
class GeneralTestCase(BaseTestCase):
def edit_profile(self):
return self.app.get("/edit_profile", follow_redirects=True)
def test_edit_profile(self):
rv = self.edit_profile()
self.assertIn("Edit your profile Hugo", rv.data)
def update_profile(self, email,about_me):
return self.app.post("/edit_profile", data=dict(email=email,
about_me=about_me), follow_redirects=True)
def test_update_profile(self):
rv = self.update_profile("maniana", "Curious explorer of new lands")
self.assertIn("Your profile has been updated", rv.data)
def make_review_request(self,title,content,category,deadline):
return self.app.post("/post_request_review", data=dict(
title=title,
content=content,
category=category,
deadline=deadline), follow_redirects=True)
def test_request_review(self):
rv = self.app.get("/request_review", follow_redirects=True)
self.assertEqual(200,rv.status_code)
@unittest.skip("make review request tested with files")
def test_make_review_request(self):
rv = self.make_review_request("title", "In this wos of so much importance to literature.", "academic",
"09/12/2012")
self.assertEqual(200,rv.status_code)
def main_thread(self):
return self.app.get('/', follow_redirects=True)
def test_main_thread(self):
rv = self.main_thread()
self.assertEqual(200, rv.status_code)
self.assertIn('Review Someone',rv.data)
def click_reviews(self,id):
url = "/req/" + str(id)
return self.app.get(url, follow_redirects=True)
def test_click_reviews(self):
rv = self.click_reviews(1)
self.assertEqual(200, rv.status_code)
def display_user_requests(self,uid):
return self.app.get('/display_user_requests/%s' % uid, follow_redirects=True)
def test_display_user_request(self):
rv = self.display_user_requests(1)
self.assertEqual(rv.status_code,200)
self.assertIn("Hugo",rv.data)
def review_this(self,review_text,rating,request_id):
url = '/req/post/' + str(request_id)
return self.app.post(url, data=dict(
review_text=review_text,
rating=rating),
follow_redirects=True)
def test_review_this(self):
# invalid request
response = self.review_this("good work",99,102)
self.assertIn("errors",response.data)
rv = self.review_this(
"nice work this is amazing", 5, 101)
self.assertEqual(rv.status_code,200)
self.assertIn("has been added",rv.data)
def test_reviews_of_user(self):
rv = self.app.get("/reviews_of_user/%s" % 2,
follow_redirects=True)
self.assertEqual(rv.status_code, 200)
self.assertIn("reviews of drafts published by", rv.data)
def test_show_responses(self):
rv = self.app.get('/reviews/201')
self.assertEqual(200,rv.status_code)
def test_update_possible(self):
url = "/req/" + str(101)
rv = self.app.get(url)
self.assertIn("Update Request", rv.data)
url = "/req/" + str(3)
rv = self.app.get(url)
self.assertNotIn("Update Request", rv.data)
def update_post(self,id,title,content,category,deadline):
url = "/req/update/" + str(id)
return self.app.post(url,data=dict(
title=title,
content=content,
category=category,
deadline=deadline), follow_redirects=True)
    @unittest.skip("not implemented yet")
def test_update_post(self):
# what if update is not allowed? Hugo's article has id 101, he tries
# to update 102
rv = self.update_post(102,"new title","new content with long soom",\
"academic",timestamp)
self.assertEqual(200,rv.status_code)
self.assertIn("invalid", rv.data)
# now a valid request
rv = self.update_post(101,"new title","new content with a lot of blah", \
"academic",timestamp)
self.assertIn("ok",rv.data)
def test_all_reviews(self):
rv = self.app.get('/reviews')
self.assertEqual(rv.status_code,200)
self.assertIn('All reviews written by all users',rv.data)
self.assertIn('Anonymous', rv.data)
class TestPostRequest(BaseTestCase):
data = {'title':'Lewiathanus livus',
'content':'A book by Hobbes is always worth reading',
'category':'academic', 'deadline':str(timestamp)}
rather_not = ['sh', 'ps1','ghost','exe']
def do_post(self, data):
return self.app.post('/request_review', data=data,
follow_redirects=True)
def upload_something(self, extension, message):
"""
Message, expected message in flash.
"""
data = self.data.copy()
filename = 'file.%s' % extension
data["file"] = (StringIO.StringIO('new file'), filename)
response = self.do_post(data)
self.assertEqual(response.status_code,200)
self.assertIn(message,response.data)
def test_upload_allowed_formats(self):
# valid formats
for ext in TestConfig.ALLOWED_EXTENSIONS:
self.upload_something(ext,'review request sucessfuly')
def test_upload_invalid_data(self):
# Invalid extensions
# we expect a message informing about it
message = "following formats are allowed:"
for ext in self.rather_not:
self.upload_something(ext, message)
def test_no_file(self):
rv = self.do_post(self.data)
self.assertEqual(rv.status_code,200)
self.assertIn("No file added",rv.data)
def test_invalid_form(self):
data = self.data.copy()
data["content"] = ""
rv = self.do_post(data)
self.assertEqual(rv.status_code,200)
self.assertIn("Invalid form.",rv.data)
if __name__ == '__main__':
manipulate_db.populateDb(TestConfig.DATABASE)
unittest.main()
| 2.515625 | 3 |
output/models/ms_data/additional/test102433_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12796915 | <filename>output/models/ms_data/additional/test102433_xsd/__init__.py
from output.models.ms_data.additional.test102433_xsd.test102433 import Bar
__all__ = [
"Bar",
]
| 1.117188 | 1 |
mongo_db_from_config/__init__.py | cisagov/mongo-db-from-config | 0 | 12796916 | """This package contains the mongo_db_from_config code."""
from .mongo_db_from_config import db_from_config
__version__ = "0.0.1"
__all__ = ["db_from_config"]
| 1.148438 | 1 |
circuit_benchmarks/qft.py | eddieschoute/circuit-benchmarks | 7 | 12796917 | <reponame>eddieschoute/circuit-benchmarks<filename>circuit_benchmarks/qft.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import math
from qiskit import QuantumRegister, QuantumCircuit
def qft_circuit(number_qubits: int):
"""Create quantum fourier transform circuit on quantum register qreg."""
qreg = QuantumRegister(number_qubits, name="myreg")
circuit = QuantumCircuit(qreg, name="qft")
for i in range(number_qubits):
for j in range(i):
circuit.cu1(math.pi / float(2 ** (i - j)), qreg[i], qreg[j])
circuit.h(qreg[i])
return circuit
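# Minimal usage sketch (illustrative only, not part of the original benchmark file):
#
#   circuit = qft_circuit(3)
#   print(circuit.draw())   # text diagram of the 3-qubit QFT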
| 2.4375 | 2 |
randomFourierComplete.py | aasensio/randomModulator | 1 | 12796918 | import numpy as np
import matplotlib.pyplot as pl
import scipy.sparse as sp
import scipy.sparse.linalg as splinalg
import scipy.fftpack as fft
def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions.
Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
[102 110 118 126 134]
[182 190 198 206 214]
[262 270 278 286 294]
[342 350 358 366 374]]
"""
if not operation.lower() in ['sum', 'mean', 'average', 'avg']:
raise ValueError("Operation {} not supported.".format(operation))
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
new_shape))
compression_pairs = [(d, c//d) for d, c in zip(new_shape,
ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
if operation.lower() == "sum":
ndarray = ndarray.sum(-1*(i+1))
elif operation.lower() in ["mean", "average", "avg"]:
ndarray = ndarray.mean(-1*(i+1))
return ndarray
def myFFT(x):
"""
Return the FFT of a real signal taking into account some normalization
Parameters
----------
x : float
Signal time series
Returns
-------
float : Fourier coefficients of the real signal
"""
out = fft.rfft(x)
return out / np.sqrt(len(out))
def myIFFT(x):
"""
Return the IFFT for a real signal taking into account some normalization
Parameters
----------
x : float
Fourier coefficients
Returns
-------
float : signal
"""
out = fft.irfft(x)
return out * np.sqrt(len(out))
def myTotalPower(f):
"""
Return the power spectrum of a signal
Parameters
----------
f : float
Signal Fourier coefficients
Returns
-------
float : total power
"""
return f[0]**2 + 2.0*np.sum(f[1:]**2)
class randomDemodulator(object):
"""Summary
Returns
-------
TYPE : Demodulate I and Q signals together
"""
def __init__(self, totalTime, dt, dtIntegration, stokes, beta, signalToNoise=0.0, seed=0, modulationType=0):
"""Summary
Parameters
----------
totalTime : TYPE
Description
dt : TYPE
Description
dtIntegration : TYPE
Description
seed : int, optional
Description
Returns
-------
TYPE : Description
"""
self.totalTime = totalTime
self.dt = dt
self.dtIntegration = dtIntegration
self.seed = seed
self.signalToNoise = signalToNoise
self.modulationType = modulationType
if (self.seed != 0):
np.random.seed(self.seed)
# Read seeing power spectrum
self.powerLites = np.loadtxt('powerSpectrumSeeing.dat')
self.powerLites[:,1] = 10.0**self.powerLites[:,1]
# Number of samples of the original sample
self.nSteps = int(totalTime / dt)
self.times = np.arange(self.nSteps) * self.dt
# Frequency axis
self.freq = fft.rfftfreq(self.nSteps, d=dt)
# Betas and Stokes parameters
self.beta = beta
self.stokes = stokes
# Generate Gaussian noise with unit variance and multiply by the square root of the power spectrum
# to generate the noise with the appropriate power spectrum
noise = np.random.randn(self.nSteps)
noiseFFT = myFFT(noise)
self.powerSeeing = np.interp(np.abs(self.freq), self.powerLites[:,0], self.powerLites[:,1])
self.powerSeeing[0] = 0.0
self.seeingFFT = np.sqrt(self.powerSeeing) * noiseFFT
self.seeingFFT /= np.sqrt(myTotalPower(self.seeingFFT))
self.seeing = myIFFT(self.seeingFFT)
# Make sure that the total power is unity
print 'Total variance = ', np.sum(self.seeing**2), myTotalPower(self.seeingFFT)
# Compute the signal and its power spectrum
self.signal = [None] * 4
for i in range(4):
self.signal[i] = self.stokes[i]*(1.0 + self.beta[i] * self.seeing)
# Generate modulation using a lambda/4 and lambda/2 polarimeter with random angles
# self.modulation = [np.ones(self.nSteps), 2.0*np.random.rand(self.nSteps)-1.0, 2.0*np.random.rand(self.nSteps)-1.0, 2.0*np.random.rand(self.nSteps)-1.0]
if (self.modulationType == 0):
self.alphaModulation = 0.5*np.pi*np.random.rand(self.nSteps)
self.betaModulation = 0.5*np.pi*np.random.rand(self.nSteps)
else:
temp = np.load('alphaBetaSamples.npz')
self.alphaModulation = temp['arr_0'][0:self.nSteps]
self.betaModulation = temp['arr_1'][0:self.nSteps]
self.modulation = [np.ones(self.nSteps), \
np.cos(2.0*self.alphaModulation) * np.cos(2.0*(self.alphaModulation-2.0*self.betaModulation)),\
np.sin(2.0*self.alphaModulation) * np.cos(2.0*(self.alphaModulation-2.0*self.betaModulation)),\
np.sin(2.0*(2.0*self.betaModulation-self.alphaModulation))]
self.integrationTime = self.dtIntegration
self.lengthSample = int(self.dtIntegration / self.dt)
self.nSamples = int(self.dt / self.dtIntegration * self.nSteps)
self.signalIntegrated = [None] * 2
for i in range(2):
temp = self.signal[0] * self.modulation[0]
sign = (-1.0)**i
for j in range(1,4):
temp += sign * self.signal[j] * self.modulation[j]
self.signalIntegrated[i] = bin_ndarray(temp, (self.nSamples,), operation='sum')
self.signalIntegrated[i] += np.mean(self.signalIntegrated[i]) / self.signalToNoise * np.random.randn(self.nSamples)
self.tIntegrated = np.arange(self.nSamples) * self.dtIntegration
# Generate modulation matrix
self.sparseM = [None] * 4
self.sparseMStar = [None] * 4
for state in range(4):
sparseData = []
sparseRow = []
sparseCol = []
loop = 0
for i in range(self.nSamples):
for j in range(self.lengthSample):
sparseData.append(self.modulation[state][loop])
sparseRow.append(i)
sparseCol.append(loop)
loop += 1
self.sparseM[state] = sp.coo_matrix((sparseData, (sparseRow, sparseCol)), shape=(self.nSamples, self.nSteps))
self.sparseMStar[state] = self.sparseM[state].transpose(copy=True)
self.factor = 2*np.ones(self.nSteps)
self.factor[0] = 1.0
def forward(self, signal, beta, ray):
return self.sparseM[ray].dot(1.0+beta*signal)
def forwardPartial(self, signal, ray):
return self.sparseM[ray].dot(signal)
def backward(self, z, ray):
return self.factor * myFFT(self.sparseMStar[ray].dot(z))
def totalPower(self, z):
return (z[0] * z[0].conj() + 2 * np.sum(z[1:] * z[1:].conj())).real / len(z)
def softThreshold(self, x, lambdaValue):
return np.fmax(0,1.0 - lambdaValue / np.fmax(np.abs(x),1e-10)) * x
def hardThreshold(self, x, lambdaValue):
xPar = np.copy(x)
xPar[np.abs(x) < lambdaValue] = 0.0
return xPar
def FISTA(self, initial=None, initialStokes=None, thresholdMethod='soft', niter=10, lambdaValue=1.0):
"""
        Solve the l1-regularized problem using the FISTA algorithm, which solves:
        argmin_alpha ||y - M*F^{-1}*alpha||_2 + \lambda ||alpha||_1
Args:
rank (int, optional): rank of the solution
niter (int, optional): number of iterations
Returns:
TYPE: Description
"""
if (initial == None):
x = np.zeros(self.nSteps)
I0 = 0.9
Q0 = 0.1
U0 = 0.2
V0 = 0.3
betaI = 10.0#self.beta[0]
betaQ = 10.0#self.beta[1]
betaU = 10.0#self.beta[2]
betaV = 10.0#self.beta[3]
else:
x = np.copy(initial)
I0, Q0, U0, V0 = initialStokes
xNew = np.copy(x)
y = np.copy(x)
res = self.sparseMStar[0].dot(self.sparseM[0])
largestEigenvalue = splinalg.eigsh(res, k=1, which='LM', return_eigenvectors=False)
self.mu = 0.5 / (np.real(largestEigenvalue)) * 0.0002
t = 1.0
normL1 = []
normL2 = []
normL0 = []
for loop in range(niter):
signal = myIFFT(x)
forwI = self.forward(signal, betaI, 0) # M1(t) * (1+betaI*N(t))
forwQ = self.forward(signal, betaQ, 1) # M2(t) * (1+betaQ*N(t))
forwU = self.forward(signal, betaU, 2) # M3(t) * (1+betaU*N(t))
forwV = self.forward(signal, betaV, 3) # M4(t) * (1+betaV*N(t))
residual1 = self.signalIntegrated[0] - (I0 * forwI + Q0 * forwQ + U0 * forwU + V0 * forwV)
gradient1 = -2.0 * I0 * betaI * self.backward(residual1, 0) - 2.0 * Q0 * betaQ * self.backward(residual1, 1) - \
2.0 * U0 * betaU * self.backward(residual1, 2) - 2.0 * V0 * betaV * self.backward(residual1, 3)
residual2 = self.signalIntegrated[1] - (I0 * forwI - Q0 * forwQ - U0 * forwU - V0 * forwV)
gradient2 = -2.0 * I0 * betaI * self.backward(residual2, 0) + 2.0 * Q0 * betaQ * self.backward(residual2, 1) + \
2.0 * U0 * betaU * self.backward(residual2, 2) + 2.0 * V0 * betaV * self.backward(residual2, 3)
gradient = gradient1 + gradient2
if (thresholdMethod == 'hardLambda'):
xNew = self.hardThreshold(y - self.mu * np.real(gradient), lambdaValue)
if (thresholdMethod == 'hardPercentage'):
xNew = self.hardThreshold(y - self.mu * np.real(gradient), lambdaValue)
if (thresholdMethod == 'soft'):
xNew = self.softThreshold(y - self.mu * np.real(gradient), lambdaValue)
xNew[0] = 0.0
xNew /= np.sqrt(myTotalPower(xNew))
if (thresholdMethod == 'L2'):
xNew = y - self.mu * np.real(gradient)
tNew = 0.5*(1+np.sqrt(1+4.0*t**2))
y = xNew + (t-1.0) / tNew * (xNew - x)
t = tNew
x = np.copy(xNew)
normResidual = np.linalg.norm(residual1 + residual2)
normSolutionL1 = np.linalg.norm(x, 1)
normSolutionL0 = np.linalg.norm(x, 0)
if (loop % 10):
# Stokes parameters
I0 = 0.5 * np.sum(forwI * (self.signalIntegrated[0]+self.signalIntegrated[1])) / np.sum(forwI**2)
A = np.zeros((3,3))
A[0,0] = np.sum(forwQ**2)
A[1,1] = np.sum(forwU**2)
A[2,2] = np.sum(forwV**2)
A[0,1] = np.sum(forwQ * forwU)
A[1,0] = A[0,1]
A[0,2] = np.sum(forwQ * forwV)
A[2,0] = A[0,2]
A[1,2] = np.sum(forwU * forwV)
A[2,1] = A[1,2]
b = np.zeros(3)
b[0] = 0.5 * np.sum(forwQ * (self.signalIntegrated[0]-self.signalIntegrated[1]))
b[1] = 0.5 * np.sum(forwU * (self.signalIntegrated[0]-self.signalIntegrated[1]))
b[2] = 0.5 * np.sum(forwV * (self.signalIntegrated[0]-self.signalIntegrated[1]))
Q0, U0, V0 = np.linalg.solve(A,b)
if (I0 < 0):
I0 = 1.0
if (np.abs(Q0) > 1.0):
Q0 = 1e-3
if (np.abs(U0) > 1.0):
U0 = 1e-3
if (np.abs(V0) > 1.0):
V0 = 1e-3
# Seeing amplitude
M1N = self.forwardPartial(signal, 0) # M1(t) * N(t)
M2N = self.forwardPartial(signal, 1) # M2(t) * N(t)
M3N = self.forwardPartial(signal, 2) # M3(t) * N(t)
M4N = self.forwardPartial(signal, 3) # M4(t) * N(t)
M1One = self.forwardPartial(np.ones(self.nSteps), 0) # M1(t) * 1(t)
M2One = self.forwardPartial(np.ones(self.nSteps), 1) # M2(t) * 1(t)
M3One = self.forwardPartial(np.ones(self.nSteps), 2) # M3(t) * 1(t)
M4One = self.forwardPartial(np.ones(self.nSteps), 3) # M4(t) * 1(t)
A = np.zeros((3,3))
A[0,0] = Q0**2 * np.sum(M2N**2)
A[1,1] = U0**2 * np.sum(M3N**2)
A[2,2] = V0**2 * np.sum(M4N**2)
A[0,1] = Q0 * U0 * np.sum(M3N * M2N)
A[1,0] = A[0,1]
A[0,2] = Q0 * V0 * np.sum(M4N * M2N)
A[2,0] = A[0,2]
A[1,2] = U0 * V0 * np.sum(M4N * M3N)
A[2,1] = A[1,2]
b = np.zeros(3)
b[0] = 0.5 * Q0 * np.sum(M2N * (self.signalIntegrated[0]-self.signalIntegrated[1])) - \
Q0**2 * np.sum(M2One * M2N) - Q0 * U0 * np.sum(M3One * M2N) - Q0 * V0 * np.sum(M4One * M2N)
b[1] = 0.5 * U0 * np.sum(M3N * (self.signalIntegrated[0]-self.signalIntegrated[1])) - \
U0 * Q0 * np.sum(M2One * M3N) - U0**2 * np.sum(M3One * M3N) - U0 * V0 * np.sum(M4One * M3N)
b[2] = 0.5 * V0 * np.sum(M4N * (self.signalIntegrated[0]-self.signalIntegrated[1])) - \
V0 * Q0 * np.sum(M2One * M4N) - V0 * U0 * np.sum(M3One * M4N) - V0**2 * np.sum(M4One * M4N)
betaI = np.abs((0.5 * I0 * np.sum(M1N * (self.signalIntegrated[0]+self.signalIntegrated[1])) - \
I0**2 * np.sum(M1One * M1N)) / (I0**2 * np.sum(M1N**2)))
betaQ, betaU, betaV = np.abs(np.linalg.solve(A,b))
if (loop % 50 == 0):
print "It {0:4d} - l2={1:10.3e} - l1={2:10.4f} - l0={3:5.1f}% - I={4:11.5f} - Q/I={5:11.5f} - U/I={6:11.5f} - V/I={7:11.5f} - bI={8:11.5f} - bQ={9:11.5f} - bU={10:11.5f} - bV={11:11.5f}".format(loop, normResidual,
normSolutionL1, 100.0*normSolutionL0 / self.nSteps, I0, Q0/I0, U0/I0, V0/I0, betaI, betaQ, betaU, betaV)
normL2.append(normResidual)
normL1.append(normSolutionL1)
normL0.append(normSolutionL0)
return x, (I0, Q0, U0, V0), (betaI, betaQ, betaU, betaV), normL2, normL1, normL0
def demodulateTrivial(self):
forwI = self.sparseM[0].dot(np.zeros(self.nSteps)+1.0)
forwQ = self.sparseM[1].dot(np.zeros(self.nSteps)+1.0)
forwU = self.sparseM[2].dot(np.zeros(self.nSteps)+1.0)
forwV = self.sparseM[3].dot(np.zeros(self.nSteps)+1.0)
I0 = 0.5 * np.sum(forwI * (self.signalIntegrated[0]+self.signalIntegrated[1])) / np.sum(forwI**2)
A = np.zeros((3,3))
A[0,0] = np.sum(forwQ**2)
A[1,1] = np.sum(forwU**2)
A[2,2] = np.sum(forwV**2)
A[0,1] = np.sum(forwQ * forwU)
A[1,0] = A[0,1]
A[0,2] = np.sum(forwQ * forwV)
A[2,0] = A[0,2]
A[1,2] = np.sum(forwU * forwV)
A[2,1] = A[1,2]
b = np.zeros(3)
b[0] = 0.5 * np.sum(forwQ * (self.signalIntegrated[0]-self.signalIntegrated[1]))
b[1] = 0.5 * np.sum(forwU * (self.signalIntegrated[0]-self.signalIntegrated[1]))
b[2] = 0.5 * np.sum(forwV * (self.signalIntegrated[0]-self.signalIntegrated[1]))
Q0, U0, V0 = np.linalg.solve(A,b)
return I0, Q0, U0, V0
# totalTime = 1.0 # s
# dt = 0.001 # s
# dtIntegration = 0.01 #s
# beta = np.asarray([15.0, 100.0, 100., 100.0])
# stokes = np.asarray([1.0, 1.2e-3, 5.e-3, 0.001])
# out = randomDemodulator(totalTime, dt, dtIntegration, stokes, beta, seed=123, signalToNoise=1e3)
# coefFourier, stokes, beta, normL21, normL11 = out.FISTA(thresholdMethod = 'soft', niter = 600, lambdaValue = 0.000000051)
# stI, stQ, stU, stV = out.demodulateTrivial()
# print "Q/I_original={0} - Q/I_inferred={1} - Q/I_trivial={2} - diff={3}".format(out.stokes[1] / out.stokes[0], stokes[1] / stokes[0], \
# stQ/stI, out.stokes[1] / out.stokes[0]-stokes[1] / stokes[0])
# print "U/I_original={0} - U/I_inferred={1} - U/I_trivial={2} - diff={3}".format(out.stokes[2] / out.stokes[0], stokes[2] / stokes[0], \
# stU/stI, out.stokes[2] / out.stokes[0]-stokes[2] / stokes[0])
# print "V/I_original={0} - V/I_inferred={1} - V/I_trivial={2} - diff={3}".format(out.stokes[3] / out.stokes[0], stokes[3] / stokes[0], \
# stV/stI, out.stokes[3] / out.stokes[0]-stokes[3] / stokes[0])
# pl.close('all')
# f, ax = pl.subplots(nrows=1, ncols=4, figsize=(18,6))
# coefFourier[0] = 0.0
# Nt = myIFFT(coefFourier)
# Nt /= np.sqrt(myTotalPower(coefFourier))
# stokesPar = ['I', 'Q', 'U', 'V']
# loop = 0
# for loop in range(4):
# ax[loop].plot(out.times, out.signal[loop])
# ax[loop].plot(out.times, stokes[loop] / stokes[0] * (1.0 + beta[loop]*Nt))
# ax[loop].set_xlabel('Time [s]')
# ax[loop].set_ylabel('Stokes {0}'.format(stokesPar[loop]))
# ax[loop].annotate
# pl.tight_layout()
# ax[0,0].plot(out.times, out.signal[0])
# ax[0,0].plot(out.times, stokes[0] *(1.0+beta[0]*Nt))
# ax[0,1].plot(out.signal[1])
# ax[0,1].plot(stokes[1] / stokes[0] *(1.0+beta[1]*Nt))
# ax[1,0].plot(out.signal[2])
# ax[1,0].plot(stokes[2] / stokes[0] * (1.0+beta[2]*Nt))
# ax[1,1].plot(out.signal[3])
# ax[1,1].plot(stokes[3] / stokes[0] * (1.0+beta[3]*Nt))
# ax[2,0].semilogy(np.abs(myFFT(out.seeing)))
# ax[2,0].semilogy(np.abs(myFFT(Nt)))
# ax[2,1].semilogy(normL21)
# ax[2,1].semilogy(normL11)
# ax[3,0].plot(out.signalIntegrated[0])
# ax[3,0].plot(out.signalIntegrated[1])
# ax[3,1].plot(out.seeing)
# ax[3,1].plot(Nt)
| 3.078125 | 3 |
tank/forms.py | oteejay/lms | 0 | 12796919 | <reponame>oteejay/lms
from django import forms
from django.utils.translation import gettext_lazy as _
from plant.models import Plant
from .models import Tank, Configuration
class TankForm(forms.ModelForm):
plant = forms.ModelChoiceField(queryset=Plant.objects.all(), empty_label='...select plant...')
class Meta:
model = Tank
fields = ('plant', 'capacity', 'unit', 'image')
help_texts = {
            'plant': _('The plant this tank is located in.'),
'unit': _('The unit of measurement for its capacity.'),
'image': _("A picture of the scene where the LMS device is installed.")
}
class ConfigurationForm(forms.ModelForm):
class Meta:
model = Configuration
fields = ('shape', 'width', 'length', 'height', 'major_diameter',
'minor_diameter', 'diameter', 'unit', 'alignment')
labels = {
'major_diameter': _('Major Diameter'),
'minor_diameter': _('Minor Diameter')
}
help_texts = {
'unit': _('The unit of measurement utilized for this configuration.')
}
widgets = {
'shape': forms.Select(),
'alignment': forms.Select()
}
| 2.578125 | 3 |
The container /Robotic Arm/craves.ai-master/pose/utils/evaluation.py | ReEn-Neom/ReEn.Neom-source-code- | 0 | 12796920 | <reponame>ReEn-Neom/ReEn.Neom-source-code-
from __future__ import absolute_import
import math
import numpy as np
import matplotlib.pyplot as plt
from random import randint
import torch
from .misc import *
from .transforms import transform, transform_preds
__all__ = ['accuracy', 'AverageMeter']
def get_preds(scores):
''' get predictions from score maps in torch Tensor
return type: torch.LongTensor
'''
assert scores.dim() == 4, 'Score maps should be 4-dim'
maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)
maxval = maxval.view(scores.size(0), scores.size(1), 1)
idx = idx.view(scores.size(0), scores.size(1), 1) + 1
preds = idx.repeat(1, 1, 2).float()
preds[:,:,0] = (preds[:,:,0] - 1) % scores.size(3) + 1
preds[:,:,1] = torch.floor((preds[:,:,1] - 1) / scores.size(3)) + 1
pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
preds *= pred_mask
return preds
def calc_dists(preds, target, normalize):
preds = preds.float()
target = target.float()
dists = torch.zeros(preds.size(1), preds.size(0))
for n in range(preds.size(0)):
for c in range(preds.size(1)):
if target[n,c,0] > 1 and target[n, c, 1] > 1:
dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
if dists.ne(-1).sum() > 0:
return dists.le(thr).eq(dists.ne(-1)).sum().numpy() / dists.ne(-1).sum().numpy()
else:
return -1
def accuracy(output, target, idxs, thr=0.5):
''' Calculate accuracy according to PCK, but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs', followed by individual accuracies
'''
preds = get_preds(output)
gts = get_preds(target)
norm = torch.ones(preds.size(0))*output.size(3)/4.0
dists = calc_dists(preds, gts, norm)
acc = torch.zeros(len(idxs)+1)
avg_acc = 0
cnt = 0
for i in range(len(idxs)):
acc[i+1] = dist_acc(dists[idxs[i]-1], thr=thr)
if acc[i+1] >= 0:
avg_acc = avg_acc + acc[i+1]
cnt += 1
if cnt != 0:
acc[0] = avg_acc / cnt
return acc
def final_preds_bbox(output, bbox, res):
    preds = get_preds(output) # float type
preds = preds.numpy()
for i in range(preds.shape[0]):
width = bbox[2][i] - bbox[0][i]
height = bbox[3][i] - bbox[1][i]
for j in range(preds.shape[1]):
            preds[i, j, :] = preds[i, j, :] / res * np.array([width, height]) + np.array([bbox[0][i], bbox[1][i]])  # offset by the bbox top-left (x, y) corner
return torch.from_numpy(preds)
def final_preds(output, center, scale, res):
coords = get_preds(output) # float type
# pose-processing
for n in range(coords.size(0)):
for p in range(coords.size(1)):
hm = output[n][p]
px = int(math.floor(coords[n][p][0]))
py = int(math.floor(coords[n][p][1]))
if px > 1 and px < res[0] and py > 1 and py < res[1]:
diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1]-hm[py - 2][px - 1]])
coords[n][p] += diff.sign() * .25
coords[:, :, 0] += 0.5
coords[:, :, 1] -= 0.5
preds = coords.clone()
# Transform back
for i in range(coords.size(0)):
preds[i] = transform_preds(coords[i], center[i], scale[i], res)
if preds.dim() < 3:
preds = preds.view(1, preds.size())
return preds
def d3_acc(preds, gts, percent = .5):
num_samples = len(preds)
acc = np.zeros_like(preds[0])
hit = 0
# miss_list = []
max_error_list = [] #max angle error for each image
res_list = []
for i in range(num_samples):
pred = np.array(preds[i])
gt = np.array(gts[i])
res = np.abs(pred - gt)
res[0:7] = np.abs((res[0:7] + 180.0) % 360.0 - 180.0)
max_error_list.append(np.max(res[0:4]))
res_list.append(res)
# if not np.any(res[0:4]>10): #false prediction
# acc += res
# hit = hit + 1
# else:
# miss_list.append(i)
    top_n = int(percent * num_samples) #take top N images with smallest error.
sorted_list = np.argsort(max_error_list)
for i in range(top_n):
acc += res_list[sorted_list[i]]
return (acc/top_n)[:4]
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
| 2.328125 | 2 |
scripts/dcs.py | chebroluharika/SDK_Automation_Generator | 0 | 12796921 | <reponame>chebroluharika/SDK_Automation_Generator
import requests
import json
import logging
import time
from copy import deepcopy
import platform # For getting the operating system name
import subprocess # For executing a shell command
rest_call_timeout_sec = 60
max_retries_in_session = 50
polling_call_timeout_sec = 10 * 60
sec_between_polling = 10
class FirstTimeSetUp(object):
def __init__(self, appliance_name, use_ip=False, override_version=None, retries=max_retries_in_session, appliance_dhcp_ip_address=None,
post_eula_delay=0, use_static_ip=False, use_one_ip=False, ipv6_type=None, ova_ip=None, host_data=None):
"""Constructor method to RestAppliance.
We compute the correct API-Version for REST calls.
The version can be overridden by passing in a value for override_version.
Ideally, this computation should be moved to a default in the datastore.
Parameters
----------
appliance_name : str
fully qualified name of the appliance
use_ip : bool
Flag to indicate if an IP address is being passed instead of DNS hostname.
override_version (Optional): int
The default version string (determined by program name) can be overridden.
Currently only accepting values of 100, 199, and 200.
retries (Optional): int
Number of retries to do in HTTP session.
appliance_dhcp_ip_address : str
appliance dhcp ip address, will be used to connect to the appliance if not None
"""
self.fqdn = appliance_name
if appliance_dhcp_ip_address:
self.base_url = "https://" + appliance_dhcp_ip_address
else:
self.base_url = "https://" + appliance_name
self.use_ip = use_ip
        # create a persistent session so that we can retry if the appliance goes offline (e.g. first time setup)
self.sess = requests.Session()
self.retries = retries
adap = requests.adapters.HTTPAdapter(max_retries=self.retries)
self.sess.mount('http://', adap)
self.sess.mount('https://', adap)
# if version is passed in, use that. Else use the default for the program
# Default to the minimal version number that implements all the requirements that we need. Defined per program.
# Eventually we may need version overrides at each REST call.
if override_version:
self.api_version = override_version
else:
self.api_version = 120
logging.info("The API Version utilized is {0}.".format(self.api_version))
self._header = {'X-API-Version': '{}'.format(self.api_version), 'Content-Type': 'application/json'}
self._secure_header = {}
def get_secure_headers(self):
"""Helper method to appliance_request().
Gives header information required by the appliance with authentication information.
Return
------
_secure_header: dict. Dictionary containing X-API-Verions, Content-Type, and Auth. The Auth parameter value is a sessionID.
"""
# Once _secure_header is defined, we can use it over and over again for the duration of its life.
# Note, the header is only good for that user (administrator), 24 hours, and until the next reboot.
if self._secure_header:
return self._secure_header
payload = {"userName": "Administrator", "password": "<PASSWORD>"}
url = '/rest/login-sessions'
try:
r = self.sess.post(self.base_url + url, verify=False, headers=self._header, data=json.dumps(payload), timeout=rest_call_timeout_sec)
except requests.exceptions.RequestException as e:
raise Exception("There was an issue connecting to the appliance to get headers. Exception message: {0}".format(e))
except Exception as e:
raise Exception("There was an issue with the HTTP Call to get headers. Exception message: {0}".format(e))
if r.status_code >= 300:
raise Exception('Failure to get secure connection. Status {0}.'.format(r.status_code))
try:
safe_json = r.json()
self._secure_header = self._header.copy()
self._secure_header['Auth'] = safe_json.get('sessionID')
if self._secure_header['Auth'] is None:
raise Exception('Auth token for the header is undefined. No Session ID available. Status: {0}.'.format(r.status_code))
return self._secure_header
except ValueError as e:
raise Exception('Failure to get a JSON value from the response. Status: {0}.'.format(r.status_code))
except:
raise Exception('Failure to access the sessionID from the response. Status: {0}. JSON: {1}'.format(r.status_code, r.json()))
def appliance_request(self, request_type, url, secure=True, payload=None, other_properties={}, extra_headers={}, poll=True,
timeout=None):
"""Helper method to call HTTP requests.
An exception will be raised if an unknown value for request_type is utilized.
Exceptions will also be raised if the appliance cannot be contacted.
If secure=True, this function depends on get_secure_headers().
Parameters
----------
request_type: str
accepted values are: "POST", "PUT", "GET", "DELETE"
Any other value will raise an error.
url: str
url location of the REST call. This method concatenates https:// and the fully qualified domain name of the system with this string.
secure (optional): boolean
            True requests data adding a header that includes authentication information.
False requests data without authentication information
no value defaults in True
payload (optional): dict
Python object payload for POST or PUT calls, to be serialized into JSON.
other_properties (optional): dict
A dictionary of extra properties that we can give to the Python Request module.
The dictionary is unpacked and added to Request.
For example: other_properties={'stream': True}
poll : boolean
            If false, polling tasks will return immediately - needed for failover setups
timeout: None or integer
Defaults to rest_call_timeout_sec if 'None' or unspecified
Return
------
return (success, r, safe_json_result, polling_results)
A tuple with these values:
success: bool. A True/False value. True indicates that the status_code was under 300 and the polling was successful.
r: a response object from a Requests call.
safe_json_results: the JSON returned from the HTTP request. None if the request did not return a JSON value.
polling_results: dict. dictionary with two values, task_state and task_status. Both are populated whenever the call requires task polling.
"""
if timeout is None:
timeout = rest_call_timeout_sec
if not secure:
head = self._header
else:
head = self.get_secure_headers()
        head = {**head, **extra_headers}  # merge auth/extra headers (dict views cannot be concatenated in Python 3)
full_url = self.base_url + url
logging.debug("Preparing HTTP {0} request.".format(request_type))
logging.debug("Preparing URL: {0}.".format(full_url))
logging.debug("Preparing Headers: {0}.".format(head))
logging.debug("Preparing Payload: {0}.".format(json.dumps(payload)))
polling_results = {}
try:
if request_type == "POST":
r = self.sess.post(full_url, verify=False, headers=head, data=json.dumps(payload), timeout=timeout, **other_properties)
elif request_type == "PUT":
r = self.sess.put(full_url, verify=False, headers=head, data=json.dumps(payload), timeout=timeout, **other_properties)
elif request_type == "GET":
r = self.sess.get(full_url, verify=False, headers=head, timeout=timeout, **other_properties)
elif request_type == "DELETE":
r = self.sess.delete(full_url, verify=False, headers=head, timeout=timeout, **other_properties)
else:
raise Exception("RestAppliance attempted to call an http request other than POST, PUT, or GET. request_type: {0}. url: {1}".format(request_type, url))
try:
safe_json_result = r.json()
except:
safe_json_result = {}
logging.debug("Returned. Status code: {0}.".format(r.status_code))
logging.debug("Returned. JSON: {0}.".format(safe_json_result))
# 202 status codes indicate a task that is pollable. The calling function may not know that this will return a 202.
success = False
if r.status_code == 202:
if not poll:
return (True, r, safe_json_result, {'task_state': 'N/A', 'task_status': 'N/A'})
(task_state, task_status) = self.poll_for_task(url, r)
polling_results = {'task_state': task_state,
'task_status': task_status}
if task_state == "Completed":
success = True
elif self.use_ip and task_state == "Warning":
                    # This is required since FTS will try to validate the hostname; if it is not a valid
                    # hostname, the post-validation status is reported as a warning.
success = True
elif r.status_code < 300:
success = True
else:
polling_results = {'task_state': safe_json_result.get('errorCode', 'Error'),
'task_status': safe_json_result.get('details', str(safe_json_result))}
return (success, r, safe_json_result, polling_results)
except requests.exceptions.RequestException as e:
raise Exception("There was an issue connecting to the appliance. Exception message: {0}".format(e))
except Exception as e:
raise Exception("There was an issue with the HTTP Call. Exception message: {0}".format(e))
def accept_eula_once(self, service_access="yes"):
"""On initial communication with the appliance, the end user service agreement (EULA) must be accepted.
This only needs to occur once. Additional calls will not change the status of the EULA nor the status of the service access.
If a change to the service access is required, see the function change_service_access()
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
No authentication on the appliance is required.
Parameters
----------
service_access (optional): str
"yes" will accept service access
"no" will not allow service access
empty value will default to "yes"
"""
url = '/rest/appliance/eula/status'
(_, _, json_result, _) = self.appliance_request(request_type='GET', url=url, secure=False)
if not json_result: # if False, eula acceptance has already occurred.
logging.warning('EULA does not need to be saved.')
else:
logging.debug('Call EULA Acceptance with enable service access={0}'.format(service_access))
url = '/rest/appliance/eula/save'
payload = {"supportAccess": service_access}
(save_success, save_resp, save_json_response, _) = self.appliance_request(request_type='POST', url=url, secure=False, payload=payload)
if save_success:
logging.info('EULA Accepted.')
else:
raise Exception('accept_eula failed. Status: {0}. JSON Response: {1}'.format(save_resp.status_code, json.dumps(save_json_response, sort_keys=True, indent=4, separators=(',', ': '))))
def accept_eula(self, service_access="yes", tries=3, retry_interval=5):
thistry = 1
while True:
logging.info("accept_eula try {}".format(thistry))
try:
self.accept_eula_once(service_access=service_access)
return
except Exception as e:
logging.exception(e)
if thistry >= tries:
raise e
time.sleep(retry_interval)
thistry += 1
def change_administrator_password(self):
"""On initial logon, the administrator's password has to be changed from the default value.
The call to the administrator password change is attempted.
If the change administrator password call fails, then we attempt to login with the administrator password.
If successful, we log a message and the accurate administrator password.
If the administrator login is not successful, an error is raised.
The administrator data is pulled from the dictionary in this file. This needs to be moved to a more formal location.
Parameters
----------
none
"""
# The changePassword REST end point only works for the initial administrator password change.
url = '/rest/users/changePassword'
payload = {"userName": initial_admin,
"oldPassword": <PASSWORD>,
"newPassword": <PASSWORD>}
(success, resp, json_response, _) = self.appliance_request(request_type='POST', url=url, secure=False, payload=payload)
if success:
logging.info('Administrator password change was accepted.')
elif resp.status_code == 400:
logon_url = '/rest/login-sessions'
logon_payload = {"userName": admin_user, "password": <PASSWORD>}
(logon_success, _, _, _) = self.appliance_request(request_type='POST', url=logon_url, secure=False, payload=logon_payload)
if not logon_success:
raise Exception('change_administrator_password failed. Status: {0}. JSON Response: {1}'.format(resp.status_code, json.dumps(json_response, sort_keys=True, indent=4, separators=(',', ': '))))
logging.warning('Administrator password has already been changed. Password is {0}'.format(<PASSWORD>))
else:
raise Exception('change_administrator_password failed. Status: {0}. JSON Response: {1}'.format(resp.status_code, json.dumps(json_response, sort_keys=True, indent=4, separators=(',', ': '))))
def get_mac(self):
"""Request the MAC address from the appliance. Use the first one found in applianceNetworks collection.
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
Parameters
----------
none
Return
------
mac address: string
"""
json_answer = self.get_networking_data()
for network in json_answer.get('applianceNetworks', []):
mac_address = network.get('macAddress')
if mac_address:
logging.info('MAC Address is: {0}'.format(mac_address))
return mac_address
raise Exception('MAC Address is not defined')
def get_ipv4_name_servers(self):
"""Request the list of dns ipv4 name servers. Use the first one found in applianceNetworks collection.
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
Parameters
----------
none
Return
------
list of ipv4 name servers: list
"""
json_answer = self.get_networking_data()
for network in json_answer.get('applianceNetworks', []):
ipv4_name_servers = network.get('ipv4NameServers')
if ipv4_name_servers:
logging.info('IPv4 Name servers: {0}'.format(ipv4_name_servers))
return ipv4_name_servers
raise Exception('IPv4 Name server is not defined')
def get_networking_data(self):
"""Request the networking information from the appliance.
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
Parameters
----------
none
Return
------
        the JSON response from the appliance network-interfaces endpoint.
"""
url = '/rest/appliance/network-interfaces'
(success, resp, json_response, _) = self.appliance_request(request_type='GET', url=url, secure=True)
if not success:
raise Exception('get_networking_data call failed. Status: {0}. JSON Response: {1}'.format(resp.status_code, json.dumps(json_response, sort_keys=True, indent=4, separators=(',', ': '))))
return json_response
def get_time_locale_data(self):
"""Request the networking information from the appliance.
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
Parameters
----------
none
Return
------
        the JSON response from the time-locale configuration endpoint.
"""
url = "/rest/appliance/configuration/time-locale"
(success, resp, json_response, _) = self.appliance_request(request_type='GET', url=url, secure=True)
if not success:
raise Exception('get_time_locale_data call failed. Status: {0}. JSON Response: {1}'.format(resp.status_code, json.dumps(json_response, sort_keys=True, indent=4, separators=(',', ': '))))
return json_response
def set_time_server_and_locale(self, ntpserver):
"""
If the time definition is not part of the network-interfaces JSON, it is set via an independent REST endpoint.
:param ntpserver: IP address of the ntpserver.
:return:
:raises: Exception, Exception
"""
time_locale_url = "/rest/appliance/configuration/time-locale"
# Query for current time-locale setting.
time_locale_settings = self.get_time_locale_data() # Exception will be raised by method if it fails.
time_locale_settings["dateTime"] = None # our time is not necessarily the NTP time, so don't set it.
time_locale_settings["ntpServers"] = [str(ntpserver)] # use the defined NTP server and only it.
(ntp_success, _, ntp_rjson, ntp_polling_results) = self.appliance_request(request_type='POST',
url=time_locale_url,
secure=True,
payload=time_locale_settings)
if not ntp_success:
logging.error(json.dumps(ntp_rjson, sort_keys=True, indent=4, separators=(',', ': ')))
if 'Wait until the operation completes' in ntp_polling_results.get("task_status"):
raise Exception(
'time-locale setting failed. Polling State: {0}. Polling Status: {1}. '.format(
ntp_polling_results.get("task_state"), ntp_polling_results.get("task_status")))
else:
raise Exception(
'time-locale setting failure. Polling State: {0}. Polling Status: {1}. '.format(
ntp_polling_results.get("task_state"), ntp_polling_results.get("task_status")))
logging.info("NTP server setting was successful")
def poll_for_task(self, calling_url, response):
'''Helper method to appliance_request().
Status Response 202 indicates an asynchronous REST call. Poll until the task is complete or error.
Adds to the set of parameters that appliance_request() returns.
Parameters
----------
calling_url : string
The URL that was called in appliance_request.
response : a response object from a Requests call.
Return
------
tuple containing:
task_state: str. A short summary of the execution/completion status
task_status: str. State of the task. For Example: Unknown, Running, Terminated, Error, Warning, Completed.
'''
        # network-interfaces is a special case. Rather than network-interfaces returning an object of type TaskResourceV2, this end point returns Void.
        # From my reading of the documentation, this is not consistent with the Task Tracker mechanism. I have brought this to the attention
# of the atlas team.
#
# there are now at least two responses with the task URL in the header:
# '/rest/appliance/network-interfaces' and '/rest/appliance/configuration/time-locale'
# Go to checking for response in the header, if not there, check in the JSON. Poor consistency in
# implementations and inconsistent with REST principles.
url = response.headers.get('location')
if url is None:
url = response.json().get('uri')
if url is None:
raise Exception('Could not read the task to poll. Originating request on URL: {0}.'.format(calling_url))
full_rest_url = self.base_url + url
task_state = 'Running'
start_time = time.time()
try:
logging.debug("Starting polling the rest task {0}.".format(url))
already_reset_session = False
while task_state in ['Running', 'New', 'Pending', 'Starting']:
if time.time() >= start_time + polling_call_timeout_sec:
raise Exception('Task Polling did not respond within {0} seconds. Time out and exit. Originating request on URL: {1}'.format(polling_call_timeout_sec, calling_url))
time.sleep(sec_between_polling)
r_tree = None
try:
logging.debug("Current Time {0}".format(time.asctime(time.localtime(time.time()))))
r_tree = self.sess.get(full_rest_url + "?view=tree", verify=False, headers=self.get_secure_headers(), timeout=rest_call_timeout_sec)
except Exception as e:
logging.exception("FTS get failed: " + str(e))
if already_reset_session:
raise Exception("There was an issue with the HTTP Call for task polling. Exception message: {0}".format(e))
                        # delete and recreate the session if it loses connection. Changes in IP address, FQDN, etc. can make us lose the session.
else:
already_reset_session = True
self.sess.close()
self.sess = requests.Session()
adap = requests.adapters.HTTPAdapter(max_retries=self.retries)
self.sess.mount('http://', adap)
self.sess.mount('https://', adap)
if r_tree:
r_treejson = r_tree.json()
task_resource = r_treejson.get('resource')
task_state = task_resource.get('taskState')
task_status = task_resource.get('taskStatus', '')
task_errors = task_resource.get('taskErrors', None)
if task_errors:
# in case of errors place them in log output and append to status message
for e in task_errors:
logging.error(e)
task_status += ";" + ";".join([str(e) for e in task_errors])
logging.debug("Percent Complete : {0}. State: {1}. Status: {2}.".format(task_resource.get('percentComplete'), task_state, task_status))
logging.debug("The task tree for {0} is:".format(full_rest_url))
logging.debug("Returned JSON: {0}".format(r_treejson))
else:
logging.debug("Exception during get call, response was not set")
logging.debug("Unable to get the task tree for {0}".format(full_rest_url))
return(task_state, task_status)
except ValueError as e:
raise Exception('Error getting the JSON results from the task. Originating request on URL: {0}. Exception: {1}'.format(calling_url, e))
except Exception as e:
raise Exception('Error in polling for the task. Originating request on URL: {0}. Exception: {1}'.format(calling_url, e))
def first_time_setup(self, ntpserver, use_static_ip=False, use_tbird_fts=False, use_i3s_fts=False, use_one_ip=False, ipv6_type=None, ova_ip=None, host_data=None):
"""Creates networking for the appliance.
Configures the appliance as DHCP.
The appliance queries itself to define its macAddress.
If the api_version is above version 100, this will set a DNS Server IP address and set
the overrideIpv4DhcpDnsServers value to False.
"ipv4NameServers": [dns_server_ip],
"overrideIpv4DhcpDnsServers": False
If the api_version is below version 100, it does not set DNS.
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
"""
url = '/rest/appliance/network-interfaces'
if ova_ip:
networking_data = self.get_networking_data()
network = networking_data['applianceNetworks'][0]
network["hostname"] = self.fqdn
network["domainName"] = self.fqdn.split(".", 1)[1]
network['ipv4Type'] = "STATIC"
network["searchDomains"] = None
# this is what UI does so we follow
network["aliasDisabled"] = True
network["overrideIpv4DhcpDnsServers"] = False
if network["app1Ipv4Addr"] == network["virtIpv4Addr"]:
network["virtIpv4Addr"] = ''
network["app1Ipv4Addr"] = ova_ip
network["app2Ipv4Addr"] = ''
else:
raise Exception("Impossible happened: app1Ipv4Addr != virtIpv4Addr")
network["ipv6Type"] = "UNCONFIGURE"
payload = networking_data
elif use_static_ip:
networking_data = self.get_networking_data()
networks = []
for network in networking_data['applianceNetworks']:
if network["ipv4Type"] == "DHCP":
network['ipv4Type'] = "STATIC"
network['ipv6Type'] = "UNCONFIGURE"
networks.append(network)
if use_i3s_fts:
network['ipv6Type'] = "UNCONFIGURE"
network["overrideIpv4DhcpDnsServers"] = False
network['virtIpv4Addr'] = None
networks.append(network)
appliance_domain_name = self.fqdn.split(".", 1)[1]
if appliance_domain_name not in network["hostname"]:
network["hostname"] = network["hostname"] + '.' + appliance_domain_name
logging.info("Setting fqdn for the appliance:{0}".format(network["hostname"]))
networking_data['applianceNetworks'] = networks
networking_data.pop('serverCertificate', None)
payload = networking_data
else:
appliance_mac_address = self.get_mac()
ipv4_name_servers = self.get_ipv4_name_servers()
# Only ipv6 with DHCP is supported, will add static at some point later.
if ipv6_type != "DHCP":
ipv6_type = "UNCONFIGURE"
payload = {"type": "ApplianceServerConfiguration",
"applianceNetworks": [{"ipv4Type": "DHCP",
"ipv6Type": ipv6_type,
"macAddress": appliance_mac_address,
"hostname": self.fqdn,
"device": "eth0",
"ipv4NameServers": ipv4_name_servers,
"overrideIpv4DhcpDnsServers": False,
"ipv4Subnet": "",
"ipv4Gateway": None,
"confOneNode": True,
"activeNode": 1
}
],
}
        # Not clear why this condition fails to query the network interface to get the base for the payload, but
# we need to know if the network definition has the "time" dictionary defined in it; if it does, copy into
# place for the test below this set of conditional branches. Since both get_mac and get_ipv4_name_servers
# use calls to get_networking_data, this seems poorly designed, but given how complex the code is in this
# area and the lack of documented test cases, this seems to be the safest change to make.
old_network = self.get_networking_data()
if "time" in old_network:
payload["time"] = deepcopy(old_network["time"])
# This is the later data model, where the time server and locale are set via their own API
# This will embed another REST process using POST inside the generation of a POST to do the network setup.
self.set_time_server_and_locale(ntpserver)
poll = True
        # Do not poll for the task if an ova_ip is passed in. If an ova_ip is passed in then we will lose connection
        # to the original location after the POST command to set the networking. Instead, we will change to the new
# address and then poll for the task.
if ova_ip:
poll = False
(success, response, rjson, polling_results) = self.appliance_request(request_type='POST', url=url, secure=True, payload=payload, poll=poll)
        # Reset the base url to the fqdn for any subsequent rest actions and then use the fqdn to make sure the
        # networking setup task completes successfully. This needs to be done for OVAs that are deployed with DHCP but
        # need to be configured as static, since the IP address will change after setting up the networking.
if ova_ip and success:
self.base_url = "https://" + self.fqdn
(task_state, task_status) = self.poll_for_task(url, response)
polling_results = {'task_state': task_state,
'task_status': task_status}
if task_state == "Completed":
success = True
else:
success = False
if not success:
logging.error(json.dumps(rjson, sort_keys=True, indent=4, separators=(',', ': ')))
if 'Wait until the operation completes' in polling_results.get("task_status"):
raise Exception('first_time_setup failure. Polling State: {0}. Polling Status: {1}. '.format(polling_results.get("task_state"), polling_results.get("task_status")))
else:
raise Exception('first_time_setup failure. Polling State: {0}. Polling Status: {1}. '.format(polling_results.get("task_state"), polling_results.get("task_status")))
logging.info("First time setup was successful")
logging.debug(json.dumps(self.get_networking_data(), indent=2, sort_keys=True))
def readIPV6FromFile(self):
ipv6address = []
vm_network_json_file = open('vm_network.json',)
data = json.load(vm_network_json_file)
# Iterating through the json list
for i in data['results'][0]['msg']:
for j in data['results'][0]['msg'][i]['ipv6']:
ipv6address.append(j)
# Closing file
vm_network_json_file.close()
return ipv6address
def ping(hosts):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
operating_sys = platform.system().lower()
for i in hosts:
param = '-n' if operating_sys=='windows' else '-c'
shell_needed = True if operating_sys == 'windows' else False
# Building the command. Ex: "ping -c 1 google.com"
ping_command = ['ping', param, '1', i]
ping_output = subprocess.run(ping_command,shell=shell_needed,stdout=subprocess.PIPE)
success = ping_output.returncode
if success == 0:
return (str(i)+str('%ens160'))
if __name__ == '__main__':
    post_eula_delay = 10  # seconds; time.sleep() below expects a number, not a string
ntpserver = "10.10.10.10"
ipv6address = readIPV6FromFile()
pingableipv6 = ping(ipv6address)
ra = FirstTimeSetUp("appliance_name", override_version=3200, use_ip=False,
appliance_dhcp_ip_address=None,
post_eula_delay=post_eula_delay, use_static_ip=False,use_one_ip=False,
ipv6_type=None, ova_ip=None, host_data=None)
ra.accept_eula("yes")
logging.info("Sleeping {0} seconds to wait for appliance to stabilize".format(post_eula_delay))
time.sleep(post_eula_delay)
ra.change_administrator_password()
ra.first_time_setup(ntpserver, use_static_ip=False,
use_i3s_fts=False, use_one_ip=False, ipv6_type=None,
ova_ip=None, host_data=None)
| 2.375 | 2 |
drug/views.py | Ctrl-plus-C/Chiron | 20 | 12796922 | <gh_stars>10-100
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from .models import Nutrient, Record, Symptomrecord, Diseaserecord, Foodrecord, Foodlist, Selfcarediary
from .serializers import NutrientsSerializer
from rest_framework.views import APIView
from rest_framework import permissions, status
import infermedica_api
# import Symp
from .serializers import SelfcarediarySerializer
import requests,json
infermedica_api.configure(app_id='945555e1', app_key='be2ee424c225c567086a084637a359de')
def home(request):
if request.user.is_authenticated():
return render(request, 'drug/home.html',{})
return redirect('accounts/login')
def loginpage(request):
return render(request, 'drug/login.html', {})
def search(symptom):
api = infermedica_api.get_api()
data = api.search(symptom["orth"])
return data
def nutrients(request):
if request.user.is_authenticated():
return render(request, 'drug/nutrients.html', {})
return redirect('accounts/login')
def selfdiary(request):
if request.user.is_authenticated():
return render(request, 'drug/selfdiary.html', {})
return redirect('accounts/login')
def analytics(request):
if request.user.is_authenticated():
return render(request, 'drug/analytics.html', {})
return redirect('accounts/login')
class Prescription(APIView):
@csrf_exempt
def post(self,request):
medicname = request.data.get("text")
# import pdb; pdb.set_trace()
data = requests.get("https://api.fda.gov/drug/label.json?search="+medicname).json()
return Response(data, status=status.HTTP_200_OK)
def medication(request):
if request.user.is_authenticated():
return render(request, 'drug/medication.html', {})
return redirect('accounts/login.html')
class ParseD(APIView):
@csrf_exempt
def post(self,request):
sentence = request.data.get("text")
dbrow = Record(user=request.user,search_query=sentence)
dbrow.save()
api = infermedica_api.get_api()
response = api.parse(sentence).to_dict()["mentions"]
mysymptomlist = []
templist = {}
print("reached templist")
for data in response:
templist["orth"] = data["orth"]
templist["id"] = data["id"]
mysymptomlist.append(templist.copy())
finalsearchdata = []
print("reached finalserach")
for symptom in mysymptomlist:
callsearchdata = api.search(symptom['orth'])
finalsearchdata.extend(callsearchdata)
finaldict = {}
print("conversion")
for dictdata in finalsearchdata:
finaldict[dictdata['label']] = dictdata['id']
symprow = Symptomrecord(user_record=dbrow,present_symptoms=dictdata['label'],present_symptoms_id=dictdata['id'])
symprow.save()
return Response(finaldict, status=status.HTTP_200_OK)
class Condition(APIView):
@csrf_exempt
def post(self, request):
api = infermedica_api.API(app_id='945555e1', app_key='be2ee424c225c567086a084637a359de')
# r = infermedica_api.Diagnosis(app_id='945555e1', app_key='be2ee424c225c567086a084637a359de')
data = api.conditions_list()
# r = requests.post(url, data=json.dumps({'text': text}),headers={'Authorization': apiKey, 'Content-Type': 'application/json'})
return Response({"test":data}, status=status.HTTP_200_OK)
# class Search(APIView):
class Diagnosis(APIView):
@csrf_exempt
def post(self,request):
try:
present_symptoms = request.data.getlist('choices[]')
absent_symptoms = request.data.getlist('unchoices[]')
except AttributeError:
present_symptoms = request.data.get('choices')
absent_symptoms = request.data.get('unchoices')
query_text = request.data.get('queryText')
recordobject = Record.objects.get(user=request.user,search_query=query_text)
api = infermedica_api.get_api()
re = infermedica_api.Diagnosis(sex=request.data.get("gender"), age=request.data.get("age"))
for symptom in present_symptoms:
re.add_symptom(symptom, 'present')
for symptom in absent_symptoms:
re.add_symptom(symptom, 'absent')
re= api.diagnosis(re).to_dict()
for dictdata in re['conditions']:
diseaseobject = Diseaserecord(user_record=recordobject, probable_diseases=dictdata['name'], probable_diseases_id=dictdata['id'])
diseaseobject.save()
return Response({"test":re}, status=status.HTTP_200_OK)
# call diagnosis
class Symptom(APIView):
@csrf_exempt
    def post(self, request):
        sentence = request.data.get("text")
        api = infermedica_api.get_api()
        # parse symptom mentions out of the request text and fetch details for each
        response = api.parse(sentence).to_dict()["mentions"]
        # import pdb; pdb.set_trace()
        mysymptomlist = {}
        details = []
        for data in response:
            mysymptomlist["orth"] = data["orth"]
            mysymptomlist["id"] = data["id"]
            details.append(api.symptom_details(mysymptomlist["id"]))
        return Response({"test": details}, status=status.HTTP_200_OK)
# @csrf_exempt
# @api_view(["POST"])
# @permission_classes((AllowAny,))
# def login(request):
# username = request.data.get("username")
# password = request.data.get("password")
# if username is None or password is None:
# return Response({'error': 'Please provide both username and password'},
# status=HTTP_400_BAD_REQUEST)
# user = authenticate(username=username, password=password)
# if not user:
# return Response({'error': 'Invalid Credentials'},
# status=HTTP_404_NOT_FOUND)
# token, restdetails = Token.objects.get_or_create(user=user)
# return Response({'token': token.key, "hasuraid": user.id},
# status=HTTP_200_OK)
# @csrf_exempt
# @api_view(["GET"])
# def sample_api(request):
# data = {'sample_data': 123}
# return Response(data, status=HTTP_200_OK)
class HeartRateApi(APIView):
@csrf_exempt
def get(self, request):
try:
heartrate = HeartRate.objects.all()
hserializer = HeartRateSerializer(heartrate)
heartrate_data = hserializer.data
return Response(heartrate_data, status=status.HTTP_200_OK)
except:
return Response({'success': False, 'message': 'No details found for given date'}, status=status.HTTP_400_BAD_REQUEST)
@csrf_exempt
def post(self, request, user):
request_data = request.data.copy()
request_data['user'] = user
singleroomaval = request_data.get('singleroomaval','')
doubleroomaval = request_data.get('doubleroomaval','')
if singleroomaval != '':
if int(singleroomaval) > 5 or int(singleroomaval) < 0:
return Response({"success": False,"message": "Availability must be between 0 and 5."}, status=status.HTTP_400_BAD_REQUEST)
if doubleroomaval != '':
if int(doubleroomaval) > 5 or int(doubleroomaval) < 0:
return Response({"success": False,"message": "Availability must be between 0 and 5."}, status=status.HTTP_400_BAD_REQUEST)
try:
booking = Booking.objects.get(date=datebooking)
bserializer = BookingSerializer(booking, data=request_data, partial=True)
except:
bserializer = BookingSerializer(data=request_data)
if bserializer.is_valid():
bserializer.save()
return Response(bserializer.data, status=status.HTTP_200_OK)
return Response(bserializer.errors, status=status.HTTP_400_BAD_REQUEST)
class NutrientsApi(APIView):
@csrf_exempt
def get(self, request):
try:
nutrients = Nutrient.objects.all()
nserializer = NutrientsSerializer(nutrients)
nutrient_data = nserializer.data
return Response(nutrient_data, status=status.HTTP_200_OK)
except:
return Response({'success': False, 'message': 'No details found for given date'}, status=status.HTTP_400_BAD_REQUEST)
@csrf_exempt
def post(self, request):
request_data = request.data.copy()
request_data["user"] = request.user.pk
mealval = request_data.get('meal')
data = {
"query":mealval,
"timezone": "US/Eastern"
}
result = requests.post('https://trackapi.nutritionix.com/v2/natural/nutrients', data, headers={"x-app-id":"94f5edb6","x-app-key":"8bb3ae712275e9810ceec3b583e2727d"})
calories = 0
fat = 0
sugar = 0
protein = 0
carbs = 0
vita = 0
vitb = 0
vitc = 0
vitd = 0
vite = 0
foodlist = ""
for fooditem in result.json()["foods"]:
foodlist += fooditem["food_name"]+"; "
calories+=fooditem["nf_calories"]
fat+=fooditem["nf_total_fat"]
sugar+=fooditem["nf_sugars"]
protein+=fooditem["nf_protein"]
carbs+=fooditem["nf_total_carbohydrate"]
nutlist = fooditem["full_nutrients"]
vita+=nutlist[22]["value"]+nutlist[24]["value"]
vitb+=nutlist[38]["value"]+nutlist[40]["value"]
vitc+=nutlist[33]["value"]
vitd+=nutlist[29]["value"]
vite+=nutlist[27]["value"]
foodrecord = Foodrecord(user=request.user,search_query=mealval,calories=calories,fat=fat,sugars=sugar,protein=protein,carbohydrates=carbs,vitamina=vita,vitaminbcomplex=vitb,vitaminc=vitc,vitamind=vitd,vitamine=vite)
foodrecord.save()
for fooditem in result.json()["foods"]:
foodlistobj = Foodlist(food_record=foodrecord,food_item=fooditem["food_name"])
foodlistobj.save()
response = {
"foodlist":foodlist,
"calories":calories,
"fat":fat,
"sugars":sugar,
"protein":protein,
"carbohydrates":carbs,
"vitamina":vita,
"vitaminbcomplex":vitb,
"vitaminc":vitc,
"vitamind":vitd,
"vitamine":vite
}
# nserializer = NutrientsSerializer(data=request.data)
# if nserializer.is_valid():
# nserializer.save()
return Response(response, status=status.HTTP_200_OK)
# return Response(nserializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SelfdiaryApi(APIView):
def post(self, request):
request_data = request.data.copy()
request_data["user"] = request.user.pk
sserializer = SelfcarediarySerializer(data=request_data)
if sserializer.is_valid():
sserializer.save()
return Response(sserializer.data, status=status.HTTP_200_OK)
return Response(sserializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request):
try:
selfdiary = Selfcarediary.objects.filter(user=request.user)
resplist = []
for qset in selfdiary:
resplist.append({"diary":qset.diary,"date":qset.date})
return Response({"data":resplist}, status=status.HTTP_200_OK)
except:
return Response({"success": False}, status=status.HTTP_400_BAD_REQUEST) | 1.875 | 2 |
deeppavlov/tasks/paraphrases/__init__.py | deepmipt/kpi2017 | 3 | 12796923 | <filename>deeppavlov/tasks/paraphrases/__init__.py
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Description of the module tasks.paraphraser:
The task of the module
To provide train and test data for the agents.paraphraser module. For this, the dataset of Russian paraphrases
posted on the site [1] is used. The module downloads the train and test parts of the data. It then shuffles the data, splits it into
batches and feeds them to the agents.paraphraser module for training (feeds the whole dataset for testing). Part of
the data is reserved for validation. Namely, the data is divided into k folds, where k equals the
'--bagging-folds-number' parameter. Each fold is then used once for validation while the k - 1 remaining folds form the
training set.
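
A minimal sketch of the k-fold bagging split described above (illustrative only; it uses
scikit-learn's KFold for brevity, and `examples` / `bagging_folds_number` are placeholders;
the module's actual splitting code may differ):

    from sklearn.model_selection import KFold

    folds = KFold(n_splits=bagging_folds_number, shuffle=True)
    for train_idx, valid_idx in folds.split(examples):
        train_part = [examples[i] for i in train_idx]
        valid_part = [examples[i] for i in valid_idx]
        # train one bagging member on train_part, validate on valid_part
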
[1] http://paraphraser.ru/
""" | 2.15625 | 2 |
porespy/dns/__init__.py | xu-kai-xu/porespy | 0 | 12796924 | r"""
DNS
###
**Direct Numerical Simulation**
This module contains routines for performing direct numerical
simulations.
.. currentmodule:: porespy
.. autosummary::
:template: mybase.rst
:toctree: generated/
dns.tortuosity
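
A hypothetical usage sketch (the generator name and the exact return value of
``tortuosity`` are assumptions and may differ between porespy releases):

.. code-block:: python

    import porespy as ps

    im = ps.generators.blobs(shape=[100, 100, 100], porosity=0.6)
    results = ps.dns.tortuosity(im=im, axis=0)
    print(results)
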
"""
from ._funcs import *
| 1.140625 | 1 |
example/settings.py | rheinwerk-verlag/planning-poker-jira | 1 | 12796925 | <reponame>rheinwerk-verlag/planning-poker-jira<gh_stars>1-10
# Django settings for example project.
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SECRET_KEY = '<KEY>'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'planning_poker_jira/templates'),
)
ASGI_APPLICATION = 'example.routing.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'planning_poker_jira.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'channels',
'planning_poker.apps.ChannelsPresenceConfig',
'planning_poker',
'encrypted_fields',
'planning_poker_jira',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}
]
ROOT_URLCONF = 'example.urls'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
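# The 'encrypted_fields' app reads FIELD_ENCRYPTION_KEYS as a list of hex-encoded keys;
# deriving one from SECRET_KEY keeps this example self-contained, but a dedicated,
# fixed-length key is the safer choice outside of an example project.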
FIELD_ENCRYPTION_KEYS = [SECRET_KEY.encode().hex()]
| 1.960938 | 2 |
yolo/model/dense_prediction.py | parot-99/COVID-19-Warning-System | 2 | 12796926 | import tensorflow as tf
from .config import cfg
class YoloHead(tf.keras.layers.Layer):
def __init__(self, grid_size, classes, strides, anchors, xyscale, i):
super().__init__()
self.grid_size = grid_size
self.classes = classes
self.strides = strides
self.anchors = anchors
self.xyscale = xyscale
self.i = i
def call(self, feature_map):
batch_size = tf.shape(feature_map)[0]
conv_output = tf.reshape(
feature_map,
(batch_size, self.grid_size, self.grid_size, 3, 5 + self.classes),
)
bbox_xy, bbox_wh, detection_conf, classes_prob = tf.split(
conv_output, (2, 2, 1, self.classes), axis=-1
)
xy_grid = tf.meshgrid(
tf.range(self.grid_size), tf.range(self.grid_size)
)
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2)
xy_grid = tf.tile(
tf.expand_dims(xy_grid, axis=0),
[batch_size, 1, 1, 3, 1],
)
xy_grid = tf.cast(xy_grid, tf.float32)
bbox_xy_sigmoid = tf.sigmoid(bbox_xy)
detection_conf_sigmoid = tf.sigmoid(detection_conf)
classes_prob_sigmoid = tf.sigmoid(classes_prob)
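        # Decode box centers: the sigmoid output is rescaled by xyscale (to ease saturation at
        # cell borders), offset by the grid cell index, then multiplied by the stride to map back
        # to input-image pixels; widths/heights are exp-scaled versions of the per-level anchors.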
prediction_xy = (
(bbox_xy_sigmoid * self.xyscale[self.i])
- 0.5 * (self.xyscale[self.i] - 1)
+ xy_grid
) * self.strides[self.i]
prediction_wh = tf.exp(bbox_wh) * self.anchors[self.i]
prediction_xywh = tf.concat([prediction_xy, prediction_wh], axis=-1)
prediction_prob = detection_conf_sigmoid * classes_prob_sigmoid
prediction_xywh = tf.reshape(prediction_xywh, (batch_size, -1, 4))
prediction_prob = tf.reshape(
prediction_prob, (batch_size, -1, self.classes)
)
return prediction_xywh, prediction_prob
class FilterLayer(tf.keras.layers.Layer):
def __init__(self, input_size, score_threshold=0.4):
super().__init__()
self.input_size = input_size
self.score_threshold = score_threshold
def call(self, bounding_boxes, scores):
input_size = self.input_size
score_threshold = self.score_threshold
bounding_boxes = tf.concat(bounding_boxes, axis=1)
scores = tf.concat(scores, axis=1)
scores_max = tf.math.reduce_max(scores, axis=-1)
mask = scores_max >= score_threshold
class_boxes = tf.boolean_mask(bounding_boxes, mask)
pred_conf = tf.boolean_mask(scores, mask)
class_boxes = tf.reshape(
class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]]
)
pred_conf = tf.reshape(
pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]]
)
box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
input_size = tf.cast(input_size, dtype=tf.float32)
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
box_mins = (box_yx - (box_hw / 2.0)) / input_size
box_maxes = (box_yx + (box_hw / 2.0)) / input_size
boxes = tf.concat(
[
box_mins[..., 0:1],
box_mins[..., 1:2],
box_maxes[..., 0:1],
box_maxes[..., 1:2],
],
axis=-1,
)
predictions = tf.concat([boxes, pred_conf], axis=-1)
return predictions
def dense_prediction(feature_maps, classes, tiny=False):
bbox_tensors = []
prob_tensors = []
if tiny:
yolo_head_1 = YoloHead(
cfg.INPUT_SIZE // 16,
classes,
cfg.STRIDES_TINY,
cfg.ANCHORS_TINY,
cfg.XYSCALE_TINY,
0,
)(feature_maps[0])
bbox_tensors.append(yolo_head_1[0])
prob_tensors.append(yolo_head_1[1])
yolo_head_2 = YoloHead(
cfg.INPUT_SIZE // 32,
classes,
cfg.STRIDES_TINY,
cfg.ANCHORS_TINY,
cfg.XYSCALE_TINY,
1,
)(feature_maps[1])
bbox_tensors.append(yolo_head_2[0])
prob_tensors.append(yolo_head_2[1])
else:
yolo_head_1 = YoloHead(
cfg.INPUT_SIZE // 8,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
0,
)(feature_maps[0])
bbox_tensors.append(yolo_head_1[0])
prob_tensors.append(yolo_head_1[1])
yolo_head_2 = YoloHead(
cfg.INPUT_SIZE // 16,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
1,
)(feature_maps[1])
bbox_tensors.append(yolo_head_2[0])
prob_tensors.append(yolo_head_2[1])
yolo_head_3 = YoloHead(
cfg.INPUT_SIZE // 32,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
2,
)(feature_maps[2])
bbox_tensors.append(yolo_head_3[0])
prob_tensors.append(yolo_head_3[1])
predictions = FilterLayer(
input_size=tf.constant([cfg.INPUT_SIZE, cfg.INPUT_SIZE]),
score_threshold=0.2
)(bbox_tensors, prob_tensors)
return predictions
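# Illustrative wiring sketch (not part of the original module; the pooling/conv stand-ins
# below replace a real backbone and neck, and assume cfg.INPUT_SIZE is divisible by 32):
if __name__ == "__main__":
    num_classes = 2
    inputs = tf.keras.layers.Input([cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
    # stand-in feature maps with 3 * (5 + classes) channels at strides 8, 16 and 32
    fmaps = [
        tf.keras.layers.Conv2D(3 * (5 + num_classes), 1)(
            tf.keras.layers.AveragePooling2D(stride)(inputs)
        )
        for stride in (8, 16, 32)
    ]
    predictions = dense_prediction(fmaps, num_classes, tiny=False)
    model = tf.keras.Model(inputs, predictions)
    model.summary()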
| 2.484375 | 2 |
experiments/bert/bert.py | 0xflotus/transfer-nlp | 0 | 12796927 | <reponame>0xflotus/transfer-nlp
import math
from typing import Tuple
import numpy as np
import pandas as pd
import torch
from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer
from torch.optim.optimizer import required
from tqdm import tqdm
from transfer_nlp.loaders.loaders import DatasetSplits, DataFrameDataset
from transfer_nlp.loaders.vectorizers import Vectorizer
from transfer_nlp.loaders.vocabulary import Vocabulary
from transfer_nlp.plugins.config import register_plugin
tqdm.pandas()
@register_plugin
class BertVectorizer(Vectorizer):
def __init__(self, data_file: str):
super().__init__(data_file=data_file)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
df = pd.read_csv(data_file)
target_vocab = Vocabulary(add_unk=False)
for category in sorted(set(df.category)):
target_vocab.add_token(category)
self.target_vocab = target_vocab
def vectorize(self, title: str, max_seq_length: int) -> Tuple[np.array, np.array, np.array]:
tokens = self.tokenizer.tokenize(title)
tokens = ["[CLS]"] + tokens + ["[SEP]"]
token_type_ids = [0] * len(tokens)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
attention_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
attention_mask += padding
token_type_ids += padding
return np.array(input_ids), np.array(attention_mask), np.array(token_type_ids)
@register_plugin
class BertDataset(DatasetSplits):
def __init__(self, data_file: str, batch_size: int, vectorizer: Vectorizer):
self.df = pd.read_csv(data_file)
np.random.shuffle(self.df.values) # Use this code in dev mode
N = 1000
self.df = self.df.head(n=N)
# preprocessing
self.vectorizer: Vectorizer = vectorizer
self.max_sequence = 0
for title in tqdm(self.df.title, desc="Getting max sequence"):
tokens = self.vectorizer.tokenizer.tokenize(text=title)
self.max_sequence = max(self.max_sequence, len(tokens))
self.max_sequence += 2
vectors = self.df['title'].progress_apply(lambda x: self.vectorizer.vectorize(title=x, max_seq_length=self.max_sequence))
self.df['input_ids'] = vectors.progress_apply(lambda x: x[0])
self.df['attention_mask'] = vectors.progress_apply(lambda x: x[1])
self.df['token_type_ids'] = vectors.progress_apply(lambda x: x[2])
self.df['y_target'] = self.df['category'].progress_apply(lambda x: self.vectorizer.target_vocab.lookup_token(x))
train_df = self.df[self.df.split == 'train'][['input_ids', 'attention_mask', 'token_type_ids', 'y_target']]
val_df = self.df[self.df.split == 'val'][['input_ids', 'attention_mask', 'token_type_ids', 'y_target']]
test_df = self.df[self.df.split == 'test'][['input_ids', 'attention_mask', 'token_type_ids', 'y_target']]
super().__init__(train_set=DataFrameDataset(train_df), train_batch_size=batch_size,
val_set=DataFrameDataset(val_df), val_batch_size=batch_size,
test_set=DataFrameDataset(test_df), test_batch_size=batch_size)
@register_plugin
def bert_model():
return BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4)
# Optimizer Code from HuggingFace repo
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))  # x is a plain float here, so use math.cos
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x / warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
return 1.0 - x
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
@register_plugin
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
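# Illustrative wiring sketch (not part of the original experiment; the CSV path and the
# hyper-parameter values are placeholders). transfer-nlp normally builds these registered
# plugins from a config file, but they can also be instantiated directly:
if __name__ == "__main__":
    model = bert_model()  # downloads the pretrained weights on first use
    optimizer = BertAdam(model.parameters(), lr=2e-5, warmup=0.1,
                         t_total=1000, schedule='warmup_linear')
    # vectorizer = BertVectorizer(data_file="news.csv")  # hypothetical CSV with 'title', 'category' and 'split' columns
    # dataset = BertDataset(data_file="news.csv", batch_size=16, vectorizer=vectorizer)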
| 2.453125 | 2 |
deployer/contrib/loggers/on_host.py | timgates42/python-deployer | 39 | 12796928 | <reponame>timgates42/python-deployer<filename>deployer/contrib/loggers/on_host.py
from deployer.loggers import Logger, RunCallback, ForkCallback
from deployer.utils import esc1
class OnHostLogger(Logger):
"""
Log all transactions on every host in:
~/.deployer/history
"""
def __init__(self, username):
from socket import gethostname
self.from_host = gethostname()
self.username = username
def log_run(self, run_entry):
if not run_entry.sandboxing:
run_entry.host._run_silent("""
mkdir -p ~/.deployer/;
echo -n `date '+%%Y-%%m-%%d %%H:%%M:%%S | ' ` >> ~/.deployer/history;
echo -n '%s | %s | %s | ' >> ~/.deployer/history;
echo '%s' >> ~/.deployer/history;
"""
% ('sudo' if run_entry.use_sudo else ' ',
esc1(self.from_host),
esc1(self.username),
esc1(run_entry.command or '')
))
return RunCallback()
def log_fork(self, fork_entry):
# Use the same class OnHostLogger in forks.
class callback(ForkCallback):
def get_fork_logger(c):
return OnHostLogger(self.username)
return callback()
| 2.265625 | 2 |
polybius/graphics/ui/menu.py | TStalnaker44/python_jeopardy | 0 | 12796929 | """
Author: <NAME>
File: menu.py
A general class for creating menus
Parameters:
pos - (x,y) position for the top-left corner of the menu
dims - (width, height) pixels of the menu
commands - list of dictionaries specifying the button attributes
padding - (horizontal, vertical) padding between border and buttons
spacing - space in pixels between buttons
color - rgb color of the menu background (None for transparent)
borderColor - rgb color value for border
borderWidth - pixel width for the border
font - Supplied as a pygame font
orientation - "vertical" | "horizontal"
"""
import pygame
from polybius.graphics.components import Button
from polybius.graphics.basics.drawable import Drawable
from polybius.graphics.utils.window import Window
class Menu(Drawable, Window):
def __init__(self, pos, dims, commands, padding=0, spacing=0,
color=(80,80,80), borderColor=(0,0,0),
borderWidth=2, orientation="vertical"):
"""Initializes the menu"""
Drawable.__init__(self, "", pos, worldBound=False)
Window.__init__(self)
self._offset = (pos[0], pos[1])
self._width = dims[0]
self._height = dims[1]
h_padding = padding[0]
v_padding = padding[1]
self._borderColor = borderColor
self._borderWidth = borderWidth
self._backgroundColor = color
n = len(commands)
xStart = h_padding
yStart = v_padding
self._buttons = []
# Create buttons with a vertical configuration
if orientation == "vertical":
buttonWidth = self._width - (2*h_padding) - (2*borderWidth)
buttonHeight = (self._height - (2*v_padding) - \
((n-1)*spacing) - (2*borderWidth)) // n
for x, b in enumerate(commands):
font = pygame.font.SysFont(b["font"], b["fontSize"])
self._buttons.append((Button(b["text"],
(xStart + self._offset[0],
yStart + (x*buttonHeight) + \
(x*spacing) + self._offset[1]),
font, b["fontColor"], b["color"],
buttonHeight, buttonWidth, b["borderColor"],
b["borderWidth"]),
x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
# Create buttons with a horizontal configuration
elif orientation == "horizontal":
buttonWidth = (self._width - (2*h_padding) - \
((n-1)*spacing) - (2*borderWidth)) // n
buttonHeight = self._height - (2*v_padding) - (2*borderWidth)
for x, b in enumerate(commands):
font = pygame.font.SysFont(b["font"], b["fontSize"])
self._buttons.append((Button(b["text"],
(xStart + self._offset[0] +\
(x*buttonWidth) + (x*spacing),
yStart + self._offset[1]),
font, b["fontColor"], b["color"],
buttonHeight, buttonWidth, b["borderColor"],
b["borderWidth"]),
x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
self._selection = None
self.createDisplay()
def getButtonByText(self, text):
"""Return the button with the provided text"""
for button in self._buttons:
if button[0].getText() == text:
return button[0]
def getButtonByPosition(self, position):
"""Return the button at the given position in the menu"""
return self._buttons[position][0]
def handleEvent(self, event):
"""Handles events on the pause menu"""
for b in self._buttons:
b[0].handleEvent(event,self.select,(b,))
return self.getSelection()
def select(self, button):
"""Sets the current selection"""
b, selection, closeOnPress, toggleText = button
if closeOnPress:
self.close()
if toggleText[0] != None:
currentText = b._text
if toggleText[0] == currentText:
b.setText(toggleText[1])
else:
b.setText(toggleText[0])
self._selection = selection
def getSelection(self):
"""Returns the current selection and resets it to None"""
sel = self._selection
self._selection = None
return sel
def draw(self, screen):
"""Draws the menu on the screen"""
super().draw(screen)
# Draw buttons
for b in self._buttons:
b[0].draw(screen)
def createDisplay(self):
"""Create the display of the menu"""
# Draw the border
surfBack = pygame.Surface((self._width, self._height))
surfBack.fill(self._borderColor)
# Draw the background
surf = pygame.Surface((self._width - (self._borderWidth * 2),
self._height - (self._borderWidth * 2)))
# Apply the background color or make transparent
if self._backgroundColor == None:
surf.fill((1,1,1))
surfBack.set_colorkey((1,1,1))
else:
surf.fill(self._backgroundColor)
# Blit the widget layer onto the back surface
surfBack.blit(surf, (self._borderWidth, self._borderWidth))
self._image = surfBack
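# Illustrative usage sketch (not part of the original module): builds a small vertical menu
# using the command-dictionary keys that Menu reads above. The colors, fonts and sizes are
# arbitrary example values.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((400, 300))
    commands = [
        {"text": "Play", "font": "arial", "fontSize": 24, "fontColor": (255, 255, 255),
         "color": (0, 120, 0), "borderColor": (0, 0, 0), "borderWidth": 1,
         "closeOnPress": True},
        {"text": "Music: On", "toggleText": "Music: Off", "font": "arial", "fontSize": 24,
         "fontColor": (255, 255, 255), "color": (120, 0, 0), "borderColor": (0, 0, 0),
         "borderWidth": 1, "closeOnPress": False},
    ]
    menu = Menu((50, 50), (300, 200), commands, padding=(10, 10), spacing=10)
    menu.draw(screen)
    pygame.display.flip()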
| 3.953125 | 4 |
app/unitOfMeasurements/routes.py | DeschutesBrewery/brewerypi | 27 | 12796930 | from flask import flash, redirect, render_template, request, url_for
from flask_login import login_required
from sqlalchemy import and_
from . import unitOfMeasurements
from . forms import UnitOfMeasurementForm
from .. import db
from .. decorators import adminRequired
from .. models import UnitOfMeasurement
modelName = "Unit"
@unitOfMeasurements.route("/unitOfMeasurements", methods = ["GET", "POST"])
@login_required
@adminRequired
def listUnitOfMeasurements():
unitOfMeasurements = UnitOfMeasurement.query
return render_template("unitOfMeasurements/unitOfMeasurements.html", unitOfMeasurements = unitOfMeasurements)
@unitOfMeasurements.route("/units/add", methods = ["GET", "POST"])
@login_required
@adminRequired
def addUnitOfMeasurement():
operation = "Add"
form = UnitOfMeasurementForm()
# Add a new unit of measurement.
if form.validate_on_submit():
unitOfMeasurement = UnitOfMeasurement(Abbreviation = form.abbreviation.data, Name = form.name.data)
db.session.add(unitOfMeasurement)
db.session.commit()
flash("You have successfully added the new unit of measurement \"{}\".".format(unitOfMeasurement.Abbreviation), "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
# Present a form to add a new unit of measurement.
breadcrumbs = [{"url" : url_for("unitOfMeasurements.listUnitOfMeasurements"), "text" : "<span class = \"glyphicon glyphicon-home\"></span>"}]
return render_template("addEdit.html", breadcrumbs = breadcrumbs, form = form, modelName = modelName, operation = operation)
@unitOfMeasurements.route("/units/addDefaultUnitsOfMeasurements", methods = ["GET", "POST"])
@login_required
@adminRequired
def addDefaultUnitsOfMeasurements():
defaultUnits = {"ASBC" : "american society of brewing chemists",
"ADF" : "apparent degree of fermentation",
"bbl" : "barrel",
"cells/ml" : "cells per milliliter",
"cells/ml/°P" : "cells per ml per degree plato",
"°C" : "degree celsius",
"°P" : "degree plato",
"°F" : "degree fahrenheit",
"°F/min" : "degree fahrenheit per minute",
"EBC" : "european brewery convention",
"gal" : "gallon",
"gpm" : "gallons per minute",
"g" : "grams",
"g/bbl" : "grams per barrel",
"g/L" : "grams per liter",
"h" : "hour",
"in" : "inches",
"IBU" : "international bittering unit",
"kg" : "kilogram",
"L" : "liters",
"mg" : "milligram",
"mL" : "milliliter",
"mm" : "millimeter",
"min" : "minute",
"ppb" : "parts per billion",
"ppm" : "parts per million",
"%" : "percentage",
"pH" : "potential of hydrogen",
"lb" : "pound",
"lb/bbl" : "pounds per barrel",
"psi" : "pounds per square inch",
"RDF" : "real degree of fermentation",
"RE" : "real extract",
"s" : "second",
"SG" : "specific gravity",
"SRM" : "standard reference method",
"t/h" : "tons per hour",
"TA" : "total acidity",
"vol" : "volumes",
"x10^12 cells" : "x10^12 cells",
"x10^6 cells" : "x10^6 cells"}
addedUnits = []
skippedUnits = []
for defaultUnit in defaultUnits:
unit = UnitOfMeasurement.query.filter(and_(UnitOfMeasurement.Abbreviation == defaultUnit,
UnitOfMeasurement.Name == defaultUnits[defaultUnit])).first()
if unit is None:
addedUnits.append(defaultUnits[defaultUnit])
unit = UnitOfMeasurement(Abbreviation = defaultUnit)
unit.Name = defaultUnits[defaultUnit]
db.session.add(unit)
else:
skippedUnits.append(defaultUnits[defaultUnit])
db.session.commit()
addedMessage = ""
alert = "alert alert-warning"
if addedUnits:
for unit in addedUnits:
if addedMessage == "":
addedMessage = "Added: {}".format(unit)
alert = "alert alert-success"
else:
addedMessage = "{}, {}".format(addedMessage, unit)
addedMessage = "{}.".format(addedMessage)
else:
addedMessage = "Added none of the default units of measurements."
flash(addedMessage, alert)
skippedMessage = ""
if skippedUnits:
for unit in skippedUnits:
if skippedMessage == "":
skippedMessage = "Skipped: {}".format(unit)
else:
skippedMessage = "{}, {}".format(skippedMessage, unit)
skippedMessage = "{} as they already exist.".format(skippedMessage)
flash(skippedMessage, "alert alert-warning")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
@unitOfMeasurements.route("/unitOfMeasurements/delete/<int:unitOfMeasurementId>", methods = ["GET", "POST"])
@login_required
@adminRequired
def deleteUnitOfMeasurement(unitOfMeasurementId):
unitOfMeasurement = UnitOfMeasurement.query.get_or_404(unitOfMeasurementId)
if unitOfMeasurement.isReferenced():
flash('Unit of Measurement "{}" is referenced by one or more element and/or event frame attribute template and/or tag and cannot be deleted.'. \
format(unitOfMeasurement.Abbreviation), "alert alert-danger")
else:
unitOfMeasurement.delete()
db.session.commit()
flash('You have successfully deleted the unit of measurement "' + unitOfMeasurement.Abbreviation + '".', "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
@unitOfMeasurements.route("/unitOfMeasurements/edit/<int:unitOfMeasurementId>", methods = ["GET", "POST"])
@login_required
@adminRequired
def editUnitOfMeasurement(unitOfMeasurementId):
operation = "Edit"
unitOfMeasurement = UnitOfMeasurement.query.get_or_404(unitOfMeasurementId)
form = UnitOfMeasurementForm(obj = unitOfMeasurement)
# Edit an existing unit of measurement.
if form.validate_on_submit():
unitOfMeasurement.Abbreviation = form.abbreviation.data
unitOfMeasurement.Name = form.name.data
db.session.commit()
flash("You have successfully edited the unit of measurement \"{}\".".format(unitOfMeasurement.Abbreviation), "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
# Present a form to edit an existing unit of measurement.
form.unitOfMeasurementId.data = unitOfMeasurement.UnitOfMeasurementId
form.abbreviation.data = unitOfMeasurement.Abbreviation
form.name.data = unitOfMeasurement.Name
breadcrumbs = [{"url" : url_for("unitOfMeasurements.listUnitOfMeasurements"), "text" : "<span class = \"glyphicon glyphicon-home\"></span>"},
{"url" : None, "text" : unitOfMeasurement.Name}]
return render_template("addEdit.html", breadcrumbs = breadcrumbs, form = form, modelName = modelName, operation = operation)
| 2.703125 | 3 |
conf/tests/test_wsgi.py | uktrade/sso | 1 | 12796931 | from unittest import mock
import pytest
from bs4 import BeautifulSoup
from conf import wsgi
@pytest.mark.django_db
@pytest.mark.parametrize('script_name,prefix', (('/sso', '/sso/accounts/'), ('', '/accounts/')))
def test_set_script_name(rf, script_name, prefix):
environ = rf._base_environ(
PATH_INFO='/accounts/password/reset/',
CONTENT_TYPE="text/html; charset=utf-8",
REQUEST_METHOD="GET",
HTTP_X_SCRIPT_NAME=script_name,
)
response = wsgi.application(environ=environ, start_response=mock.Mock)
assert response.status_code == 200
soup = BeautifulSoup(response.content, 'html.parser')
element = soup.find(id='header-sign-in-link')
assert element.attrs['href'].startswith(prefix)
| 2.34375 | 2 |
looking_for_group/game_catalog/migrations/0011_publishedmodule_parent_game_edition.py | andrlik/looking-for-group | 0 | 12796932 | # Generated by Django 2.1.2 on 2018-11-04 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game_catalog', '0010_auto_20181104_1036'),
]
operations = [
migrations.RemoveField(
model_name='publishedgame',
name='edition',
),
migrations.RemoveField(
model_name='publishedgame',
name='game_system',
),
migrations.RemoveField(
model_name='publishedgame',
name='isbn',
),
migrations.RemoveField(
model_name='publishedgame',
name='publisher',
),
]
| 1.515625 | 2 |
bashbot/commands/rename_command.py | SudoPokeMaster/BashBot | 0 | 12796933 | import asyncio
from bashbot.commands import Command
from bashbot.commands.syntax import SyntaxBuilder
from bashbot.session_manager import SessionManager
class RenameCommand(Command):
def __init__(self):
super().__init__()
self.name = "Rename terminal"
self.aliases = [".rename"]
self.description = "Renames terminal session"
self.usage = ".rename <new_name>"
self.permission = "session.rename"
self.syntax = SyntaxBuilder() \
.param("new_name", "rename") \
.build()
async def rename(self, client, message, parameters):
session = SessionManager.get_session(message.channel)
if session:
if not len(parameters["new_name"]) > 20:
session.name = parameters["new_name"]
else:
                await client.send_message(message.channel, ":no_entry_sign: Maximum length of a session name is 20. Yours is: %s" % len(parameters["new_name"]))
return
session.send_output(asyncio.get_event_loop())
else:
            await client.send_message(message.channel, ":no_entry_sign: You are trying to rename a non-existing session")
| 2.296875 | 2 |
policy.py | shaoshitong/torchdistill | 1 | 12796934 | import os, sys
if __name__ == "__main__":
negative_identity_weight = [0, 0, 0]
positive_identity_weight = [0, 0, 0]
negative_classes_weight = [0, 0, 0]
positive_classes_weight = [0, 0, 0]
negative_policy_weight = [0, 0, 0]
positive_policy_weight = [0, 0, 0]
for i in range(len(negative_identity_weight)):
log = f'log/cifar10/icpkd/resnet18_from_resnet50_option_1_{i}_2倍.txt'
os.system("rm -rf ./resource")
os.system(f"python ./SST/image_classification_policy.py --log {log}")
print(f"============================end {i}================================") | 2.28125 | 2 |
parser.py | Snake-Whisper/zone-file-parser | 4 | 12796935 | #!/usr/bin/env python3
"""
Limits:
- can't read brackets in brackets
- avoid using more than one bracket per line. Normally it should work, but you should use it carefully
- problems distinguishing ttl/domain if domain[:-1].isdigit()"""
VERSION = 1.1
DOMAIN_MAX_LENGHT = 255
CLASSES = ["IN", "HS", "CH", "CS"]
RRsTYPES = ["A","AAAA", "A6", "AFSDB", "APL", "CERT", "CNAME", "DHCID", "DNAME",
"DNSKEY", "DS", "GPOS", "HINFO", "IPSECKEY", "ISDN", "KEY", "KX", "LOC",
"MX", "NAPTR", "NSAP", "NS", "NSEC", "NSEC3","NSEC3PARAM", "NXT", "PTR",
"PX", "RP", "PRSIG", "RT", "SIG", "SOA", "SPF", "SRV", "SSHFP", "TXT", "WKS", "X25"]
from time import strftime
class ZoneFileError(Exception):
"""Simple Exception handler"""
def __init__(self, error, file):
self.error = str(error)
self.file = str(file)
def __str__(self):
return """Please check the given zone file {0}.\nFollowing Error occured: {1}""".format(self.file, self.error)
class _Parser():
"""Main Parser"""
def __init__(self, file):
self.file = file
self.zone = list()
self.Table = list() # format: [primKey, name, ttl, class, type, value]
self.stream = open(file)
self.zone_org = self.stream.read()
self.stream.close()
self.zone = self.zone_org.splitlines()
self.rmComment()
self.rmCompleteParanthese()
self.split()
self.cleanUp()
self.parse()
def error(self, error):
"""returns error"""
raise ZoneFileError(error, self.file)
def getIndexe(self, pattern):
"""return every index of fitting patter"""
self.counter = 0
self.result = list()
for i in range(self.zone_org.count(pattern)):
self.result.append(self.zone_org.find(pattern, self.counter))
self.counter = self.result[-1] + 1
return self.result
def rmComment(self):
"""Removes comments from zone (;, #, /**/, //)"""
if ";" in self.zone_org: self.zone = [i.split(";")[0] for i in self.zone if i != ";"]
if "#" in self.zone_org: self.zone = [i.split("#")[0] for i in self.zone if i != "#"]
if "//" in self.zone_org: self.zone = [i.split("//")[0] for i in self.zone if i != "//"]
if "/*" in self.zone_org:
self.pop = list()
self.counter = False
for i in range(len(self.zone)):
if "/*" in self.zone[i]:
self.counter = True
self.zone[i] = self.zone[i].split("/*")[0]
continue
if "*/" in self.zone[i]:
                    self.pop.append(i) # warning: the complete line is removed. Problem with: /*comment\nbla\nbla*/command?
self.counter = False
continue
if self.counter:
self.pop.append(i)
self.pop.sort(reverse = True) # To avoid collaps of mapping
for i in self.pop:
self.zone.pop(i)
def move(self, index):
"""Merge index + 1 with index."""
self.zone[index] += " " + self.zone[index + 1]
self.zone.pop(index + 1)
def rmParanthese(self):
"""removes paranthes if closed from zone file line"""
self.zone = [self.zone[i].replace("(", "").replace(")", "") if self.zone[i].count("(") == self.zone[i].count(")") else self.zone[i] for i in range(len(self.zone))]
def mergeParanthese(self):
"""Merge every paranthes to one line"""
self.paranthese = 0
self.subt = 0
for i in range(len(self.zone)):
i -= self.subt # to compense the mapping collaps
try:
self.zone[i]
except IndexError:
break
if "(" in self.zone[i]:
self.paranthese += 1
self.use_index = i
continue
if ")" in self.zone[i]:
self.paranthese -= 1
self.move(self.use_index)
self.subt += 1
continue
if self.paranthese:
self.move(self.use_index)
self.subt += 1
def rmCompleteParanthese(self):
"""removes every paranthes from zone by merging"""
self.count = 0
while [i for i in self.zone if "(" in i or ")" in i]:
self.count += 1
self.rmParanthese()
self.mergeParanthese()
if self.count > 100:
self.error("Paranthese Syntax: Please avoid using Paranthese in Paranthese or more then more paranthese per line")
self.rmParanthese()
del self.count
def split(self):
"""splits zone to fields"""
self.zone = [i.split() for i in self.zone]
def handle(self, primKey, Name, TTL, Class, Type, Value):
"""Handler for parser return. Here you get all data -> api""" # later mySQL?
self.Table.append([primKey, Name, TTL, Class, Type, Value])
def isType(self, object):
"""returns true if object is a entry type like NS, eg."""
return True if object in RRsTYPES else False
def isClass(self, object):
"""returns True if obeject is a class like IN, eg."""
return True if object in CLASSES else False
def isTTL(self, liste):
"""returns True if given list from zone is TTL record"""
return True if liste[0] == '$TTL' and len(liste) < 3 else False
def isTTLobj(self, object):
"""Returns if given object is ttl. Warning: it's just probatly correct"""
return True if object[:-1].isdigit() else False # -1 because of 23h for eg.
def cleanUp(self):
"""removes empty strings and lists from zone"""
self.zone = [i for i in self.zone if i and i[0] != '']
def getType(self, liste):
"""returns type of given entry"""
for i in liste:
if self.isType(i):
return i
def getClass(self, liste):
"""returns class of given entry"""
for i in liste:
if self.isClass(i):
return i
def parse(self):
"""Main Parser"""
self.primKey = 0
for entry in self.zone:
if self.isTTL(entry):
self.default_TTL = entry[1] # default ttl
continue
self.type = self.getType(entry)
self.klasse = self.getClass(entry)
if self.type:
self.default_type = self.type
else:
try:
self.type = self.default_type
except NameError:
self.error("Please check your zonfile. Error at {0}.\nType not found".format(" ".join(entry)))
if self.klasse:
self.default_klasse = self.klasse
else:
try:
self.klasse = self.default_klasse
except NameError:
self.error("Please check your zonfile. Error at {0}.\nClass not found".format(" ".join(entry)))
self.typeindex = entry.index(self.type)
self.value = " ".join(entry[self.typeindex+1:])
            entry = entry[:self.typeindex] # left: probably name, probably ttl, probably class
self.over = len(entry)
if self.over == 3:
if entry.pop(2) != self.klasse:
self.error("There occured a fatal logical error at {0}.\nPlease contact support for more information".format(" ".join(entry)))
self.over = len(entry)
if self.over == 2: # Possible: class, ttl, name but: entry[1] = {TTL//class} -> !name
if entry[1] == self.klasse:
entry.pop()
else:
self.ttl = entry.pop() # Has to be ttl
self.over = len(entry)
if self.over == 1: # possible: name, class, ttl
if entry[0] == self.klasse:
entry.pop()
elif self.isTTLobj(entry[0]):
print("warning at {0}. I'll handle it as TTL!".format(" | ".join([str(y) for y in (self.primKey, self.name, entry[0], self.klasse, self.type, self.value)]))) # carefull!!! 123456d as dom -> undifined error
self.ttl = entry.pop()
else:
self.name = entry[0]
try:
self.ttl = self.default_TTL
except AttributeError:
self.error("Please check your zonfile. TTL not found")
self.handle(self.primKey, self.name,self.ttl, self.klasse, self.type, self.value)
del self.value
self.primKey += 1
class Parser():
"""Paser - Friendly User API"""
def __init__(self, file):
import os.path as path
self.file = file
self.parser = _Parser(file)
self.table = self.parser.Table
self.TTL = self.parser.default_TTL
self.zonename = path.basename(self.file)
del self.parser # RAM clean
def getValues(self):
"""returns set of all available Values in the Zone"""
return set([i[5] for i in self.table])
def getTypes(self):
"""returns set of all available Types in the Zone"""
return set([i[4] for i in self.table])
def getClasses(self):
"""returns set of all available classes in the Zone"""
return set([i[3] for i in self.table])
def getTTLs(self):
"""returns set of all available TTLs in the Zone (Normaly one)"""
return set([i[2] for i in self.table])
def getDomains(self):
"""returns set of all available Domains in the Zone"""
return set([i[1] for i in self.table])
def getIDs(self):
"""returns set of all available ID's // prim. keys of internal table"""
return set([i[0] for i in self.table])
def getDefaultTTL(self):
"""Returns last used TTL"""
return self.TTL
def getRecords(self, ID = False, Domain = False, TTL = False, Class = False, Type = False, Value = False):
"""MetaGer - returns list of matching rows"""
self.result = list()
for i in self.table:
if ID and ID != i[0]: continue
if not isinstance(ID, bool) and ID == 0 and i[0] != 0: continue
if Domain and Domain != i[1]: continue
if TTL and TTL != i[2]: continue
if Class and Class != i[3]: continue
if Type and Type != i[4]: continue
if Value and Value != i[5]: continue
self.result.append(i)
return self.result
def getValue(self, Value):
"""Returns entrys matching the given value"""
return [i for i in self.table if i[5] == Value]
def getType(self, Type):
"""Returns entrys matching the given type"""
return [i for i in self.table if i[4] == Type]
def getClass(self, Class):
"""Returns entrys matching the given class"""
return [i for i in self.table if i[3] == Class]
def getTTL(self, TTL):
"""Returns entrys matching the given TTL"""
return [i for i in self.table if i[2] == str(TTL)]
def getName(self, Name):
"""Returns entrys matching the given name"""
return [i for i in self.table if i[1] == Name]
def getID(self, ID):
"""Returns entrys matching the given ID"""
return [i for i in self.table if i[0] == ID]
def getMaster(self):
"""Returns Master-field of SOA record"""
return self.getType("SOA")[0][5].split()[0]
def getZoneContact(self):
"""Returns contact-field of SOA record"""
return self.getType("SOA")[0][5].split()[1]
def getSerial(self):
"""Returns serial-field of SOA record"""
return self.getType("SOA")[0][5].split()[2]
def getRefreshTime(self):
"""Returns refersh time - field of SOA record"""
return self.getType("SOA")[0][5].split()[3]
def getRetryTime(self):
"""Returns retry time - field of SOA record"""
return self.getType("SOA")[0][5].split()[4]
def getExpireTime(self):
"""Returns expire time - field of SOA record"""
return self.getType("SOA")[0][5].split()[5]
def getNegativeCache(self):
"""Returns negative cache time - field of SOA record"""
return self.getType("SOA")[0][5].split()[6]
def getIPv4(self):
"""Return current IPv4 addr of origin"""
return self.getRecords(Domain = "@", Class = "IN", Type="A")[0][5]
def getIPv6(self):
"""Return current IPv6 addr of origin"""
return self.getRecords(Domain = "@", Class = "IN", Type="AAAA")[0][5]
def mkSerial(self, check = True):
"""Sets timestamp allone. If check, no serial > 99 are supported"""
self.old_time = self.getSerial()[:8]
self.new_time = strftime("%Y%m%d")
if self.old_time != self.new_time:
self.serial = "01"
else:
self.serial = str(int(self.getSerial()[8:]) + 1)
        if check: assert int(self.serial) < 100, """More than 99 changes aren't supported per day."""
if len(self.serial) < 2:
self.serial = "0{0}".format(self.serial)
return "{0}{1}".format(self.new_time, self.serial)
def refresh(self):
"""Reloads complete zone"""
self.__init__(self.file)
def convert2sqlite(self, file, table = None, commit = True):
"""Writes results to sql database. If table not given, zonename is used
if commit = [True] chnages are automatic committed to db,
else connection object is returned"""
import sqlite3 as sql
if table: self.tableName = table
else: self.tableName = self.zonename
self.connection = sql.connect(file)
self.cursor = self.connection.cursor()
self.cursor.execute("drop table if exists '{0}'".format(self.tableName)) # insecure !!! Problems: "db.mydomain.local".count(".") != 0 -> mySQL Syntax error
self.cursor.execute("""CREATE TABLE '{0}'
(id INT,
domain VARCHAR({1}) NOT NULL,
ttl INT,
class VARCHAR({2}) NOT NULL,
type VARCHAR({3}) NOT NULL,
value TEXT NOT NULL)""".format(self.tableName, DOMAIN_MAX_LENGHT,
max([len(i) for i in RRsTYPES]),
max([len(i) for i in CLASSES]))) # also insecure
self.cursor.executemany('INSERT INTO "{0}" VALUES (?,?,?,?,?,?)'
.format(self.tableName), self.table)
if commit:
self.connection.commit()
self.cursor.close()
else:
return self.connection
if __name__ == "__main__":
from sys import argv
from os import path as path
if len(argv) == 1:
print("""
Bind Zonefile Parser
====================
Version: {0}
Converts zone file to sqlite database
Stand Alone Usage:
./parser.py zonefile [database=zone.sqlite]\n""".format(VERSION))
elif len(argv) == 2:
assert path.isfile(argv[1]), "Zonefile {0} not found".format(argv[1])
parser = Parser(argv[1])
parser.convert2sqlite("zone.sqlite")
print("wrote database to zone.sqlite")
elif len(argv) == 3:
assert path.isfile(argv[1]), "Zonefile {0} not found".format(argv[1])
parser = Parser(argv[1])
parser.convert2sqlite(argv[2])
print("wrote database to {0}".format(argv[2]))
else:
print("To many arguments")
| 2.484375 | 2 |
money_legos/uniswap/contracts.py | gokhanbaydar/py-money-legos | 3 | 12796936 | <gh_stars>1-10
from .. import util
exchangeAbi = util.read_json("./uniswap/abi/Exchange.json")
factoryAbi = util.read_json("./uniswap/abi/Factory.json")
contracts = {
"factory": {
"address": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95",
"abi": factoryAbi,
},
"exchange": {
"abi": exchangeAbi,
},
}
| 1.875 | 2 |
ebad/trainer/module.py | vahidzee/ebad | 0 | 12796937 | import pytorch_lightning as pl
import torch
import typing as th
import torchmetrics
from .. import utils as ebad_utils
class AnoTrainer(pl.LightningModule):
def __init__(
self,
model_cls: th.Union[str, torch.nn.Module],
input_shape: th.Union[th.Tuple[int], th.List[int]],
input_clamp: th.Optional[th.Union[float, tuple]] = (-1., 1.),
model_params: th.Optional[dict] = None,
**kwargs,
):
        super().__init__()
        self.save_hyperparameters()  # expose the constructor arguments through self.hparams
        self.model = ebad_utils.get_value(self.hparams.model_cls)(**(self.hparams.model_params or dict()))
# metrics
self.val_auroc = torchmetrics.AUROC(num_classes=2, pos_label=1)
self.test_auroc = torchmetrics.AUROC(num_classes=2, pos_label=1)
def forward(self, x):
z = self.model(x)
return z
def training_step(self, batch, batch_idx: th.Optional[int] = None, optimizer_idx: th.Optional[int] = None):
inputs, targets = batch
if self.noise_eps:
# add minimal noise to the original inputs to prevent the model from focusing on purely "clean" inputs
inputs.add_(torch.randn_like(inputs) * self.noise_eps)
if self.hparams.input_clamp:
inputs.clamp_(
*(self.hparams.input_clamp if isinstance(self.hparams.input_clamp, tuple) else (
-self.hparams.input_clamp, self.hparams.input_clamp)))
# Obtain samples
samples = self.sampler.sample(sample_size=inputs.shape[0], update_buffer=True, device=inputs.device)
# Predict energy score for all images
all_inputs = torch.cat([inputs, samples], dim=0)
inputs_out, samples_out = self.model(all_inputs).chunk(2, dim=0)
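        # inputs_out scores the real (noisy) inputs, samples_out scores the sampler's negatives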
# Calculate losses
loss = 0.
if self.regularizer_alpha:
reg_loss = (inputs_out ** 2 + samples_out ** 2).mean()
loss += self.regularizer_alpha * reg_loss
self.log(f'loss/regularization/train', reg_loss)
        cdiv_loss = samples_out.mean() - inputs_out.mean()
self.log(f'loss/contrastive_divergence/train', cdiv_loss)
loss += cdiv_loss
self.log(f'loss/train', loss)
self.log(f'metrics/inputs/train', inputs_out.mean())
self.log(f'metrics/samples/train', samples_out.mean())
return loss
def validation_step(self, batch, batch_idx: th.Optional[int] = None, optimizer_idx: th.Optional[int] = None):
# calculate the contrastive divergence between purely random images and unseen examples
inputs, targets = batch
self.log(f'metrics/random/val', self.model(torch.rand_like(inputs) * 2 - 1).mean())
scores = self.model(inputs)
        self.val_auroc(
            -scores,  # auroc expects predictions to have higher values for the positive class
            targets,
        )
self.log('metrics/auroc/val', self.val_auroc, on_step=True, on_epoch=True)
| 2.40625 | 2 |
modules/url_content_fetcher.py | facebookresearch/URL-Sanitization | 24 | 12796938 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Given an URL in string, make a request, fetch its content,
and parse using BeautifulSoup.
"""
import time
import requests
import logging
import urllib.parse as urlparse
from bs4 import BeautifulSoup
class URLContentFetcher(object):
def __init__(self, url, timeout=3, parser='html5lib', proxies=None):
self.url = url
self.soup = None
self.success = None
self.message = None
self.timeout = timeout
self.parser = parser
self.proxies = proxies
self.running_time = 0
def read_and_soup(self):
"""
Fetch content from a url
"""
        user_agent_list = [
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/35.0.1916.47 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/60.0.3112.113 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/60.0.3112.90 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/44.0.2403.157 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.3; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/60.0.3112.113 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/57.0.2987.133 Safari/537.36',
        ]
parsed = urlparse.urlparse(self.url)
headers = {
"User-Agent": user_agent_list[
hash(parsed.netloc + parsed.path) % len(user_agent_list)],
"X-Requested-With": "XMLHttpRequest",
"Accept-Encoding": "gzip",
}
try:
start_time = time.time()
r = requests.get(
self.url,
headers=headers,
timeout=self.timeout,
stream=True,
proxies=self.proxies
)
url_data = r.content.decode('utf-8', 'ignore')
soup = BeautifulSoup(url_data, self.parser)
end_time = time.time()
self.running_time = end_time - start_time
self.soup = soup
self.success = True
except Exception as e:
logging.error(repr(e) + ", url: {0}".format(self.url))
self.success = False
self.message = "Modified URL error: " + str(e)
def get_body(self):
"""
Get the body of a HTML content
"""
if self.soup is None:
self.read_and_soup()
if not self.success or self.soup.body is None:
return ""
return self.soup.body.getText()
def get_title(self):
"""
Get the title from a HTML content
"""
if self.soup is None:
self.read_and_soup()
if not self.success or self.soup.title is None:
return ""
return self.soup.title
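

# Minimal usage sketch: fetch one page and print its title; the URL below is only a placeholder.
if __name__ == "__main__":
    fetcher = URLContentFetcher("https://example.com", timeout=5)
    fetcher.read_and_soup()
    if fetcher.success:
        print(fetcher.get_title())
    else:
        print(fetcher.message)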
| 3.484375 | 3 |
setup.py | coopie/ttv | 0 | 12796939 | <filename>setup.py
from setuptools import setup
setup(name='ttv',
version='0.0.1',
description='A command line tool and a python library for test, train, validation set splitting',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/coopie/ttv',
download_url='https://github.com/coopie/ttv/archive/master.zip',
license='MIT',
install_requires=['docopt', 'pyyaml'],
py_modules=['ttv']
)
| 1.171875 | 1 |
TDO/utils_tdo/utils_evaluation.py | lgi2p/TDSelection | 0 | 12796940 | <reponame>lgi2p/TDSelection<filename>TDO/utils_tdo/utils_evaluation.py
def compute_precision_with_general(sol_dict_, truth_, ancestors_):
    # Function that computes the number of expected/general/erroneous values returned by the approach.
    # Note that the general values are all returned values that are more general than the expected one (therefore they are still true).
    # Note that the erroneous values are values that are neither general nor expected.
n_exp_ = 0
n_gen_ = 0
n_err_ = 0
for d in sol_dict_:
returned_value = sol_dict_[d]
expected = truth_[d]
if returned_value == expected:
n_exp_ += 1
else:
if returned_value in ancestors_[expected]:
n_gen_ += 1
else:
n_err_ += 1
return n_exp_, n_gen_, n_err_ | 2.234375 | 2 |
servicex/datastream/pyarrowdemo.py | ssl-hep/ServiceX_datastream | 0 | 12796941 | <reponame>ssl-hep/ServiceX_datastream
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import awkward
import pyarrow as pa
import sys
import uproot
print(sys.path)
file = uproot.open(os.path.join("/Users","bengal1","dev","IRIS-HEP","data",
"DYJetsToLL_M-50_HT-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root"))
events = file["Events"]
arrays = events.arrays(["nElectron",
"Electron_pt",
"Electron_eta",
"Electron_phi",
"Electron_mass",
"Electron_cutBased",
"Electron_pdgId",
"Electron_pfRelIso03_all"], entrystop=1000)
physics_objects = {}
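# Rebuild per-event (jagged) structure: nElectron holds the electron count of each event,
# and counts2offsets turns those counts into offsets for re-wrapping the flat branch contents.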
offsets = awkward.JaggedArray.counts2offsets(arrays[b'nElectron'])
physics_objects["Electron"] = {
"pt": awkward.JaggedArray.fromoffsets(offsets, arrays[b"Electron_pt"].content),
"eta":awkward.JaggedArray.fromoffsets(offsets, arrays[b"Electron_eta"].content),
"phi": awkward.JaggedArray.fromoffsets(offsets, arrays[b"Electron_phi"].content),
"mass": awkward.JaggedArray.fromoffsets(offsets, arrays[b"Electron_mass"].content),
"cutBased": awkward.JaggedArray.fromoffsets(offsets, arrays[
b"Electron_cutBased"].content),
"pdgId": awkward.JaggedArray.fromoffsets(offsets, arrays[b"Electron_pdgId"].content),
"pfRelIso03_all": awkward.JaggedArray.fromoffsets(offsets, arrays[
b"Electron_pfRelIso03_all"].content)
}
electrons = physics_objects["Electron"]
t = awkward.Table(electrons)
awkward.toparquet("tmp.parquet", t)
pa_table = awkward.toarrow(t)
batches = pa_table.to_batches()
batch = batches[0]
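# Round-trip the first record batch through an in-memory Arrow IPC stream and back into awkward.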
sink = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
buf = sink.getvalue()
reader = pa.ipc.open_stream(buf)
batches2 = [b for b in reader]
arrays = awkward.fromarrow(batches2[0])
print(arrays)
| 1.4375 | 1 |
test/test_buildx_driver.py | altairengineering/pkr | 16 | 12796942 | import sys
from .utils import pkrTestCase
class TestBuildxDriver(pkrTestCase):
PKR = "pkr"
pkr_folder = "docker_driver"
kard_env = "dev"
kard_driver = "buildx_compose"
kard_extra = {
"tag": "123",
"flag": "flag_value",
"buildx.cache_registry": "dummy",
"buildx.builder_name": "testpkrbuilder",
}
def test_docker_driver_values(self):
self.kard_extra["src_path"] = self.src_path
self.generate_kard()
self.make_kard()
out_dir = self.pkr_path / "kard" / "test"
expected = sorted(
[
out_dir / "buildx" / "folder2_dst" / "copy",
out_dir / "buildx" / "file1" / "file2",
out_dir / "buildx" / "file1.dockerfile",
out_dir / "meta.yml",
]
)
def walk(path):
for p in path.iterdir():
if p.is_dir():
yield from walk(p)
continue
yield p.resolve()
self.assertEqual(sorted(list(walk(out_dir))), expected)
def test_docker_multiple_contexts(self):
self.kard_extra["src_path"] = self.src_path
self.generate_kard(env="contexts")
self.make_kard()
out_dir = self.pkr_path / "kard" / "test"
self.assertTrue((out_dir / "buildx" / "folder2_dst" / "copy").exists())
self.assertTrue((out_dir / "context1" / "folder2_dst" / "copy").exists())
self.assertTrue((out_dir / "buildx" / "file1.dockerfile").exists())
self.assertTrue((out_dir / "context1" / "file1.dockerfile").exists())
cmd = "{} image build -s container1 -c".format(self.PKR)
prc = self._run_cmd(cmd)
stdout = prc.stdout.read()
stderr = prc.stderr.read()
# Python 3.6
if sys.version_info < (3, 7):
self.assertEqual(stdout, b"", stdout)
self.assertEqual(
stderr, b"ERROR: (Exception) buildx is not supported for python < 3.6\n"
)
return
expected = (
b"Warning: No docker-compose file is provided with this environment.\n"
b"Removing context1 ... Done !\n"
b"Removing buildx ... Done !\n"
b"Start buildx builder testpkrbuilder\n"
b"Building docker images...\n\n"
b"Building container1:123 image...\n\n"
)
self.assertTrue("unknown instruction: flag_value" in stderr.decode("utf-8"), stderr)
self.assertEqual(stdout, expected, stdout)
self.assertRegex(
stderr.decode("utf-8"),
r"docker buildx build --progress plain --builder testpkrbuilder --load "
r"--file /tmp/.*/file1.dockerfile --cache-from type=registry,ref=dummy/cache "
r"--cache-to type=registry,mode=max,ref=dummy/cache --tag container1:123 "
r"/tmp/.*/context1",
)
| 2.203125 | 2 |
ch09/complexity.py | ricjuanflores/practice-of-the-python | 319 | 12796943 | def has_long_words(sentence):
if isinstance(sentence, str): # <1>
sentence = sentence.split(' ')
for word in sentence: # <2>
if len(word) > 10: # <3>
return True
return False # <4>
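

# Usage sketch exercising both accepted input types (a raw string and a pre-split list of words).
if __name__ == "__main__":
    print(has_long_words("a sentence with extraordinarily long words"))  # True
    print(has_long_words(["short", "words", "only"]))                    # False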
| 3.546875 | 4 |
src/wready/__init__.py | WisconsinRobotics/wready | 0 | 12796944 | <filename>src/wready/__init__.py
from .sig_handler import SignalInterruptHandler
from .wready_client import TaskContext, WReadyClient
from .wready_server import InitTask, WReadyServer, WReadyServerObserver
| 1.140625 | 1 |
utils/models/manufacture_building.py | roomdie/KingsEmpiresBot | 0 | 12796945 | from utils.models import base, product
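# Russian display names used below: Гончарня = pottery workshop, Плантация = plantation,
# Мясник = butcher, Портной = tailor.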
#
# Bronze Age
#
BronzePottery = base.ManufactureBuilding(
name="🏺🏠 Гончарня",
products=[product.dish, product.jug, product.amphora],
create_price=[340, 490],
create_time_sec=1800,
manpower=108
)
BronzePlantation = base.ManufactureBuilding(
name="🍇🏠 Плантация",
products=[product.grape, product.pear, product.melon],
create_price=[340, 490],
create_time_sec=1800,
manpower=108
)
# Iron Age
IronForger = base.ManufactureBuilding(
name="🥩🏠 Мясник",
products=[product.meat, product.chicken],
create_price=[1500, 2400],
create_time_sec=5400,
manpower=230
)
IronButcher = base.ManufactureBuilding(
name="🧵🏠 Портной",
products=[product.threads, product.socks],
create_price=[1500, 2400],
create_time_sec=5400,
manpower=230
)
| 2.09375 | 2 |
problem0546.py | kmarcini/Project-Euler-Python | 0 | 12796946 | ###########################
#
# #546 The Floor's Revenge - Project Euler
# https://projecteuler.net/problem=546
#
# Code by <NAME>
#
###########################
| 1.1875 | 1 |
workertasks/migrations/0003_auto_20181109_0049.py | danula/crowdcog | 1 | 12796947 | # Generated by Django 2.1.3 on 2018-11-09 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workertasks', '0002_auto_20181109_0046'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='begin_exp',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='begin_hit',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='end_hit',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='worker',
name='qualification',
field=models.IntegerField(default=0),
),
]
| 1.476563 | 1 |
frameworks/pycellchem-2.0/src/artchem/ReactionParser.py | danielrcardenas/ac-course-2017 | 0 | 12796948 | #---------------------------------------------------------------------------
#
# ReactionParser.py: parser for chemical reactions in text format, such as:
#
# A + 2 B --> 3 C , k=2.49
#
# by <NAME>, Univ. Basel, Switzerland, January 2010
# June 2013: adapted to the PyCellChemistry package
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright (C) 2015 <NAME>
# Contact: http://www.artificial-chemistries.org/
#
# This file is part of PyCellChemistry.
#
# PyCellChemistry is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3, as published by the Free Software Foundation.
#
# PyCellChemistry is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCellChemistry, see file COPYING. If not, see
# http://www.gnu.org/licenses/
#
import sys
from Multiset import *
from Reaction import *
class ReactionParser():
def parse_molecules( self, mollist ):
""" parse educt or product multiset given as a list of strings
containing the molecule name and an optional stoichiometric
coefficient, e.g. [ 'a', '2 fox', '4 b1' ]
"""
newmols = Multiset()
for mol in mollist:
mol = mol.strip()
if mol.find(' ') < 0:
name = mol
n = 1
else:
(num, name) = mol.split(' ', 2)
name = name.strip()
n = int(num)
if n > 0 and name != '':
newmols.inject(name, n)
return newmols
def parse_line( self, line ):
""" parse string containing chemical reaction """
line2 = line.split('#') # skip comments
line = line2[0]
try:
if line.find(',') < 0:
reactstr = line
k = 1.0
else:
(reactstr, kstr) = line.split(',', 2)
reactstr = reactstr.strip()
(kstr, kvar) = kstr.split('=', 2)
if (kstr.strip() != 'k'):
raise ValueError
k = float(kvar)
(educts, products) = reactstr.split('-->', 2)
edlist = educts.split('+')
prlist = products.split('+')
edmset = ReactionParser.parse_molecules( self, edlist )
prmset = ReactionParser.parse_molecules( self, prlist )
except ValueError:
print >> sys.stderr, "syntax error on line=", line
exit(-1)
if (edmset.empty() and prmset.empty()): return
newreaction = Reaction(edmset, prmset, k)
return newreaction
def parse_input( self, infile ):
""" parse input file containing multiple chemical reactions,
one per line.
'infile' is the file descriptor for the open input file
"""
reactions = ReactionQueue()
for line in infile.readlines():
line = line.strip()
if (line != '' and line[0] != '#'):
reaction = self.parse_line(line)
reactions.add(reaction)
return reactions
def parse_stdin( self ):
""" parse standard input (stdin) """
return self.parse_input(sys.stdin)
def parse_file( self, fname ):
""" open and parse input file fname """
reactions = None
try:
infd = open(fname, 'r')
reactions = self.parse_input(infd)
infd.close()
except IOError:
print >> sys.stderr, "Error opening file", fname
exit(-1)
return reactions
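

# Minimal usage sketch: parse one reaction written in the textual format shown above;
# the reaction string is illustrative only and assumes the Multiset/Reaction modules
# imported at the top of this file are available.
if __name__ == '__main__':
    demo_parser = ReactionParser()
    demo_reaction = demo_parser.parse_line("A + 2 B --> 3 C , k=2.49")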
| 2.859375 | 3 |
raml_translation_with_score.py | nonstopfor/seq2seq-exposure-bias-tf | 1 | 12796949 | import cotk
from cotk._utils.file_utils import get_resource_file_path
from cotk.dataloader.dataloader import *
from collections import Counter
import numpy as np
from itertools import chain
class Score(DataField):
def get_next(self, dataset):
r"""read text and returns the next label(integer). Note that it may raise StopIteration.
Args:{DataField.GET_NEXT_ARG}
Examples:
>>> dataset = iter(["1\n", "0\n"])
>>> field = Label()
>>> field.get_next(dataset)
1
>>> field.get_next(dataset)
0
"""
score = next(dataset)
return float(score.strip())
def _map_fun(self, element, convert_ids_to_tokens=None):
"""
Returns the element itself.
Args:
element: An element of a dataset.
            convert_ids_to_tokens: Unused. This argument exists only to keep the signature the same as that of the super class.
"""
return element
class TranslationWithScore(cotk.dataloader.SingleTurnDialog):
@cotk._utils.hooks.hook_dataloader
def __init__(self, file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital
):
super().__init__(file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital)
def _load_data(self):
data_fields = {
'train': [['post', 'Sentence'], ['resp', 'Sentence'], ['score', Score]],
'dev': [['post', 'Sentence'], ['resp', 'Sentence']],
'test': [['post', 'Sentence'], ['resp', 'Sentence']],
}
return self._general_load_data(self._file_path, data_fields, \
self._min_vocab_times, self._max_sent_length, None, self._invalid_vocab_times)
def _general_load_data(self, file_path, data_fields, min_vocab_times, max_sent_length, max_turn_length,
invalid_vocab_times):
r'''This function implements a general loading process.
Arguments:
file_path (str): A string indicating the path of dataset.
data_fields (dict, list, tuple): If it's a list(tuple), it must be a list of (key, field) pairs.
Field must be a DataField instance,
or a subclass of DataField(in this case, its instance will be used, assuming its constructor accepts no arguments),
or a string(in this case, the instance of the class, whose __name__ is field, will be used).
For example, data_fields=[['post', 'Sentence'], ['label', Label]] means that,
in the raw file, the first line is a sentence and the second line is a label. They are saved in a dict.
dataset = {'post': [line1, line3, line5, ...], 'label': [line2, line4, line6, ...]}
data_fields=[['key1', 'Session'], ['key2', Label()]], means that, in the raw file, the first *several lines*
is a session, *followed by an empty line*, and the next line is a label.
dataset = {'key1': [session1, session2, ...], 'key2': [label1, label2, ...]}
If it's a dict, different datasets may have different formats.(If `data_fields` is a list or a tuple, different datasets have the same format).
Its keys are the same as `self.key_name` that indicate the datasets, and the values are lists as mentioned above.
For example, data_fields = {'train': [['sess', 'Session'], ['label', 'Label']], 'test': [['sess', 'session']]},
means that the train set contains sessions and labels, but the test set only contains sessions.
min_vocab_times (int): A cut-off threshold of valid tokens. All tokens appear
not less than `min_vocab_times` in **training set** will be marked as valid words.
max_sent_length (int): All sentences longer than ``max_sent_length`` will be shortened
to first ``max_sent_length`` tokens.
max_turn_length (int): All sessions, whose turn length is longer than ``max_turn_length`` will be shorten to
first ``max_turn_length`` sentences. If the dataset don't contains sessions, this parameter will be ignored.
invalid_vocab_times (int): A cut-off threshold of invalid tokens. All tokens appear
not less than ``invalid_vocab_times`` in the **whole dataset** (except valid words) will be
marked as invalid words. Otherwise, they are unknown words, which are ignored both for
model or metrics.
Returns:
(tuple): containing:
* **all_vocab_list** (list): vocabulary list of the datasets,
including valid and invalid vocabs.
* **valid_vocab_len** (int): the number of valid vocab.
``vocab_list[:valid_vocab_len]`` will be regarded as valid vocabs,
while ``vocab_list[valid_vocab_len:]`` regarded as invalid vocabs.
* **data** (dict): a dict contains data.
* **data_size** (dict): a dict contains size of each item in data.
'''
def get_fields(fields):
assert isinstance(fields, list) or isinstance(fields, tuple)
return [(data_key, DataField.get_field(field)) for data_key, field in fields]
if isinstance(data_fields, dict):
no_field_keys = [key for key in self.key_name if key not in data_fields]
if no_field_keys:
raise ValueError('There is no data fields for dataset(%s) ' % ', '.join(no_field_keys))
try:
data_fields = {key: get_fields(data_fields[key]) for key in self.key_name}
except AssertionError:
raise TypeError('If `data_field` is a dict, its value must be a list(or tuple) of lists(or tuples).')
elif isinstance(data_fields, list) or isinstance(data_fields, tuple):
data_fields = get_fields(data_fields)
data_fields = {key: data_fields for key in self.key_name}
else:
raise TypeError('`data_fields` must be a dict, or a list, or a tuple.')
# now data_fields is a dict. Keys are the same as self.key_name('train', 'test', 'dev', etc.). Each value is
# a list(tuple) of lists(tuples), which means (data_key(str), data_field(DataField)) pairs.
# For example,
# data_fields == {'train': [['sent', Sentence()], ['label', Label()]],
# 'test': [['sent', Sentence()], ['label', Label()]]}.
# Note, different dataset may have different fields.
special_tokens = set(self.ext_vocab)
origin_data = {}
for key in self.key_name:
origin_data[key] = {data_key: [] for data_key, _ in data_fields[key]}
with open("%s/%s.txt" % (file_path, key), encoding='utf-8') as f_file:
while True:
try:
for data_key, field in data_fields[key]:
element = field.convert_to_tokens(field.get_next(f_file), self.tokenize)
for token in field.iter_tokens(element):
if token in special_tokens:
raise RuntimeError(
'The dataset contains special token "%s". This is not allowed.' % token)
origin_data[key][data_key].append(element)
except StopIteration:
break
def chain_allvocab(dic, fields):
vocabs = []
for data_key, field in fields:
for element in dic[data_key]:
vocabs.extend(field.iter_tokens(element))
return vocabs
raw_vocab_list = chain_allvocab(origin_data['train'], data_fields['train'])
# Important: Sort the words preventing the index changes between
# different runs
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= min_vocab_times]
vocab_list = self.ext_vocab + list(left_vocab)
valid_vocab_len = len(vocab_list)
valid_vocab_set = set(vocab_list)
for key in self.key_name:
if key == 'train':
continue
raw_vocab_list.extend(chain_allvocab(origin_data[key], data_fields[key]))
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= invalid_vocab_times and x[0] not in valid_vocab_set]
vocab_list.extend(left_vocab)
print("valid vocab list length = %d" % valid_vocab_len)
print("vocab list length = %d" % len(vocab_list))
word2id = {w: i for i, w in enumerate(vocab_list)}
data = {}
data_size = {}
for key in self.key_name:
data[key] = {}
for data_key, field in data_fields[key]:
origin_data[key][data_key] = [field.convert_to_ids(element, word2id, self) for element in
origin_data[key][data_key]]
data[key][data_key] = [
field.cut(element, max_sent_length=max_sent_length, max_turn_length=max_turn_length) for element in
origin_data[key][data_key]]
if key not in data_size:
data_size[key] = len(data[key][data_key])
elif data_size[key] != len(data[key][data_key]):
raise RuntimeError(
"The data of input %s.txt contains different numbers of fields" % key)
vocab = chain_allvocab(origin_data[key], data_fields[key])
vocab_num = len(vocab)
oov_num = sum([word not in word2id for word in vocab])
invalid_num = sum([word not in valid_vocab_set for word in vocab]) - oov_num
sent_length = []
for data_key, field in data_fields[key]:
sent_length.extend(
[len(sent) for element in origin_data[key][data_key] for sent in field.iter_sentence(element)])
cut_word_num = np.sum(np.maximum(np.array(sent_length) - max_sent_length, 0))
session_keys = [data_key for data_key, field in data_fields[key] if field.__class__ == Session]
if session_keys:
turn_length = list(
map(len, chain.from_iterable((origin_data[key][sess_key] for sess_key in session_keys))))
max_turn_length_before_cut = max(turn_length)
sent_num = sum(turn_length)
cut_sentence_rate = np.sum(np.maximum(np.array(turn_length) - max_turn_length, 0)) / sent_num
else:
max_turn_length_before_cut = 1
cut_sentence_rate = 0
print(("%s set. invalid rate: %f, unknown rate: %f, max sentence length before cut: %d, " + \
"cut word rate: %f\n\tmax turn length before cut: %d, cut sentence rate: %f") % \
(key, invalid_num / vocab_num, oov_num / vocab_num, max(sent_length), \
cut_word_num / vocab_num, max_turn_length_before_cut, cut_sentence_rate))
# calculate hash value
hash_value = DataloaderHash(ignore_tokens=(self.go_id, self.eos_id, self.pad_id),
unk_id=self.unk_id).hash_datasets(data, data_fields, vocab_list[len(
self.ext_vocab):valid_vocab_len])
self.__hash_value = hash_value
return vocab_list, valid_vocab_len, data, data_size
def get_batch(self, key, indexes):
'''{LanguageProcessingBase.GET_BATCH_DOC_WITHOUT_RETURNS}
Returns:
(dict): A dict at least contains:
* **post_length** (:class:`numpy.ndarray`): A 1-d array, the length of post in each batch.
Size: ``[batch_size]``
* **post** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form in posts.
Only provide valid words. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **post_allvocabs** (:class:`numpy.ndarray`): A 2-d padded array containing words of id
form in posts. Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
* **resp_length** (:class:`numpy.ndarray`): A 1-d array, the length of response in each batch.
Size: ``[batch_size]``
* **resp** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form
in responses. Only provide valid vocabs. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **resp_allvocabs** (:class:`numpy.ndarray`):
A 2-d padded array containing words of id form in responses.
Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
Examples:
>>> # all_vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you",
>>> # "hello", "i", "am", "fine"]
>>> # vocab_size = 9
>>> # vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you", "hello", "i"]
>>> dataloader.get_batch('train', [0, 1])
{
"post_allvocabs": numpy.array([
[2, 5, 6, 10, 3], # first post: <go> are you fine <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"post": numpy.array([
[2, 5, 6, 1, 3], # first post: <go> are you <unk> <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"resp_allvocabs": numpy.array([
[2, 8, 9, 10, 3], # first response: <go> i am fine <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"resp": numpy.array([
[2, 8, 1, 1, 3], # first response: <go> i <unk> <unk> <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"post_length": numpy.array([5, 3]), # length of posts
"resp_length": numpy.array([5, 3]), # length of responses
}
'''
if key not in self.key_name:
raise ValueError("No set named %s." % key)
res = {}
batch_size = len(indexes)
res["post_length"] = np.array(list(map(lambda i: len(self.data[key]['post'][i]), indexes)), dtype=int)
res["resp_length"] = np.array(list(map(lambda i: len(self.data[key]['resp'][i]), indexes)), dtype=int)
res_post = res["post"] = np.zeros((batch_size, np.max(res["post_length"])), dtype=int)
res_resp = res["resp"] = np.zeros((batch_size, np.max(res["resp_length"])), dtype=int)
for i, j in enumerate(indexes):
post = self.data[key]['post'][j]
resp = self.data[key]['resp'][j]
res_post[i, :len(post)] = post
res_resp[i, :len(resp)] = resp
res["post_allvocabs"] = res_post.copy()
res["resp_allvocabs"] = res_resp.copy()
res_post[res_post >= self.valid_vocab_len] = self.unk_id
res_resp[res_resp >= self.valid_vocab_len] = self.unk_id
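        # The train split additionally carries the per-sentence reward score read by the Score field.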
        if key == 'train':
            res['score'] = np.array([self.data[key]['score'][i] for i in indexes])
return res
def main():
max_sent_length = 50
loader = TranslationWithScore('./data/iwslt14_raml', 10, max_sent_length, 0, 'nltk', False)
loader.restart("train",batch_size=2,shuffle=True)
q=loader.get_next_batch("train")
print(len(q['score']))
print(q)
if __name__ == '__main__':
main()
| 2.4375 | 2 |
tests/test_data_functions.py | arthurazs/dotapatch | 12 | 12796950 | <reponame>arthurazs/dotapatch
'''Tests for HeropediaData functions'''
from unittest import TestCase, main as unit_main
from dotapatch.data import HeropediaData
import os.path as path
class TestDataFiles(TestCase):
'''Test if files/folders exists'''
@classmethod
def setUpClass(cls):
'''Sets up directory to check'''
cls.DATA_DIR = HeropediaData.DATA_DIR
def test_data_dir_exist(self):
'''file: assert 'data' folder exists'''
self.assertTrue(path.exists(self.DATA_DIR))
def test_item_data_exist(self):
'''file: assert 'itemdata' file exists'''
item_data = HeropediaData.ITEM_DATA
self.assertTrue(path.isfile(path.join(self.DATA_DIR, item_data)))
def test_hero_data_exist(self):
'''file: assert 'herodata' file exists'''
hero_data = HeropediaData.HERO_DATA
self.assertTrue(path.isfile(path.join(self.DATA_DIR, hero_data)))
class TestStringManipulation(TestCase):
'''Tests string manipulation'''
def test_sort_hero_name_change(self):
'''str: sort_hero("wisp") returns "io"'''
dictionary = ('wisp', None)
self.assertEqual('io', HeropediaData.sort_hero(dictionary))
def test_sort_item_name_change(self):
'''str: sort_item("sphere") returns "linken s sphere"'''
dictionary = ('sphere', None)
self.assertEqual(
"linken's_sphere", HeropediaData.sort_item(dictionary))
def test_sort_hero_name_not_change(self):
'''str: sort("io") returns "io"'''
dictionary = ('io', None)
self.assertEqual('io', HeropediaData.sort_hero(dictionary))
def test_sort_item_name_not_change(self):
'''str: sort("linken s sphere") returns "linken s sphere"'''
dictionary = ('linken s sphere', None)
self.assertEqual(
'linken s sphere', HeropediaData.sort_item(dictionary))
if __name__ == '__main__':
unit_main()
| 2.796875 | 3 |