| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4-286 | stringlengths 5-119 | int64 0-191k | stringlengths 1-7 | stringlengths 6-1.03M | stringlengths 6-1.03M | stringclasses 111 values | float64 0.03-1 | stringlengths 0-556k | float64 0.32-5.03 | int64 0-5 |
venv/lib/python3.8/site-packages/requests/compat.py | GiulianaPola/select_repeats | 1 | 9000 | /home/runner/.cache/pip/pool/d1/fc/c7/6cbbdf9c58b6591d28ed792bbd7944946d3f56042698e822a2869787f6 | /home/runner/.cache/pip/pool/d1/fc/c7/6cbbdf9c58b6591d28ed792bbd7944946d3f56042698e822a2869787f6 | none | 1 | 0.768761 | 1 |
|
examples/python-guide/cross_validation_example.py | StatMixedML/GPBoost | 2 | 9001 | <reponame>StatMixedML/GPBoost
# coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#--------------------Cross validation for tree-boosting without GP or random effects----------------
print('Simulating data...')
# Simulate and create your dataset
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
def sim_data(n):
"""Function that simulates data. Two covariates of which only one has an effect"""
X = np.random.rand(n, 2)
# mean function plus noise
y = f1d(X[:, 0]) + np.random.normal(scale=0.1, size=n)
return ([X, y])
# Simulate data
n = 1000
data = sim_data(2 * n)
# create dataset for gpb.train
data_train = gpb.Dataset(data[0][0:n, :], data[1][0:n])
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'metric': {'l2', 'l1'},
'learning_rate': 0.1,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# --------------------Combine tree-boosting and grouped random effects model----------------
print('Simulating data...')
# Simulate data
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.figure("Mean function")
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
n = 1000 # number of samples
np.random.seed(1)
X = np.random.rand(n, 2)
F = f1d(X[:, 0])
# Simulate grouped random effects
m = 25 # number of categories / levels for grouping variable
group = np.arange(n) # grouping variable
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2 # random effect variance
sigma2 = 0.1 ** 2 # error variance
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m) # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n) # simulate error term
y = F + eps + xi # observed data
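# In matrix form, the simulated model is y = F(X) + Z1 * b1 + xi: the non-linear mean
# function plus a grouped random effect b1 ~ N(0, sigma2_1 * I_m), mapped to samples by
# the incidence matrix Z1, plus i.i.d. noise xi ~ N(0, sigma2 * I_n).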
# define GPModel
gp_model = gpb.GPModel(group_data=group)
gp_model.set_optim_params(params={"optimizer_cov": "fisher_scoring"})
# create dataset for gpb.train
data_train = gpb.Dataset(X, y)
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'learning_rate': 0.05,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=False,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# Include random effect predictions for validation (observe the lower test error)
gp_model = gpb.GPModel(group_data=group)
print("Running cross validation for GPBoost model and use_gp_model_for_validation = TRUE")
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=True,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
cvbst.best_iteration
| # coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#--------------------Cross validation for tree-boosting without GP or random effects----------------
print('Simulating data...')
# Simulate and create your dataset
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
def sim_data(n):
"""Function that simulates data. Two covariates of which only one has an effect"""
X = np.random.rand(n, 2)
# mean function plus noise
y = f1d(X[:, 0]) + np.random.normal(scale=0.1, size=n)
return ([X, y])
# Simulate data
n = 1000
data = sim_data(2 * n)
# create dataset for gpb.train
data_train = gpb.Dataset(data[0][0:n, :], data[1][0:n])
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'metric': {'l2', 'l1'},
'learning_rate': 0.1,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# --------------------Combine tree-boosting and grouped random effects model----------------
print('Simulating data...')
# Simulate data
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.figure("Mean function")
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
n = 1000 # number of samples
np.random.seed(1)
X = np.random.rand(n, 2)
F = f1d(X[:, 0])
# Simulate grouped random effects
m = 25 # number of categories / levels for grouping variable
group = np.arange(n) # grouping variable
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2 # random effect variance
sigma2 = 0.1 ** 2 # error variance
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m) # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n) # simulate error term
y = F + eps + xi # observed data
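# In matrix form, the simulated model is y = F(X) + Z1 * b1 + xi: the non-linear mean
# function plus a grouped random effect b1 ~ N(0, sigma2_1 * I_m), mapped to samples by
# the incidence matrix Z1, plus i.i.d. noise xi ~ N(0, sigma2 * I_n).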
# define GPModel
gp_model = gpb.GPModel(group_data=group)
gp_model.set_optim_params(params={"optimizer_cov": "fisher_scoring"})
# create dataset for gpb.train
data_train = gpb.Dataset(X, y)
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'learning_rate': 0.05,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=False,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# Include random effect predictions for validation (observe the lower test error)
gp_model = gpb.GPModel(group_data=group)
print("Running cross validation for GPBoost model and use_gp_model_for_validation = TRUE")
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=True,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
cvbst.best_iteration | en | 0.638467 | # coding: utf-8 # pylint: disable = invalid-name, C0111 #--------------------Cross validation for tree-boosting without GP or random effects---------------- # Simulate and create your dataset Non-linear function for simulation Function that simulates data. Two covariates of which only one has an effect # mean function plus noise # Simulate data # create dataset for gpb.train # specify your configurations as a dict # do cross-validation # --------------------Combine tree-boosting and grouped random effects model---------------- # Simulate data Non-linear function for simulation # number of samples # Simulate grouped random effects # number of categories / levels for grouping variable # grouping variable # incidence matrix relating grouped random effects to samples # random effect variance # error variance # simulate random effects # simulate error term # observed data # define GPModel # create dataset for gpb.train # specify your configurations as a dict # do cross-validation # Include random effect predictions for validation (observe the lower test error) | 3.035678 | 3 |
synapse/rest/synapse/client/unsubscribe.py | Florian-Sabonchi/synapse | 0 | 9002 | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from synapse.api.errors import StoreError
from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
class UnsubscribeResource(DirectServeHtmlResource):
"""
To allow a pusher to be deleted by clicking a link (i.e. a GET request)
"""
SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.notifier = hs.get_notifier()
self.auth = hs.get_auth()
self.pusher_pool = hs.get_pusherpool()
self.macaroon_generator = hs.get_macaroon_generator()
async def _async_render_GET(self, request: SynapseRequest) -> None:
token = parse_string(request, "access_token", required=True)
app_id = parse_string(request, "app_id", required=True)
pushkey = parse_string(request, "pushkey", required=True)
user_id = self.macaroon_generator.verify_delete_pusher_token(
token, app_id, pushkey
)
try:
await self.pusher_pool.remove_pusher(
app_id=app_id, pushkey=pushkey, user_id=user_id
)
except StoreError as se:
if se.code != 404:
# This is fine: they're already unsubscribed
raise
self.notifier.on_new_replication_data()
respond_with_html_bytes(
request,
200,
UnsubscribeResource.SUCCESS_HTML,
)
| # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from synapse.api.errors import StoreError
from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
class UnsubscribeResource(DirectServeHtmlResource):
"""
To allow a pusher to be deleted by clicking a link (i.e. a GET request)
"""
SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.notifier = hs.get_notifier()
self.auth = hs.get_auth()
self.pusher_pool = hs.get_pusherpool()
self.macaroon_generator = hs.get_macaroon_generator()
async def _async_render_GET(self, request: SynapseRequest) -> None:
token = parse_string(request, "access_token", required=True)
app_id = parse_string(request, "app_id", required=True)
pushkey = parse_string(request, "pushkey", required=True)
user_id = self.macaroon_generator.verify_delete_pusher_token(
token, app_id, pushkey
)
try:
await self.pusher_pool.remove_pusher(
app_id=app_id, pushkey=pushkey, user_id=user_id
)
except StoreError as se:
if se.code != 404:
# This is fine: they're already unsubscribed
raise
self.notifier.on_new_replication_data()
respond_with_html_bytes(
request,
200,
UnsubscribeResource.SUCCESS_HTML,
)
| en | 0.876896 | # Copyright 2022 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. To allow pusher to be delete by clicking a link (ie. GET request) # This is fine: they're already unsubscribed | 2.109907 | 2 |
pyhanabi/act_group.py | ravihammond/hanabi-convention-adaptation | 1 | 9003 | <filename>pyhanabi/act_group.py
import set_path
import sys
import torch
set_path.append_sys_path()
import rela
import hanalearn
import utils
assert rela.__file__.endswith(".so")
assert hanalearn.__file__.endswith(".so")
class ActGroup:
def __init__(
self,
devices,
agent,
partner_weight,
seed,
num_thread,
num_game_per_thread,
num_player,
explore_eps,
trinary,
replay_buffer,
max_len,
gamma,
convention,
convention_act_override,
):
self.devices = devices.split(",")
self.seed = seed
self.num_thread = num_thread
self.num_player = num_player
self.num_game_per_thread = num_game_per_thread
self.explore_eps = explore_eps
self.trinary = trinary
self.replay_buffer = replay_buffer
self.max_len = max_len
self.gamma = gamma
self.load_partner_model(partner_weight)
self.model_runners = []
for dev in self.devices:
runner = rela.BatchRunner(agent.clone(dev), dev)
runner.add_method("act", 5000)
runner.add_method("compute_priority", 100)
runner.add_method("compute_target", 5000)
partner_runner = rela.BatchRunner(
self._partner_agent.clone(dev), dev)
partner_runner.add_method("act", 5000)
self.model_runners.append([runner, partner_runner])
self.num_runners = len(self.model_runners)
self.convention = convention
self.convention_act_override = convention_act_override
self.create_r2d2_actors()
def load_partner_model(self, weight_file):
try:
state_dict = torch.load(weight_file)
except:
sys.exit(f"weight_file {weight_file} can't be loaded")
overwrite = {}
overwrite["vdn"] = False
overwrite["device"] = "cuda:0"
overwrite["boltzmann_act"] = False
if "fc_v.weight" in state_dict.keys():
agent, cfg = utils.load_agent(weight_file, overwrite)
self._partner_sad = cfg["sad"] if "sad" in cfg else cfg["greedy_extra"]
self._partner_hide_action = bool(cfg["hide_action"])
else:
agent = utils.load_supervised_agent(weight_file, "cuda:0")
self._partner_sad = False
self._partner_hide_action = False
agent.train(False)
self._partner_agent = agent
def create_r2d2_actors(self):
convention_act_override = [0, 0]
convention_sender = [1, 0]
if self.convention_act_override:
convention_act_override = [0, 1]
convention_sender = [1, 0]
actors = []
for i in range(self.num_thread):
thread_actors = []
for j in range(self.num_game_per_thread):
game_actors = []
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][0],
self.seed,
self.num_player,
0,
self.explore_eps,
[0], # boltzmann_act
False,
0, # sad
0, # shuffle_color
0, # hide_action
self.trinary,
self.replay_buffer,
1, # multi-step
self.max_len,
self.gamma,
self.convention,
1,
0,
True, # convention_fict_act_override
True, # use_experience
)
game_actors.append(actor)
self.seed += 1
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][1], # runner
self.num_player, # numPlayer
1, # playerIdx
False, # vdn
self._partner_sad, # sad
self._partner_hide_action, # hideAction
self.convention, # convention
0, # conventionSender
1) # conventionOverride
game_actors.append(actor)
for k in range(self.num_player):
partners = game_actors[:]
partners[k] = None
game_actors[k].set_partners(partners)
thread_actors.append(game_actors)
actors.append(thread_actors)
self.actors = actors
print("ActGroup created")
def start(self):
for runners in self.model_runners:
for runner in runners:
runner.start()
def update_model(self, agent):
for runner in self.model_runners:
runner[0].update_model(agent)
| <filename>pyhanabi/act_group.py
import set_path
import sys
import torch
set_path.append_sys_path()
import rela
import hanalearn
import utils
assert rela.__file__.endswith(".so")
assert hanalearn.__file__.endswith(".so")
class ActGroup:
def __init__(
self,
devices,
agent,
partner_weight,
seed,
num_thread,
num_game_per_thread,
num_player,
explore_eps,
trinary,
replay_buffer,
max_len,
gamma,
convention,
convention_act_override,
):
self.devices = devices.split(",")
self.seed = seed
self.num_thread = num_thread
self.num_player = num_player
self.num_game_per_thread = num_game_per_thread
self.explore_eps = explore_eps
self.trinary = trinary
self.replay_buffer = replay_buffer
self.max_len = max_len
self.gamma = gamma
self.load_partner_model(partner_weight)
self.model_runners = []
for dev in self.devices:
runner = rela.BatchRunner(agent.clone(dev), dev)
runner.add_method("act", 5000)
runner.add_method("compute_priority", 100)
runner.add_method("compute_target", 5000)
partner_runner = rela.BatchRunner(
self._partner_agent.clone(dev), dev)
partner_runner.add_method("act", 5000)
self.model_runners.append([runner, partner_runner])
self.num_runners = len(self.model_runners)
self.convention = convention
self.convention_act_override = convention_act_override
self.create_r2d2_actors()
def load_partner_model(self, weight_file):
try:
state_dict = torch.load(weight_file)
except:
sys.exit(f"weight_file {weight_file} can't be loaded")
overwrite = {}
overwrite["vdn"] = False
overwrite["device"] = "cuda:0"
overwrite["boltzmann_act"] = False
if "fc_v.weight" in state_dict.keys():
agent, cfg = utils.load_agent(weight_file, overwrite)
self._partner_sad = cfg["sad"] if "sad" in cfg else cfg["greedy_extra"]
self._partner_hide_action = bool(cfg["hide_action"])
else:
agent = utils.load_supervised_agent(weight_file, "cuda:0")
self._partner_sad = False
self._partner_hide_action = False
agent.train(False)
self._partner_agent = agent
def create_r2d2_actors(self):
convention_act_override = [0, 0]
convention_sender = [1, 0]
if self.convention_act_override:
convention_act_override = [0, 1]
convention_sender = [1, 0]
actors = []
for i in range(self.num_thread):
thread_actors = []
for j in range(self.num_game_per_thread):
game_actors = []
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][0],
self.seed,
self.num_player,
0,
self.explore_eps,
[0], # boltzmann_act
False,
0, # sad
0, # shuffle_color
0, # hide_action
self.trinary,
self.replay_buffer,
1, # multi-step
self.max_len,
self.gamma,
self.convention,
1,
0,
True, # convention_fict_act_override
True, # use_experience
)
game_actors.append(actor)
self.seed += 1
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][1], # runner
self.num_player, # numPlayer
1, # playerIdx
False, # vdn
self._partner_sad, # sad
self._partner_hide_action, # hideAction
self.convention, # convention
0, # conventionSender
1) # conventionOverride
game_actors.append(actor)
for k in range(self.num_player):
partners = game_actors[:]
partners[k] = None
game_actors[k].set_partners(partners)
thread_actors.append(game_actors)
actors.append(thread_actors)
self.actors = actors
print("ActGroup created")
def start(self):
for runners in self.model_runners:
for runner in runners:
runner.start()
def update_model(self, agent):
for runner in self.model_runners:
runner[0].update_model(agent)
| en | 0.65857 | # boltzmann_act # sad # shuffle_color # hide_action # multi-step # convention_fict_act_override # use_experience # runner # numPlayer # playerIdx # vdn # sad # hideAction # convention # conventionSender # conventionOverride | 2.130337 | 2 |
A_source_code/carbon/code/make_mask.py | vanHoek-dgnm/CARBON-DISC | 0 | 9004 | # ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
from netCDF4 import Dataset
import numpy as np
import general_path
import accuflux
import ascraster
import get_surrounding_cells
import make_np_grid
def do(mask_asc_fn, mask_id, dum_asc, logical = "EQ", mask_type='np_grid'):
dum_mask = ascraster.create_mask(mask_asc_fn, mask_id, logical = logical, numtype=int)
mask=[]
if mask_type=="rowcol":
for i in dum_mask:
mask.append(dum_asc.get_row_col_from_index(i))
elif mask_type=="index":
for i in dum_mask:
mask.append(i)
elif mask_type=="latlon":
for i in dum_mask:
mask.append(dum_asc.get_coord_from_index(i))
elif mask_type=="np_grid":
mask = np.zeros((dum_asc.nrows, dum_asc.ncols), dtype=bool)
mask[:,:] = True
for i in dum_mask:
row, col = dum_asc.get_row_col_from_index(i)
mask[row,col]=False
return mask
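# Hedged usage sketch (the file name and mask id below are hypothetical; dum_asc must be an
# ascraster grid exposing the nrows/ncols and index-lookup methods used above):
# mask = do("basin.asc", 5, dum_asc, logical="EQ", mask_type="np_grid")
# The returned numpy grid is True everywhere except the cells matching mask_id, which are False.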
| # ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
from netCDF4 import Dataset
import numpy as np
import general_path
import accuflux
import ascraster
import get_surrounding_cells
import make_np_grid
def do(mask_asc_fn, mask_id, dum_asc, logical = "EQ", mask_type='np_grid'):
dum_mask = ascraster.create_mask(mask_asc_fn, mask_id, logical = logical, numtype=int)
mask=[]
if mask_type=="rowcol":
for i in dum_mask:
mask.append(dum_asc.get_row_col_from_index(i))
elif mask_type=="index":
for i in dum_mask:
mask.append(i)
elif mask_type=="latlon":
for i in dum_mask:
mask.append(dum_asc.get_coord_from_index(i))
elif mask_type=="np_grid":
mask = np.zeros((dum_asc.nrows, dum_asc.ncols), dtype=bool)
mask[:,:] = True
for i in dum_mask:
row, col = dum_asc.get_row_col_from_index(i)
mask[row,col]=False
return mask
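# Hedged usage sketch (the file name and mask id below are hypothetical; dum_asc must be an
# ascraster grid exposing the nrows/ncols and index-lookup methods used above):
# mask = do("basin.asc", 5, dum_asc, logical="EQ", mask_type="np_grid")
# The returned numpy grid is True everywhere except the cells matching mask_id, which are False.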
| en | 0.472568 | # ****************************************************** ## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University. ## Reuse permitted under Gnu Public License, GPL v3. # ****************************************************** | 1.907263 | 2 |
Code/Dataset.py | gitFloyd/AAI-Project-2 | 0 | 9005 | from io import TextIOWrapper
import math
from typing import TypeVar
import random
import os
from Settings import Settings
class Dataset:
DataT = TypeVar('DataT')
WIN_NL = "\r\n"
LINUX_NL = "\n"
def __init__(self, path:str, filename:str, newline:str = WIN_NL) -> None:
self.path_ = path
self.filename_ = filename
self.loaded_ = False
self.parsed_ = False
self.data_ = None
self.nl = newline
self.classes_ = set()
self.attributes_ = []
self.types_ = []
self.data_ = []
def Data(self) -> list:
return self.data_
def Attributes(self) -> list:
return self.attributes_
def Types(self) -> list:
return self.types_
def Classes(self) -> list:
return self.classes_
def Load(self, reload:bool = False) -> DataT:
if not self.loaded_ or reload:
self.file_ = open(os.sep.join([self.path_, self.filename_]))
self.loaded_ = True
# If we reload, then we want to reparse as well.
return self.Parse_(reload)
def Parse_(self, reparse:bool = False) -> DataT:
if not self.loaded_:
# Silently return instead of raising an exception because
# this method is not intended to be used outside of the
# class. Although, it can be used that way if needed.
return
if not self.parsed_ or reparse:
self.Parse_Hook_(self.file_.read())
return self.data_
def Parse_Hook_(self, data:str) -> None:
self.data_ = data
def __del__(self):
if self.loaded_:
self.file_.close()
class ArffRow:
ATTR_LABEL = '@ATTRIBUTE ' # need the space at the end here
DATA_LABEL = '@DATA'
ATTR_LEN = len(ATTR_LABEL)
DATA_LEN = len(DATA_LABEL)
Attributes = []
Types = []
Data = []
Classes = set()
IsCollecting_ = False
@classmethod
def Reset(cls):
cls.Attributes = []
cls.Types = []
cls.Data = []
cls.Classes = set()
cls.IsCollecting_ = False
def __init__(self, line:str, nl:str) -> None:
self.line_ = line
self.len_ = len(line)
self.nl_ = nl
def Len(self) -> str:
return self.len_
def HasAttributeLabel(self) -> bool:
return self.len_ >= ArffRow.ATTR_LEN and self.line_[0:ArffRow.ATTR_LEN] == ArffRow.ATTR_LABEL
def HasDataLabel(self) -> bool:
return self.len_ >= ArffRow.DATA_LEN and self.line_[0:ArffRow.DATA_LEN] == ArffRow.DATA_LABEL
def GetAttributeData(self) -> tuple[str, str]:
namePosition = 0
for (i, char) in enumerate(self.line_[ArffRow.ATTR_LEN:]):
if char == '\t':
namePosition = i + ArffRow.ATTR_LEN
break
return (self.line_[ArffRow.ATTR_LEN:namePosition], self.line_[namePosition + 1:])
def Parse(self):
if ArffRow.IsCollecting_ and self.len_ > 1:
ArffRow.Data.append(self.line_.split(','))
ArffRow.Classes.add(ArffRow.Data[-1][-1])
elif self.HasDataLabel():
ArffRow.IsCollecting_ = True
elif self.HasAttributeLabel():
attrData = self.GetAttributeData()
ArffRow.Attributes.append(attrData[0])
ArffRow.Types.append(attrData[1])
class ArffDataset(Dataset):
# ARFF (Attribute-Relation File Format)
#def __init__(self, path:str, filename:str, newline:str = Dataset.WIN_NL) -> None:
# super().__init__(path, filename, newline)
#
# self.parser_ = {
# 'attributesLoaded': False,
# }
def Parse_Hook_(self, data:str) -> None:
ArffRow.Reset()
rows = [ArffRow(line, self.nl) for line in data.split(self.nl)]
for row in rows:
row.Parse()
for attribute in ArffRow.Attributes:
self.attributes_.append(attribute)
for typeName in ArffRow.Types:
self.types_.append(typeName)
for datum in ArffRow.Data:
self.data_.append(datum)
self.classes_ = self.classes_.union(ArffRow.Classes)
classes = list(self.classes_)
attribute_maxes = {}
for row in self.data_:
classIndex = classes.index(row[-1])
row[-1] = [1 if i == classIndex else 0 for (i, value) in enumerate(classes)]
for i in range(len(row)):
if self.types_[i] == 'REAL':
row[i] = float(row[i])
elif self.types_[i] == 'INTEGER':
row[i] = int(row[i])
else:
continue
if i not in attribute_maxes:
attribute_maxes[i] = 0
if abs(row[i]) > attribute_maxes[i]:
attribute_maxes[i] = row[i]
for i in range(len(row)):
if self.types_[i] == 'REAL' or self.types_[i] == 'INTEGER':
row[i] = row[i] / attribute_maxes[i]
self.data_ = self.RowSort(self.data_)
def LexOrder(self, item1, item2):
num_fields = len(item1)
for i in range(num_fields):
if item1[i] != item2[i]:
if item1[i] < item2[i]:
return -1
else:
return 1
return 0
def RowSort(self, rows):
rows_len = len(rows)
if rows_len > 2:
result1 = self.RowSort(rows[0: math.floor(rows_len * 0.5)])
result2 = self.RowSort(rows[math.floor(rows_len * 0.5):])
sorted_rows = []
item1 = None
item2 = None
while len(result1) > 0 or len(result2) > 0:
if len(result1) > 0 and len(result2) > 0 and item1 == None and item2 == None:
item1 = result1.pop(0)
item2 = result2.pop(0)
elif len(result1) > 0 and item1 == None:
item1 = result1.pop(0)
elif len(result2) > 0 and item2 == None:
item2 = result2.pop(0)
order = 0
if item1 == None and item2 != None:
order = 1
elif item1 != None and item2 == None:
order = -1
else:
order = self.LexOrder(item1, item2)
if order == -1:
sorted_rows.append(item1)
item1 = None
elif order == 1:
sorted_rows.append(item2)
item2 = None
else:
sorted_rows.append(item1)
sorted_rows.append(item2)
item1 = None
item2 = None
if item1 != None:
sorted_rows.append(item1)
if item2 != None:
sorted_rows.append(item2)
return sorted_rows
elif rows_len == 1:
return rows
else:
order = self.LexOrder(rows[0], rows[1])
if order == 1:
rows.reverse()
return rows
def Fetch(self, *fields:list[str], limit:int = None, offset:int = 0):
cols = []
data = []
# iterate over the field names and find the column indices
# for names that match the requested field names
for (i, field) in enumerate(fields):
try:
cols.append(self.attributes_.index(field))
except ValueError:
pass
end = limit
if limit != None:
end += offset
for row in self.data_[offset:end]:
data.append([row[i] for i in cols])
return data
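# Hedged usage sketch (the attribute names are hypothetical, not taken from a real ARFF file):
# ds = Pistachio(Dataset.LINUX_NL)
# ds.Load()
# rows = ds.Fetch("AREA", "PERIMETER", limit=10, offset=0)
# This returns 10 [AREA, PERIMETER] pairs; any requested name that is not a known
# attribute is silently skipped.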
def FetchFilter_(self, i, value):
# Not used any more
#if self.types_[i] == 'REAL':
# return float(value)
#elif self.types_[i] == 'INTEGER':
# return int(value)
#else:
# return value
pass
def Size(self):
length = len(self.data_)
if length == 0:
return (len(self.data_), None)
return (len(self.data_), len(self.data_[0]))
def Shuffle(self):
random.shuffle(self.data_)
class Pistachio(ArffDataset):
SettingsKey = 'PistachioDataset'
def __init__(self, newline:str = Dataset.WIN_NL) -> None:
settings = Settings.Data()
super().__init__(
path = settings[Pistachio.SettingsKey]['Path'],
filename = settings[Pistachio.SettingsKey]['FileName'],
newline = newline
)
#pist = Pistachio(Dataset.LINUX_NL)
#
#for row in pist.Load()[0:10]:
# print(row)
| from io import TextIOWrapper
import math
from typing import TypeVar
import random
import os
from Settings import Settings
class Dataset:
DataT = TypeVar('DataT')
WIN_NL = "\r\n"
LINUX_NL = "\n"
def __init__(self, path:str, filename:str, newline:str = WIN_NL) -> None:
self.path_ = path
self.filename_ = filename
self.loaded_ = False
self.parsed_ = False
self.data_ = None
self.nl = newline
self.classes_ = set()
self.attributes_ = []
self.types_ = []
self.data_ = []
def Data(self) -> list:
return self.data_
def Attributes(self) -> list:
return self.attributes_
def Types(self) -> list:
return self.types_
def Classes(self) -> list:
return self.classes_
def Load(self, reload:bool = False) -> DataT:
if not self.loaded_ or reload:
self.file_ = open(os.sep.join([self.path_, self.filename_]))
self.loaded_ = True
# If we reload, then we want to reparse as well.
return self.Parse_(reload)
def Parse_(self, reparse:bool = False) -> DataT:
if not self.loaded_:
# Silently return instead of raising an exception because
# this method is not intended to be used outside of the
# class. Although, it can be used that way if needed.
return
if not self.parsed_ or reparse:
self.Parse_Hook_(self.file_.read())
return self.data_
def Parse_Hook_(self, data:str) -> None:
self.data_ = data
def __del__(self):
if self.loaded_:
self.file_.close()
class ArffRow:
ATTR_LABEL = '@ATTRIBUTE ' # need the space at the end here
DATA_LABEL = '@DATA'
ATTR_LEN = len(ATTR_LABEL)
DATA_LEN = len(DATA_LABEL)
Attributes = []
Types = []
Data = []
Classes = set()
IsCollecting_ = False
@classmethod
def Reset(cls):
cls.Attributes = []
cls.Types = []
cls.Data = []
cls.Classes = set()
cls.IsCollecting_ = False
def __init__(self, line:str, nl:str) -> None:
self.line_ = line
self.len_ = len(line)
self.nl_ = nl
def Len(self) -> str:
return self.len_
def HasAttributeLabel(self) -> bool:
return self.len_ >= ArffRow.ATTR_LEN and self.line_[0:ArffRow.ATTR_LEN] == ArffRow.ATTR_LABEL
def HasDataLabel(self) -> bool:
return self.len_ >= ArffRow.DATA_LEN and self.line_[0:ArffRow.DATA_LEN] == ArffRow.DATA_LABEL
def GetAttributeData(self) -> tuple[str, str]:
namePosition = 0
for (i, char) in enumerate(self.line_[ArffRow.ATTR_LEN:]):
if char == '\t':
namePosition = i + ArffRow.ATTR_LEN
break
return (self.line_[ArffRow.ATTR_LEN:namePosition], self.line_[namePosition + 1:])
def Parse(self):
if ArffRow.IsCollecting_ and self.len_ > 1:
ArffRow.Data.append(self.line_.split(','))
ArffRow.Classes.add(ArffRow.Data[-1][-1])
elif self.HasDataLabel():
ArffRow.IsCollecting_ = True
elif self.HasAttributeLabel():
attrData = self.GetAttributeData()
ArffRow.Attributes.append(attrData[0])
ArffRow.Types.append(attrData[1])
class ArffDataset(Dataset):
# ARFF (Attribute-Relation File Format)
#def __init__(self, path:str, filename:str, newline:str = Dataset.WIN_NL) -> None:
# super().__init__(path, filename, newline)
#
# self.parser_ = {
# 'attributesLoaded': False,
# }
def Parse_Hook_(self, data:str) -> None:
ArffRow.Reset()
rows = [ArffRow(line, self.nl) for line in data.split(self.nl)]
for row in rows:
row.Parse()
for attribute in ArffRow.Attributes:
self.attributes_.append(attribute)
for typeName in ArffRow.Types:
self.types_.append(typeName)
for datum in ArffRow.Data:
self.data_.append(datum)
self.classes_ = self.classes_.union(ArffRow.Classes)
classes = list(self.classes_)
attribute_maxes = {}
for row in self.data_:
classIndex = classes.index(row[-1])
row[-1] = [1 if i == classIndex else 0 for (i, value) in enumerate(classes)]
for i in range(len(row)):
if self.types_[i] == 'REAL':
row[i] = float(row[i])
elif self.types_[i] == 'INTEGER':
row[i] = int(row[i])
else:
continue
if i not in attribute_maxes:
attribute_maxes[i] = 0
if abs(row[i]) > attribute_maxes[i]:
attribute_maxes[i] = row[i]
for i in range(len(row)):
if self.types_[i] == 'REAL' or self.types_[i] == 'INTEGER':
row[i] = row[i] / attribute_maxes[i]
self.data_ = self.RowSort(self.data_)
def LexOrder(self, item1, item2):
num_fields = len(item1)
for i in range(num_fields):
if item1[i] != item2[i]:
if item1[i] < item2[i]:
return -1
else:
return 1
return 0
def RowSort(self, rows):
rows_len = len(rows)
if rows_len > 2:
result1 = self.RowSort(rows[0: math.floor(rows_len * 0.5)])
result2 = self.RowSort(rows[math.floor(rows_len * 0.5):])
sorted_rows = []
item1 = None
item2 = None
while len(result1) > 0 or len(result2) > 0:
if len(result1) > 0 and len(result2) > 0 and item1 == None and item2 == None:
item1 = result1.pop(0)
item2 = result2.pop(0)
elif len(result1) > 0 and item1 == None:
item1 = result1.pop(0)
elif len(result2) > 0 and item2 == None:
item2 = result2.pop(0)
order = 0
if item1 == None and item2 != None:
order = 1
elif item1 != None and item2 == None:
order = -1
else:
order = self.LexOrder(item1, item2)
if order == -1:
sorted_rows.append(item1)
item1 = None
elif order == 1:
sorted_rows.append(item2)
item2 = None
else:
sorted_rows.append(item1)
sorted_rows.append(item2)
item1 = None
item2 = None
if item1 != None:
sorted_rows.append(item1)
if item2 != None:
sorted_rows.append(item2)
return sorted_rows
elif rows_len == 1:
return rows
else:
order = self.LexOrder(rows[0], rows[1])
if order == 1:
rows.reverse()
return rows
def Fetch(self, *fields:list[str], limit:int = None, offset:int = 0):
cols = []
data = []
# iterate over the field names and find the column indices
# for names that match the requested field names
for (i, field) in enumerate(fields):
try:
cols.append(self.attributes_.index(field))
except ValueError:
pass
end = limit
if limit != None:
end += offset
for row in self.data_[offset:end]:
data.append([row[i] for i in cols])
return data
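# Hedged usage sketch (the attribute names are hypothetical, not taken from a real ARFF file):
# ds = Pistachio(Dataset.LINUX_NL)
# ds.Load()
# rows = ds.Fetch("AREA", "PERIMETER", limit=10, offset=0)
# This returns 10 [AREA, PERIMETER] pairs; any requested name that is not a known
# attribute is silently skipped.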
def FetchFilter_(self, i, value):
# Not used any more
#if self.types_[i] == 'REAL':
# return float(value)
#elif self.types_[i] == 'INTEGER':
# return int(value)
#else:
# return value
pass
def Size(self):
length = len(self.data_)
if length == 0:
return (len(self.data_), None)
return (len(self.data_), len(self.data_[0]))
def Shuffle(self):
random.shuffle(self.data_)
class Pistachio(ArffDataset):
SettingsKey = 'PistachioDataset'
def __init__(self, newline:str = Dataset.WIN_NL) -> None:
settings = Settings.Data()
super().__init__(
path = settings[Pistachio.SettingsKey]['Path'],
filename = settings[Pistachio.SettingsKey]['FileName'],
newline = newline
)
#pist = Pistachio(Dataset.LINUX_NL)
#
#for row in pist.Load()[0:10]:
# print(row)
| en | 0.597702 | # If we reload, then we want to reparse as well. # Silently return instead of raising an exception because # this method is not intended to be used outside of the # class. Although, it can be used that way if needed. # need the space at the end here # ARFF (Attribute-Relation File Format) #def __init__(self, path:str, filename:str, newline:str = Dataset.WIN_NL) -> None: # super().__init__(path, filename, newline) # # self.parser_ = { # 'attributesLoaded': False, # } # iterate over the field names and find the column indices # for names that match the requested field names # Not used any more #if self.types_[i] == 'REAL': # return float(value) #elif self.types_[i] == 'INTEGER': # return int(value) #else: # return value #pist = Pistachio(Dataset.LINUX_NL) # #for row in pist.Load()[0:10]: # print(row) | 2.554012 | 3 |
payments/models.py | wahuneke/django-stripe-payments | 0 | 9006 | <reponame>wahuneke/django-stripe-payments<filename>payments/models.py
import datetime
import decimal
import json
import traceback
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
import stripe
from jsonfield.fields import JSONField
from .managers import CustomerManager, ChargeManager, TransferManager
from .settings import (
DEFAULT_PLAN,
INVOICE_FROM_EMAIL,
PAYMENTS_PLANS,
plan_from_stripe_id,
SEND_EMAIL_RECEIPTS,
TRIAL_PERIOD_FOR_USER_CALLBACK,
PLAN_QUANTITY_CALLBACK
)
from .signals import (
cancelled,
card_changed,
subscription_made,
webhook_processing_error,
WEBHOOK_SIGNALS,
)
from .utils import convert_tstamp
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.api_version = getattr(settings, "STRIPE_API_VERSION", "2012-11-07")
class StripeObject(models.Model):
stripe_id = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(default=timezone.now)
class Meta: # pylint: disable=E0012,C1001
abstract = True
class EventProcessingException(models.Model):
event = models.ForeignKey("Event", null=True)
data = models.TextField()
message = models.CharField(max_length=500)
traceback = models.TextField()
created_at = models.DateTimeField(default=timezone.now)
@classmethod
def log(cls, data, exception, event):
cls.objects.create(
event=event,
data=data or "",
message=str(exception),
traceback=traceback.format_exc()
)
def __unicode__(self):
return u"<%s, pk=%s, Event=%s>" % (self.message, self.pk, self.event)
class Event(StripeObject):
kind = models.CharField(max_length=250)
livemode = models.BooleanField()
customer = models.ForeignKey("Customer", null=True)
webhook_message = JSONField()
validated_message = JSONField(null=True)
valid = models.NullBooleanField(null=True)
processed = models.BooleanField(default=False)
stripe_connect = models.ForeignKey('ConnectUser', null=True)
@property
def message(self):
return self.validated_message
def __unicode__(self):
return "%s - %s" % (self.kind, self.stripe_id)
def link_customer(self):
cus_id = None
customer_crud_events = [
"customer.created",
"customer.updated",
"customer.deleted"
]
if self.kind in customer_crud_events:
cus_id = self.message["data"]["object"]["id"]
else:
cus_id = self.message["data"]["object"].get("customer", None)
if cus_id is not None:
try:
self.customer = Customer.objects.get(stripe_id=cus_id)
self.save()
except Customer.DoesNotExist:
pass
def link_stripe_connect(self):
connect_id = self.message["data"]["object"].get("user_id", None)
if connect_id is not None:
try:
self.stripe_connect = ConnectUser.objects.get(account_id=connect_id)
self.save()
except ConnectUser.DoesNotExist:
pass
def validate(self):
evt = stripe.Event.retrieve(self.stripe_id)
self.validated_message = json.loads(
json.dumps(
evt.to_dict(),
sort_keys=True,
cls=stripe.StripeObjectEncoder
)
)
if self.webhook_message["data"] == self.validated_message["data"]:
self.valid = True
else:
self.valid = False
self.save()
def process(self):
"""
"account.updated",
"account.application.deauthorized",
"charge.succeeded",
"charge.failed",
"charge.refunded",
"charge.dispute.created",
"charge.dispute.updated",
"chagne.dispute.closed",
"customer.created",
"customer.updated",
"customer.deleted",
"customer.subscription.created",
"customer.subscription.updated",
"customer.subscription.deleted",
"customer.subscription.trial_will_end",
"customer.discount.created",
"customer.discount.updated",
"customer.discount.deleted",
"invoice.created",
"invoice.updated",
"invoice.payment_succeeded",
"invoice.payment_failed",
"invoiceitem.created",
"invoiceitem.updated",
"invoiceitem.deleted",
"plan.created",
"plan.updated",
"plan.deleted",
"coupon.created",
"coupon.updated",
"coupon.deleted",
"transfer.created",
"transfer.updated",
"transfer.failed",
"ping"
"""
if self.valid and not self.processed:
try:
if not self.kind.startswith("plan.") and \
not self.kind.startswith("transfer."):
self.link_customer()
if not self.stripe_connect:
self.link_stripe_connect()
if self.kind.startswith("invoice."):
Invoice.handle_event(self)
elif self.kind.startswith("charge."):
if not self.customer:
self.link_customer()
self.customer.record_charge(
self.message["data"]["object"]["id"]
)
elif self.kind.startswith("transfer."):
Transfer.process_transfer(
self,
self.message["data"]["object"]
)
elif self.kind.startswith("customer.subscription."):
if not self.customer:
self.link_customer()
if self.customer:
self.customer.sync_current_subscription()
elif self.kind == "customer.deleted":
if not self.customer:
self.link_customer()
self.customer.purge()
self.send_signal()
self.processed = True
self.save()
except stripe.StripeError, e:
EventProcessingException.log(
data=e.http_body,
exception=e,
event=self
)
webhook_processing_error.send(
sender=Event,
data=e.http_body,
exception=e
)
def send_signal(self):
signal = WEBHOOK_SIGNALS.get(self.kind)
if signal:
return signal.send(sender=Event, event=self)
class Transfer(StripeObject):
# pylint: disable=C0301
event = models.ForeignKey(Event, related_name="transfers")
amount = models.DecimalField(decimal_places=2, max_digits=9)
status = models.CharField(max_length=25)
date = models.DateTimeField()
description = models.TextField(null=True, blank=True)
adjustment_count = models.IntegerField(null=True)
adjustment_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
adjustment_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
charge_count = models.IntegerField(null=True)
charge_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
charge_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
collected_fee_count = models.IntegerField(null=True)
collected_fee_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
net = models.DecimalField(decimal_places=2, max_digits=9, null=True)
refund_count = models.IntegerField(null=True)
refund_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
refund_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
validation_count = models.IntegerField(null=True)
validation_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
stripe_connect = models.ForeignKey('ConnectUser', null=True)
objects = TransferManager()
def update_status(self):
self.status = stripe.Transfer.retrieve(self.stripe_id).status
self.save()
@classmethod
def process_transfer(cls, event, transfer):
defaults = {
"amount": transfer["amount"] / decimal.Decimal("100"),
"status": transfer["status"],
"date": convert_tstamp(transfer, "date"),
"description": transfer.get("description", "")
}
summary = transfer.get("summary")
if summary:
defaults.update({
"adjustment_count": summary.get("adjustment_count"),
"adjustment_fees": summary.get("adjustment_fees"),
"adjustment_gross": summary.get("adjustment_gross"),
"charge_count": summary.get("charge_count"),
"charge_fees": summary.get("charge_fees"),
"charge_gross": summary.get("charge_gross"),
"collected_fee_count": summary.get("collected_fee_count"),
"collected_fee_gross": summary.get("collected_fee_gross"),
"refund_count": summary.get("refund_count"),
"refund_fees": summary.get("refund_fees"),
"refund_gross": summary.get("refund_gross"),
"validation_count": summary.get("validation_count"),
"validation_fees": summary.get("validation_fees"),
"net": summary.get("net") / decimal.Decimal("100")
})
for field in defaults:
if field.endswith("fees") or field.endswith("gross"):
defaults[field] = defaults[field] / decimal.Decimal("100")
if event.kind == "transfer.paid":
defaults.update({"event": event})
obj, created = Transfer.objects.get_or_create(
stripe_id=transfer["id"],
defaults=defaults
)
else:
obj, created = Transfer.objects.get_or_create(
stripe_id=transfer["id"],
event=event,
defaults=defaults
)
if event.stripe_connect:
obj.stripe_connect = event.stripe_connect
if created and summary:
for fee in summary.get("charge_fee_details", []):
obj.charge_fee_details.create(
amount=fee["amount"] / decimal.Decimal("100"),
application=fee.get("application", ""),
description=fee.get("description", ""),
kind=fee["type"]
)
else:
obj.status = transfer["status"]
obj.save()
if event.kind == "transfer.updated":
obj.update_status()
class TransferChargeFee(models.Model):
transfer = models.ForeignKey(Transfer, related_name="charge_fee_details")
amount = models.DecimalField(decimal_places=2, max_digits=7)
application = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
kind = models.CharField(max_length=150)
created_at = models.DateTimeField(default=timezone.now)
class Customer(StripeObject):
user = models.OneToOneField(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True
)
card_fingerprint = models.CharField(max_length=200, blank=True)
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
date_purged = models.DateTimeField(null=True, editable=False)
objects = CustomerManager()
def __unicode__(self):
return unicode(self.user)
@property
def stripe_customer(self):
return stripe.Customer.retrieve(self.stripe_id)
def purge(self):
try:
self.stripe_customer.delete()
except stripe.InvalidRequestError as e:
if e.message.startswith("No such customer:"):
# The exception was thrown because the customer was already
# deleted on the stripe side, ignore the exception
pass
else:
# The exception was raised for another reason, re-raise it
raise
self.user = None
self.card_fingerprint = ""
self.card_last_4 = ""
self.card_kind = ""
self.date_purged = timezone.now()
self.save()
def delete(self, using=None):
# Only way to delete a customer is to use SQL
self.purge()
def can_charge(self):
return self.card_fingerprint and \
self.card_last_4 and \
self.card_kind and \
self.date_purged is None
def has_active_subscription(self):
try:
return self.current_subscription.is_valid()
except CurrentSubscription.DoesNotExist:
return False
def cancel(self, at_period_end=True):
try:
current = self.current_subscription
except CurrentSubscription.DoesNotExist:
return
sub = self.stripe_customer.cancel_subscription(
at_period_end=at_period_end
)
current.status = sub.status
current.cancel_at_period_end = sub.cancel_at_period_end
current.current_period_end = convert_tstamp(sub, "current_period_end")
current.save()
cancelled.send(sender=self, stripe_response=sub)
@classmethod
def create(cls, user, card=None, plan=None, charge_immediately=True):
if card and plan:
plan = PAYMENTS_PLANS[plan]["stripe_plan_id"]
elif DEFAULT_PLAN:
plan = PAYMENTS_PLANS[DEFAULT_PLAN]["stripe_plan_id"]
else:
plan = None
trial_end = None
if TRIAL_PERIOD_FOR_USER_CALLBACK and plan:
trial_days = TRIAL_PERIOD_FOR_USER_CALLBACK(user)
trial_end = datetime.datetime.utcnow() + datetime.timedelta(
days=trial_days
)
stripe_customer = stripe.Customer.create(
email=user.email,
card=card,
plan=plan or DEFAULT_PLAN,
trial_end=trial_end
)
if stripe_customer.active_card:
cus = cls.objects.create(
user=user,
stripe_id=stripe_customer.id,
card_fingerprint=stripe_customer.active_card.fingerprint,
card_last_4=stripe_customer.active_card.last4,
card_kind=stripe_customer.active_card.type
)
else:
cus = cls.objects.create(
user=user,
stripe_id=stripe_customer.id,
)
if plan:
if stripe_customer.subscription:
cus.sync_current_subscription(cu=stripe_customer)
if charge_immediately:
cus.send_invoice()
return cus
def update_card(self, token):
cu = self.stripe_customer
cu.card = token
cu.save()
self.save_card(cu)
def save_card(self, cu=None):
cu = cu or self.stripe_customer
active_card = cu.active_card
self.card_fingerprint = active_card.fingerprint
self.card_last_4 = active_card.last4
self.card_kind = active_card.type
self.save()
card_changed.send(sender=self, stripe_response=cu)
def retry_unpaid_invoices(self):
self.sync_invoices()
for inv in self.invoices.filter(paid=False, closed=False):
try:
inv.retry() # Always retry unpaid invoices
except stripe.InvalidRequestError, error:
if error.message != "Invoice is already paid":
raise error
def send_invoice(self):
try:
invoice = stripe.Invoice.create(customer=self.stripe_id)
if invoice.amount_due > 0:
invoice.pay()
return True
except stripe.InvalidRequestError:
return False # There was nothing to invoice
def sync(self, cu=None):
cu = cu or self.stripe_customer
updated = False
if hasattr(cu, "active_card") and cu.active_card:
# Test to make sure the card has changed, otherwise do not update it
# (i.e. refrain from sending any signals)
if (self.card_last_4 != cu.active_card.last4 or
self.card_fingerprint != cu.active_card.fingerprint or
self.card_kind != cu.active_card.type):
updated = True
self.card_last_4 = cu.active_card.last4
self.card_fingerprint = cu.active_card.fingerprint
self.card_kind = cu.active_card.type
else:
updated = True
self.card_fingerprint = ""
self.card_last_4 = ""
self.card_kind = ""
if updated:
self.save()
card_changed.send(sender=self, stripe_response=cu)
def sync_invoices(self, cu=None):
cu = cu or self.stripe_customer
for invoice in cu.invoices().data:
Invoice.sync_from_stripe_data(invoice, send_receipt=False)
def sync_charges(self, cu=None):
cu = cu or self.stripe_customer
for charge in cu.charges().data:
self.record_charge(charge.id)
def sync_current_subscription(self, cu=None):
cu = cu or self.stripe_customer
sub = getattr(cu, "subscription", None)
if sub is None:
try:
self.current_subscription.delete()
except CurrentSubscription.DoesNotExist:
pass
else:
try:
sub_obj = self.current_subscription
sub_obj.plan = plan_from_stripe_id(sub.plan.id)
sub_obj.current_period_start = convert_tstamp(
sub.current_period_start
)
sub_obj.current_period_end = convert_tstamp(
sub.current_period_end
)
sub_obj.amount = (sub.plan.amount / decimal.Decimal("100"))
sub_obj.status = sub.status
sub_obj.cancel_at_period_end = sub.cancel_at_period_end
sub_obj.start = convert_tstamp(sub.start)
sub_obj.quantity = sub.quantity
sub_obj.save()
except CurrentSubscription.DoesNotExist:
sub_obj = CurrentSubscription.objects.create(
customer=self,
plan=plan_from_stripe_id(sub.plan.id),
current_period_start=convert_tstamp(
sub.current_period_start
),
current_period_end=convert_tstamp(
sub.current_period_end
),
amount=(sub.plan.amount / decimal.Decimal("100")),
status=sub.status,
cancel_at_period_end=sub.cancel_at_period_end,
start=convert_tstamp(sub.start),
quantity=sub.quantity
)
if sub.trial_start and sub.trial_end:
sub_obj.trial_start = convert_tstamp(sub.trial_start)
sub_obj.trial_end = convert_tstamp(sub.trial_end)
sub_obj.save()
return sub_obj
def update_plan_quantity(self, quantity, charge_immediately=False):
self.subscribe(
plan=plan_from_stripe_id(
self.stripe_customer.subscription.plan.id
),
quantity=quantity,
charge_immediately=charge_immediately
)
def subscribe(self, plan, quantity=None, trial_days=None,
charge_immediately=True, token=None, coupon=None):
if quantity is None:
if PLAN_QUANTITY_CALLBACK is not None:
quantity = PLAN_QUANTITY_CALLBACK(self)
else:
quantity = 1
cu = self.stripe_customer
subscription_params = {}
if trial_days:
subscription_params["trial_end"] = \
datetime.datetime.utcnow() + datetime.timedelta(days=trial_days)
if token:
subscription_params["card"] = token
subscription_params["plan"] = PAYMENTS_PLANS[plan]["stripe_plan_id"]
subscription_params["quantity"] = quantity
subscription_params["coupon"] = coupon
resp = cu.update_subscription(**subscription_params)
if token:
# Refetch the stripe customer so we have the updated card info
cu = self.stripe_customer
self.save_card(cu)
self.sync_current_subscription(cu)
if charge_immediately:
self.send_invoice()
subscription_made.send(sender=self, plan=plan, stripe_response=resp)
return resp
def charge(self, amount, currency="usd", description=None,
send_receipt=True, application_fee=None,
stripe_connect_user=None):
"""
This method expects `amount` and 'application_fee' to be a Decimal type representing a
dollar amount. It will be converted to cents so any decimals beyond
two will be ignored.
"""
if not isinstance(amount, decimal.Decimal) or (not application_fee is None and not isinstance(application_fee, decimal.Decimal)):
raise ValueError(
"You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
)
charge_args = {
'amount': int(amount * 100),
'currency': currency,
'description': description,
}
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
charge_args['card'] = stripe.Token.create(customer=self.stripe_id, api_key=stripe_connect_user.stripe_access_token)
charge_args['api_key'] = stripe_connect_user.stripe_access_token
else:
charge_args['customer'] = self.stripe_id
if application_fee:
charge_args['application_fee'] = int(application_fee * 100)
resp = stripe.Charge.create(**charge_args)
obj = self.record_charge(resp["id"], stripe_connect_user)
if send_receipt:
obj.send_receipt()
return obj
def record_charge(self, charge_id, stripe_connect_user=None):
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
data = stripe.Charge.retrieve(charge_id, api_key=stripe_connect_user.stripe_access_token)
else:
data = stripe.Charge.retrieve(charge_id)
return Charge.sync_from_stripe_data(data)
class ConnectUser(models.Model):
"""
A user in your system who you may be routing payments to through "Stripe Connect"
"""
user = models.OneToOneField(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True
)
# when a webhook is received for an action related to a ConnectUser, a 'user_id' will be provided
# This is the same as an account id
account_id = models.CharField(max_length=100)
stripe_access_token = models.CharField(max_length=100)
stripe_publishable_key = models.CharField(max_length=100)
@staticmethod
def account_id_lookup(stripe_access_token):
data = stripe.Account.retrieve(api_key=stripe_access_token)
return data.get('id', None)
def __unicode__(self):
return unicode(self.user)
class CurrentSubscription(models.Model):
customer = models.OneToOneField(
Customer,
related_name="current_subscription",
null=True
)
plan = models.CharField(max_length=100)
quantity = models.IntegerField()
start = models.DateTimeField()
# trialing, active, past_due, canceled, or unpaid
status = models.CharField(max_length=25)
cancel_at_period_end = models.BooleanField(default=False)
canceled_at = models.DateTimeField(blank=True, null=True)
current_period_end = models.DateTimeField(blank=True, null=True)
current_period_start = models.DateTimeField(blank=True, null=True)
ended_at = models.DateTimeField(blank=True, null=True)
trial_end = models.DateTimeField(blank=True, null=True)
trial_start = models.DateTimeField(blank=True, null=True)
amount = models.DecimalField(decimal_places=2, max_digits=7)
created_at = models.DateTimeField(default=timezone.now)
@property
def total_amount(self):
return self.amount * self.quantity
def plan_display(self):
return PAYMENTS_PLANS[self.plan]["name"]
def status_display(self):
return self.status.replace("_", " ").title()
def is_period_current(self):
return self.current_period_end > timezone.now()
def is_status_current(self):
return self.status in ["trialing", "active"]
def is_valid(self):
if not self.is_status_current():
return False
if self.cancel_at_period_end and not self.is_period_current():
return False
return True
def delete(self, using=None): # pylint: disable=E1002
"""
Set values to None while deleting the object so that any lingering
references will not show previous values (such as when an Event
signal is triggered after a subscription has been deleted)
"""
super(CurrentSubscription, self).delete(using=using)
self.plan = None
self.status = None
self.quantity = 0
self.amount = 0
class Invoice(models.Model):
stripe_id = models.CharField(max_length=255)
customer = models.ForeignKey(Customer, related_name="invoices")
attempted = models.NullBooleanField()
attempts = models.PositiveIntegerField(null=True)
closed = models.BooleanField(default=False)
paid = models.BooleanField(default=False)
period_end = models.DateTimeField()
period_start = models.DateTimeField()
subtotal = models.DecimalField(decimal_places=2, max_digits=7)
total = models.DecimalField(decimal_places=2, max_digits=7)
date = models.DateTimeField()
charge = models.CharField(max_length=50, blank=True)
created_at = models.DateTimeField(default=timezone.now)
stripe_connect = models.ForeignKey(ConnectUser, null=True)
class Meta: # pylint: disable=E0012,C1001
ordering = ["-date"]
def retry(self):
if not self.paid and not self.closed:
inv = stripe.Invoice.retrieve(self.stripe_id)
inv.pay()
return True
return False
def status(self):
if self.paid:
return "Paid"
return "Open"
@classmethod
def sync_from_stripe_data(cls, stripe_invoice, send_receipt=True, stripe_connect=None):
c = Customer.objects.get(stripe_id=stripe_invoice["customer"])
period_end = convert_tstamp(stripe_invoice, "period_end")
period_start = convert_tstamp(stripe_invoice, "period_start")
date = convert_tstamp(stripe_invoice, "date")
invoice, created = cls.objects.get_or_create(
stripe_id=stripe_invoice["id"],
defaults=dict(
customer=c,
attempted=stripe_invoice["attempted"],
attempts=stripe_invoice["attempt_count"],
closed=stripe_invoice["closed"],
paid=stripe_invoice["paid"],
period_end=period_end,
period_start=period_start,
subtotal=stripe_invoice["subtotal"] / decimal.Decimal("100"),
total=stripe_invoice["total"] / decimal.Decimal("100"),
date=date,
charge=stripe_invoice.get("charge") or "",
stripe_connect=stripe_connect
)
)
if not created:
# pylint: disable=C0301
invoice.attempted = stripe_invoice["attempted"]
invoice.attempts = stripe_invoice["attempt_count"]
invoice.closed = stripe_invoice["closed"]
invoice.paid = stripe_invoice["paid"]
invoice.period_end = period_end
invoice.period_start = period_start
invoice.subtotal = stripe_invoice["subtotal"] / decimal.Decimal("100")
invoice.total = stripe_invoice["total"] / decimal.Decimal("100")
invoice.date = date
invoice.charge = stripe_invoice.get("charge") or ""
invoice.stripe_connect = stripe_connect
invoice.save()
for item in stripe_invoice["lines"].get("data", []):
period_end = convert_tstamp(item["period"], "end")
period_start = convert_tstamp(item["period"], "start")
if item.get("plan"):
plan = plan_from_stripe_id(item["plan"]["id"])
else:
plan = ""
inv_item, inv_item_created = invoice.items.get_or_create(
stripe_id=item["id"],
defaults=dict(
amount=(item["amount"] / decimal.Decimal("100")),
currency=item["currency"],
proration=item["proration"],
description=item.get("description") or "",
line_type=item["type"],
plan=plan,
period_start=period_start,
period_end=period_end,
quantity=item.get("quantity")
)
)
if not inv_item_created:
inv_item.amount = (item["amount"] / decimal.Decimal("100"))
inv_item.currency = item["currency"]
inv_item.proration = item["proration"]
inv_item.description = item.get("description") or ""
inv_item.line_type = item["type"]
inv_item.plan = plan
inv_item.period_start = period_start
inv_item.period_end = period_end
inv_item.quantity = item.get("quantity")
inv_item.save()
if stripe_invoice.get("charge"):
obj = c.record_charge(stripe_invoice["charge"])
obj.invoice = invoice
obj.save()
if send_receipt:
obj.send_receipt()
return invoice
@classmethod
def handle_event(cls, event, send_receipt=SEND_EMAIL_RECEIPTS):
valid_events = ["invoice.payment_failed", "invoice.payment_succeeded"]
if event.kind in valid_events:
invoice_data = event.message["data"]["object"]
stripe_invoice = stripe.Invoice.retrieve(invoice_data["id"])
cls.sync_from_stripe_data(stripe_invoice, send_receipt=send_receipt, stripe_connect=event.stripe_connect)
class InvoiceItem(models.Model):
stripe_id = models.CharField(max_length=255)
created_at = models.DateTimeField(default=timezone.now)
invoice = models.ForeignKey(Invoice, related_name="items")
amount = models.DecimalField(decimal_places=2, max_digits=7)
currency = models.CharField(max_length=10)
period_start = models.DateTimeField()
period_end = models.DateTimeField()
proration = models.BooleanField(default=False)
line_type = models.CharField(max_length=50)
description = models.CharField(max_length=200, blank=True)
plan = models.CharField(max_length=100, blank=True)
quantity = models.IntegerField(null=True)
def plan_display(self):
return PAYMENTS_PLANS[self.plan]["name"]
class Charge(StripeObject):
customer = models.ForeignKey(Customer, related_name="charges", null=True)
invoice = models.ForeignKey(Invoice, null=True, related_name="charges")
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
amount = models.DecimalField(decimal_places=2, max_digits=7, null=True)
amount_refunded = models.DecimalField(
decimal_places=2,
max_digits=7,
null=True
)
description = models.TextField(blank=True)
paid = models.NullBooleanField(null=True)
disputed = models.NullBooleanField(null=True)
refunded = models.NullBooleanField(null=True)
fee = models.DecimalField(decimal_places=2, max_digits=7, null=True)
receipt_sent = models.BooleanField(default=False)
charge_created = models.DateTimeField(null=True, blank=True)
stripe_connect = models.ForeignKey(ConnectUser, null=True)
objects = ChargeManager()
def calculate_refund_amount(self, amount=None):
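        # Cap the requested refund at whatever has not already been refunded, then
        # convert the dollar amount to integer cents for the Stripe API.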
eligible_to_refund = self.amount - (self.amount_refunded or 0)
if amount:
amount_to_refund = min(eligible_to_refund, amount)
else:
amount_to_refund = eligible_to_refund
return int(amount_to_refund * 100)
def refund(self, amount=None):
# pylint: disable=E1121
charge_obj = stripe.Charge.retrieve(
self.stripe_id
).refund(
amount=self.calculate_refund_amount(amount=amount)
)
Charge.sync_from_stripe_data(charge_obj)
@classmethod
def sync_from_stripe_data(cls, data):
obj, _ = Charge.objects.get_or_create(
stripe_id=data["id"]
)
        customer_id = data.get("customer", None)
customer = Customer.objects.get(stripe_id=customer_id) if customer_id else None
obj.customer = customer
invoice_id = data.get("invoice", None)
if Invoice.objects.filter(stripe_id=invoice_id).exists():
obj.invoice = obj.customer.invoices.get(stripe_id=invoice_id)
obj.card_last_4 = data["card"]["last4"]
obj.card_kind = data["card"]["type"]
obj.amount = (data["amount"] / decimal.Decimal("100"))
obj.paid = data["paid"]
obj.refunded = data["refunded"]
obj.fee = (data["fee"] / decimal.Decimal("100"))
obj.disputed = data["dispute"] is not None
obj.charge_created = convert_tstamp(data, "created")
if data.get("description"):
obj.description = data["description"]
if data.get("amount_refunded"):
# pylint: disable=C0301
obj.amount_refunded = (data["amount_refunded"] / decimal.Decimal("100"))
if data["refunded"]:
obj.amount_refunded = (data["amount"] / decimal.Decimal("100"))
user_id = data.get("user_id", None)
if user_id and ConnectUser.objects.filter(account_id=user_id).exists():
obj.stripe_connect = ConnectUser.objects.get(account_id=user_id)
obj.save()
return obj
def send_receipt(self):
if not self.receipt_sent and self.customer:
site = Site.objects.get_current()
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
ctx = {
"charge": self,
"site": site,
"protocol": protocol,
}
subject = render_to_string("payments/email/subject.txt", ctx)
subject = subject.strip()
message = render_to_string("payments/email/body.txt", ctx)
num_sent = EmailMessage(
subject,
message,
to=[self.customer.user.email],
from_email=INVOICE_FROM_EMAIL
).send()
self.receipt_sent = num_sent > 0
self.save()
@classmethod
def create(cls, card, amount, currency="usd", description=None, application_fee=None, stripe_connect_user=None):
"""
        This method expects `amount` and `application_fee` (if supplied) to be Decimal
        values representing dollar amounts. They will be converted to cents, so any
        decimals beyond two places will be ignored.
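        Illustrative usage (not part of the original docs; "tok_visa" is a
        placeholder card token):
            Charge.create(card="tok_visa", amount=decimal.Decimal("10.50"),
                          description="One-off charge")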
"""
        if not isinstance(amount, decimal.Decimal) or (application_fee is not None and not isinstance(application_fee, decimal.Decimal)):
raise ValueError(
"You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
)
charge_args = {
'amount': int(amount * 100),
'currency': currency,
'description': description,
'card': card,
}
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
charge_args['api_key'] = stripe_connect_user.stripe_access_token
elif stripe_connect_user:
charge_args['api_key'] = stripe_connect_user
if application_fee:
charge_args['application_fee'] = int(application_fee * 100)
resp = stripe.Charge.create(**charge_args)
return Charge.sync_from_stripe_data(resp)
| import datetime
import decimal
import json
import traceback
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
import stripe
from jsonfield.fields import JSONField
from .managers import CustomerManager, ChargeManager, TransferManager
from .settings import (
DEFAULT_PLAN,
INVOICE_FROM_EMAIL,
PAYMENTS_PLANS,
plan_from_stripe_id,
SEND_EMAIL_RECEIPTS,
TRIAL_PERIOD_FOR_USER_CALLBACK,
PLAN_QUANTITY_CALLBACK
)
from .signals import (
cancelled,
card_changed,
subscription_made,
webhook_processing_error,
WEBHOOK_SIGNALS,
)
from .utils import convert_tstamp
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.api_version = getattr(settings, "STRIPE_API_VERSION", "2012-11-07")
class StripeObject(models.Model):
stripe_id = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(default=timezone.now)
class Meta: # pylint: disable=E0012,C1001
abstract = True
class EventProcessingException(models.Model):
event = models.ForeignKey("Event", null=True)
data = models.TextField()
message = models.CharField(max_length=500)
traceback = models.TextField()
created_at = models.DateTimeField(default=timezone.now)
@classmethod
def log(cls, data, exception, event):
cls.objects.create(
event=event,
data=data or "",
message=str(exception),
traceback=traceback.format_exc()
)
def __unicode__(self):
return u"<%s, pk=%s, Event=%s>" % (self.message, self.pk, self.event)
class Event(StripeObject):
kind = models.CharField(max_length=250)
livemode = models.BooleanField()
customer = models.ForeignKey("Customer", null=True)
webhook_message = JSONField()
validated_message = JSONField(null=True)
valid = models.NullBooleanField(null=True)
processed = models.BooleanField(default=False)
stripe_connect = models.ForeignKey('ConnectUser', null=True)
@property
def message(self):
return self.validated_message
def __unicode__(self):
return "%s - %s" % (self.kind, self.stripe_id)
def link_customer(self):
cus_id = None
customer_crud_events = [
"customer.created",
"customer.updated",
"customer.deleted"
]
if self.kind in customer_crud_events:
cus_id = self.message["data"]["object"]["id"]
else:
cus_id = self.message["data"]["object"].get("customer", None)
if cus_id is not None:
try:
self.customer = Customer.objects.get(stripe_id=cus_id)
self.save()
except Customer.DoesNotExist:
pass
def link_stripe_connect(self):
connect_id = self.message["data"]["object"].get("user_id", None)
if connect_id is not None:
try:
self.stripe_connect = ConnectUser.objects.get(account_id=connect_id)
self.save()
except ConnectUser.DoesNotExist:
pass
def validate(self):
evt = stripe.Event.retrieve(self.stripe_id)
self.validated_message = json.loads(
json.dumps(
evt.to_dict(),
sort_keys=True,
cls=stripe.StripeObjectEncoder
)
)
if self.webhook_message["data"] == self.validated_message["data"]:
self.valid = True
else:
self.valid = False
self.save()
def process(self):
"""
"account.updated",
"account.application.deauthorized",
"charge.succeeded",
"charge.failed",
"charge.refunded",
"charge.dispute.created",
"charge.dispute.updated",
"chagne.dispute.closed",
"customer.created",
"customer.updated",
"customer.deleted",
"customer.subscription.created",
"customer.subscription.updated",
"customer.subscription.deleted",
"customer.subscription.trial_will_end",
"customer.discount.created",
"customer.discount.updated",
"customer.discount.deleted",
"invoice.created",
"invoice.updated",
"invoice.payment_succeeded",
"invoice.payment_failed",
"invoiceitem.created",
"invoiceitem.updated",
"invoiceitem.deleted",
"plan.created",
"plan.updated",
"plan.deleted",
"coupon.created",
"coupon.updated",
"coupon.deleted",
"transfer.created",
"transfer.updated",
"transfer.failed",
"ping"
"""
if self.valid and not self.processed:
try:
if not self.kind.startswith("plan.") and \
not self.kind.startswith("transfer."):
self.link_customer()
if not self.stripe_connect:
self.link_stripe_connect()
if self.kind.startswith("invoice."):
Invoice.handle_event(self)
elif self.kind.startswith("charge."):
if not self.customer:
self.link_customer()
self.customer.record_charge(
self.message["data"]["object"]["id"]
)
elif self.kind.startswith("transfer."):
Transfer.process_transfer(
self,
self.message["data"]["object"]
)
elif self.kind.startswith("customer.subscription."):
if not self.customer:
self.link_customer()
if self.customer:
self.customer.sync_current_subscription()
elif self.kind == "customer.deleted":
if not self.customer:
self.link_customer()
self.customer.purge()
self.send_signal()
self.processed = True
self.save()
            except stripe.StripeError as e:
EventProcessingException.log(
data=e.http_body,
exception=e,
event=self
)
webhook_processing_error.send(
sender=Event,
data=e.http_body,
exception=e
)
def send_signal(self):
signal = WEBHOOK_SIGNALS.get(self.kind)
if signal:
return signal.send(sender=Event, event=self)
class Transfer(StripeObject):
# pylint: disable=C0301
event = models.ForeignKey(Event, related_name="transfers")
amount = models.DecimalField(decimal_places=2, max_digits=9)
status = models.CharField(max_length=25)
date = models.DateTimeField()
description = models.TextField(null=True, blank=True)
adjustment_count = models.IntegerField(null=True)
adjustment_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
adjustment_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
charge_count = models.IntegerField(null=True)
charge_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
charge_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
collected_fee_count = models.IntegerField(null=True)
collected_fee_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
net = models.DecimalField(decimal_places=2, max_digits=9, null=True)
refund_count = models.IntegerField(null=True)
refund_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
refund_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
validation_count = models.IntegerField(null=True)
validation_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
stripe_connect = models.ForeignKey('ConnectUser', null=True)
objects = TransferManager()
def update_status(self):
self.status = stripe.Transfer.retrieve(self.stripe_id).status
self.save()
@classmethod
def process_transfer(cls, event, transfer):
defaults = {
"amount": transfer["amount"] / decimal.Decimal("100"),
"status": transfer["status"],
"date": convert_tstamp(transfer, "date"),
"description": transfer.get("description", "")
}
summary = transfer.get("summary")
if summary:
defaults.update({
"adjustment_count": summary.get("adjustment_count"),
"adjustment_fees": summary.get("adjustment_fees"),
"adjustment_gross": summary.get("adjustment_gross"),
"charge_count": summary.get("charge_count"),
"charge_fees": summary.get("charge_fees"),
"charge_gross": summary.get("charge_gross"),
"collected_fee_count": summary.get("collected_fee_count"),
"collected_fee_gross": summary.get("collected_fee_gross"),
"refund_count": summary.get("refund_count"),
"refund_fees": summary.get("refund_fees"),
"refund_gross": summary.get("refund_gross"),
"validation_count": summary.get("validation_count"),
"validation_fees": summary.get("validation_fees"),
"net": summary.get("net") / decimal.Decimal("100")
})
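        # Stripe reports the summary's fee and gross totals in cents; convert
        # every *_fees and *_gross field to a dollar Decimal before saving.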
for field in defaults:
if field.endswith("fees") or field.endswith("gross"):
defaults[field] = defaults[field] / decimal.Decimal("100")
if event.kind == "transfer.paid":
defaults.update({"event": event})
obj, created = Transfer.objects.get_or_create(
stripe_id=transfer["id"],
defaults=defaults
)
else:
obj, created = Transfer.objects.get_or_create(
stripe_id=transfer["id"],
event=event,
defaults=defaults
)
if event.stripe_connect:
obj.stripe_connect = event.stripe_connect
if created and summary:
for fee in summary.get("charge_fee_details", []):
obj.charge_fee_details.create(
amount=fee["amount"] / decimal.Decimal("100"),
application=fee.get("application", ""),
description=fee.get("description", ""),
kind=fee["type"]
)
else:
obj.status = transfer["status"]
obj.save()
if event.kind == "transfer.updated":
obj.update_status()
class TransferChargeFee(models.Model):
transfer = models.ForeignKey(Transfer, related_name="charge_fee_details")
amount = models.DecimalField(decimal_places=2, max_digits=7)
application = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
kind = models.CharField(max_length=150)
created_at = models.DateTimeField(default=timezone.now)
class Customer(StripeObject):
user = models.OneToOneField(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True
)
card_fingerprint = models.CharField(max_length=200, blank=True)
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
date_purged = models.DateTimeField(null=True, editable=False)
objects = CustomerManager()
def __unicode__(self):
return unicode(self.user)
@property
def stripe_customer(self):
return stripe.Customer.retrieve(self.stripe_id)
def purge(self):
try:
self.stripe_customer.delete()
except stripe.InvalidRequestError as e:
if e.message.startswith("No such customer:"):
# The exception was thrown because the customer was already
# deleted on the stripe side, ignore the exception
pass
else:
# The exception was raised for another reason, re-raise it
raise
self.user = None
self.card_fingerprint = ""
self.card_last_4 = ""
self.card_kind = ""
self.date_purged = timezone.now()
self.save()
def delete(self, using=None):
# Only way to delete a customer is to use SQL
self.purge()
def can_charge(self):
return self.card_fingerprint and \
self.card_last_4 and \
self.card_kind and \
self.date_purged is None
def has_active_subscription(self):
try:
return self.current_subscription.is_valid()
except CurrentSubscription.DoesNotExist:
return False
def cancel(self, at_period_end=True):
try:
current = self.current_subscription
except CurrentSubscription.DoesNotExist:
return
sub = self.stripe_customer.cancel_subscription(
at_period_end=at_period_end
)
current.status = sub.status
current.cancel_at_period_end = sub.cancel_at_period_end
current.current_period_end = convert_tstamp(sub, "current_period_end")
current.save()
cancelled.send(sender=self, stripe_response=sub)
@classmethod
def create(cls, user, card=None, plan=None, charge_immediately=True):
if card and plan:
plan = PAYMENTS_PLANS[plan]["stripe_plan_id"]
elif DEFAULT_PLAN:
plan = PAYMENTS_PLANS[DEFAULT_PLAN]["stripe_plan_id"]
else:
plan = None
trial_end = None
if TRIAL_PERIOD_FOR_USER_CALLBACK and plan:
trial_days = TRIAL_PERIOD_FOR_USER_CALLBACK(user)
trial_end = datetime.datetime.utcnow() + datetime.timedelta(
days=trial_days
)
stripe_customer = stripe.Customer.create(
email=user.email,
card=card,
plan=plan or DEFAULT_PLAN,
trial_end=trial_end
)
if stripe_customer.active_card:
cus = cls.objects.create(
user=user,
stripe_id=stripe_customer.id,
card_fingerprint=stripe_customer.active_card.fingerprint,
card_last_4=stripe_customer.active_card.last4,
card_kind=stripe_customer.active_card.type
)
else:
cus = cls.objects.create(
user=user,
stripe_id=stripe_customer.id,
)
if plan:
if stripe_customer.subscription:
cus.sync_current_subscription(cu=stripe_customer)
if charge_immediately:
cus.send_invoice()
return cus
def update_card(self, token):
cu = self.stripe_customer
cu.card = token
cu.save()
self.save_card(cu)
def save_card(self, cu=None):
cu = cu or self.stripe_customer
active_card = cu.active_card
self.card_fingerprint = active_card.fingerprint
self.card_last_4 = active_card.last4
self.card_kind = active_card.type
self.save()
card_changed.send(sender=self, stripe_response=cu)
def retry_unpaid_invoices(self):
self.sync_invoices()
for inv in self.invoices.filter(paid=False, closed=False):
try:
inv.retry() # Always retry unpaid invoices
            except stripe.InvalidRequestError as error:
if error.message != "Invoice is already paid":
raise error
def send_invoice(self):
try:
invoice = stripe.Invoice.create(customer=self.stripe_id)
if invoice.amount_due > 0:
invoice.pay()
return True
except stripe.InvalidRequestError:
return False # There was nothing to invoice
def sync(self, cu=None):
cu = cu or self.stripe_customer
updated = False
if hasattr(cu, "active_card") and cu.active_card:
# Test to make sure the card has changed, otherwise do not update it
# (i.e. refrain from sending any signals)
if (self.card_last_4 != cu.active_card.last4 or
self.card_fingerprint != cu.active_card.fingerprint or
self.card_kind != cu.active_card.type):
updated = True
self.card_last_4 = cu.active_card.last4
self.card_fingerprint = cu.active_card.fingerprint
self.card_kind = cu.active_card.type
else:
updated = True
self.card_fingerprint = ""
self.card_last_4 = ""
self.card_kind = ""
if updated:
self.save()
card_changed.send(sender=self, stripe_response=cu)
def sync_invoices(self, cu=None):
cu = cu or self.stripe_customer
for invoice in cu.invoices().data:
Invoice.sync_from_stripe_data(invoice, send_receipt=False)
def sync_charges(self, cu=None):
cu = cu or self.stripe_customer
for charge in cu.charges().data:
self.record_charge(charge.id)
def sync_current_subscription(self, cu=None):
cu = cu or self.stripe_customer
sub = getattr(cu, "subscription", None)
if sub is None:
try:
self.current_subscription.delete()
except CurrentSubscription.DoesNotExist:
pass
else:
try:
sub_obj = self.current_subscription
sub_obj.plan = plan_from_stripe_id(sub.plan.id)
sub_obj.current_period_start = convert_tstamp(
sub.current_period_start
)
sub_obj.current_period_end = convert_tstamp(
sub.current_period_end
)
sub_obj.amount = (sub.plan.amount / decimal.Decimal("100"))
sub_obj.status = sub.status
sub_obj.cancel_at_period_end = sub.cancel_at_period_end
sub_obj.start = convert_tstamp(sub.start)
sub_obj.quantity = sub.quantity
sub_obj.save()
except CurrentSubscription.DoesNotExist:
sub_obj = CurrentSubscription.objects.create(
customer=self,
plan=plan_from_stripe_id(sub.plan.id),
current_period_start=convert_tstamp(
sub.current_period_start
),
current_period_end=convert_tstamp(
sub.current_period_end
),
amount=(sub.plan.amount / decimal.Decimal("100")),
status=sub.status,
cancel_at_period_end=sub.cancel_at_period_end,
start=convert_tstamp(sub.start),
quantity=sub.quantity
)
if sub.trial_start and sub.trial_end:
sub_obj.trial_start = convert_tstamp(sub.trial_start)
sub_obj.trial_end = convert_tstamp(sub.trial_end)
sub_obj.save()
return sub_obj
def update_plan_quantity(self, quantity, charge_immediately=False):
self.subscribe(
plan=plan_from_stripe_id(
self.stripe_customer.subscription.plan.id
),
quantity=quantity,
charge_immediately=charge_immediately
)
def subscribe(self, plan, quantity=None, trial_days=None,
charge_immediately=True, token=None, coupon=None):
if quantity is None:
if PLAN_QUANTITY_CALLBACK is not None:
quantity = PLAN_QUANTITY_CALLBACK(self)
else:
quantity = 1
cu = self.stripe_customer
subscription_params = {}
if trial_days:
subscription_params["trial_end"] = \
datetime.datetime.utcnow() + datetime.timedelta(days=trial_days)
if token:
subscription_params["card"] = token
subscription_params["plan"] = PAYMENTS_PLANS[plan]["stripe_plan_id"]
subscription_params["quantity"] = quantity
subscription_params["coupon"] = coupon
resp = cu.update_subscription(**subscription_params)
if token:
# Refetch the stripe customer so we have the updated card info
cu = self.stripe_customer
self.save_card(cu)
self.sync_current_subscription(cu)
if charge_immediately:
self.send_invoice()
subscription_made.send(sender=self, plan=plan, stripe_response=resp)
return resp
def charge(self, amount, currency="usd", description=None,
send_receipt=True, application_fee=None,
stripe_connect_user=None):
"""
        This method expects `amount` and `application_fee` (if supplied) to be Decimal
        values representing dollar amounts. They will be converted to cents, so any
        decimals beyond two places will be ignored.
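        Illustrative usage (not part of the original docs; assumes ``customer`` is
        a saved Customer with a chargeable card):
            customer.charge(decimal.Decimal("25.00"), description="Setup fee")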
"""
        if not isinstance(amount, decimal.Decimal) or (application_fee is not None and not isinstance(application_fee, decimal.Decimal)):
raise ValueError(
"You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
)
charge_args = {
'amount': int(amount * 100),
'currency': currency,
'description': description,
}
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
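            # Charging on behalf of a connected account: the platform's customer is not
            # directly visible to that account, so create a single-use shared token for
            # this customer with the connected account's API key and charge against it.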
charge_args['card'] = stripe.Token.create(customer=self.stripe_id, api_key=stripe_connect_user.stripe_access_token)
charge_args['api_key'] = stripe_connect_user.stripe_access_token
else:
charge_args['customer'] = self.stripe_id
if application_fee:
charge_args['application_fee'] = int(application_fee * 100)
resp = stripe.Charge.create(**charge_args)
obj = self.record_charge(resp["id"], stripe_connect_user)
if send_receipt:
obj.send_receipt()
return obj
def record_charge(self, charge_id, stripe_connect_user=None):
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
data = stripe.Charge.retrieve(charge_id, api_key=stripe_connect_user.stripe_access_token)
else:
data = stripe.Charge.retrieve(charge_id)
return Charge.sync_from_stripe_data(data)
class ConnectUser(models.Model):
"""
    A user in your system to whom you may be routing payments through "Stripe Connect".
"""
user = models.OneToOneField(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True
)
# when a webhook is received for an action related to a ConnectUser, a 'user_id' will be provided
# This is the same as an account id
account_id = models.CharField(max_length=100)
stripe_access_token = models.CharField(max_length=100)
stripe_publishable_key = models.CharField(max_length=100)
@staticmethod
def account_id_lookup(stripe_access_token):
data = stripe.Account.retrieve(api_key=stripe_access_token)
return data.get('id', None)
def __unicode__(self):
return unicode(self.user)
class CurrentSubscription(models.Model):
customer = models.OneToOneField(
Customer,
related_name="current_subscription",
null=True
)
plan = models.CharField(max_length=100)
quantity = models.IntegerField()
start = models.DateTimeField()
# trialing, active, past_due, canceled, or unpaid
status = models.CharField(max_length=25)
cancel_at_period_end = models.BooleanField(default=False)
canceled_at = models.DateTimeField(blank=True, null=True)
current_period_end = models.DateTimeField(blank=True, null=True)
current_period_start = models.DateTimeField(blank=True, null=True)
ended_at = models.DateTimeField(blank=True, null=True)
trial_end = models.DateTimeField(blank=True, null=True)
trial_start = models.DateTimeField(blank=True, null=True)
amount = models.DecimalField(decimal_places=2, max_digits=7)
created_at = models.DateTimeField(default=timezone.now)
@property
def total_amount(self):
return self.amount * self.quantity
def plan_display(self):
return PAYMENTS_PLANS[self.plan]["name"]
def status_display(self):
return self.status.replace("_", " ").title()
def is_period_current(self):
return self.current_period_end > timezone.now()
def is_status_current(self):
return self.status in ["trialing", "active"]
def is_valid(self):
if not self.is_status_current():
return False
if self.cancel_at_period_end and not self.is_period_current():
return False
return True
def delete(self, using=None): # pylint: disable=E1002
"""
Set values to None while deleting the object so that any lingering
references will not show previous values (such as when an Event
signal is triggered after a subscription has been deleted)
"""
super(CurrentSubscription, self).delete(using=using)
self.plan = None
self.status = None
self.quantity = 0
self.amount = 0
class Invoice(models.Model):
stripe_id = models.CharField(max_length=255)
customer = models.ForeignKey(Customer, related_name="invoices")
attempted = models.NullBooleanField()
attempts = models.PositiveIntegerField(null=True)
closed = models.BooleanField(default=False)
paid = models.BooleanField(default=False)
period_end = models.DateTimeField()
period_start = models.DateTimeField()
subtotal = models.DecimalField(decimal_places=2, max_digits=7)
total = models.DecimalField(decimal_places=2, max_digits=7)
date = models.DateTimeField()
charge = models.CharField(max_length=50, blank=True)
created_at = models.DateTimeField(default=timezone.now)
stripe_connect = models.ForeignKey(ConnectUser, null=True)
class Meta: # pylint: disable=E0012,C1001
ordering = ["-date"]
def retry(self):
if not self.paid and not self.closed:
inv = stripe.Invoice.retrieve(self.stripe_id)
inv.pay()
return True
return False
def status(self):
if self.paid:
return "Paid"
return "Open"
@classmethod
def sync_from_stripe_data(cls, stripe_invoice, send_receipt=True, stripe_connect=None):
c = Customer.objects.get(stripe_id=stripe_invoice["customer"])
period_end = convert_tstamp(stripe_invoice, "period_end")
period_start = convert_tstamp(stripe_invoice, "period_start")
date = convert_tstamp(stripe_invoice, "date")
invoice, created = cls.objects.get_or_create(
stripe_id=stripe_invoice["id"],
defaults=dict(
customer=c,
attempted=stripe_invoice["attempted"],
attempts=stripe_invoice["attempt_count"],
closed=stripe_invoice["closed"],
paid=stripe_invoice["paid"],
period_end=period_end,
period_start=period_start,
subtotal=stripe_invoice["subtotal"] / decimal.Decimal("100"),
total=stripe_invoice["total"] / decimal.Decimal("100"),
date=date,
charge=stripe_invoice.get("charge") or "",
stripe_connect=stripe_connect
)
)
if not created:
# pylint: disable=C0301
invoice.attempted = stripe_invoice["attempted"]
invoice.attempts = stripe_invoice["attempt_count"]
invoice.closed = stripe_invoice["closed"]
invoice.paid = stripe_invoice["paid"]
invoice.period_end = period_end
invoice.period_start = period_start
invoice.subtotal = stripe_invoice["subtotal"] / decimal.Decimal("100")
invoice.total = stripe_invoice["total"] / decimal.Decimal("100")
invoice.date = date
invoice.charge = stripe_invoice.get("charge") or ""
invoice.stripe_connect = stripe_connect
invoice.save()
for item in stripe_invoice["lines"].get("data", []):
period_end = convert_tstamp(item["period"], "end")
period_start = convert_tstamp(item["period"], "start")
if item.get("plan"):
plan = plan_from_stripe_id(item["plan"]["id"])
else:
plan = ""
inv_item, inv_item_created = invoice.items.get_or_create(
stripe_id=item["id"],
defaults=dict(
amount=(item["amount"] / decimal.Decimal("100")),
currency=item["currency"],
proration=item["proration"],
description=item.get("description") or "",
line_type=item["type"],
plan=plan,
period_start=period_start,
period_end=period_end,
quantity=item.get("quantity")
)
)
if not inv_item_created:
inv_item.amount = (item["amount"] / decimal.Decimal("100"))
inv_item.currency = item["currency"]
inv_item.proration = item["proration"]
inv_item.description = item.get("description") or ""
inv_item.line_type = item["type"]
inv_item.plan = plan
inv_item.period_start = period_start
inv_item.period_end = period_end
inv_item.quantity = item.get("quantity")
inv_item.save()
if stripe_invoice.get("charge"):
obj = c.record_charge(stripe_invoice["charge"])
obj.invoice = invoice
obj.save()
if send_receipt:
obj.send_receipt()
return invoice
@classmethod
def handle_event(cls, event, send_receipt=SEND_EMAIL_RECEIPTS):
valid_events = ["invoice.payment_failed", "invoice.payment_succeeded"]
if event.kind in valid_events:
invoice_data = event.message["data"]["object"]
stripe_invoice = stripe.Invoice.retrieve(invoice_data["id"])
cls.sync_from_stripe_data(stripe_invoice, send_receipt=send_receipt, stripe_connect=event.stripe_connect)
class InvoiceItem(models.Model):
stripe_id = models.CharField(max_length=255)
created_at = models.DateTimeField(default=timezone.now)
invoice = models.ForeignKey(Invoice, related_name="items")
amount = models.DecimalField(decimal_places=2, max_digits=7)
currency = models.CharField(max_length=10)
period_start = models.DateTimeField()
period_end = models.DateTimeField()
proration = models.BooleanField(default=False)
line_type = models.CharField(max_length=50)
description = models.CharField(max_length=200, blank=True)
plan = models.CharField(max_length=100, blank=True)
quantity = models.IntegerField(null=True)
def plan_display(self):
return PAYMENTS_PLANS[self.plan]["name"]
class Charge(StripeObject):
customer = models.ForeignKey(Customer, related_name="charges", null=True)
invoice = models.ForeignKey(Invoice, null=True, related_name="charges")
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
amount = models.DecimalField(decimal_places=2, max_digits=7, null=True)
amount_refunded = models.DecimalField(
decimal_places=2,
max_digits=7,
null=True
)
description = models.TextField(blank=True)
paid = models.NullBooleanField(null=True)
disputed = models.NullBooleanField(null=True)
refunded = models.NullBooleanField(null=True)
fee = models.DecimalField(decimal_places=2, max_digits=7, null=True)
receipt_sent = models.BooleanField(default=False)
charge_created = models.DateTimeField(null=True, blank=True)
stripe_connect = models.ForeignKey(ConnectUser, null=True)
objects = ChargeManager()
def calculate_refund_amount(self, amount=None):
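        # Cap the requested refund at whatever has not already been refunded, then
        # convert the dollar amount to integer cents for the Stripe API.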
eligible_to_refund = self.amount - (self.amount_refunded or 0)
if amount:
amount_to_refund = min(eligible_to_refund, amount)
else:
amount_to_refund = eligible_to_refund
return int(amount_to_refund * 100)
def refund(self, amount=None):
# pylint: disable=E1121
charge_obj = stripe.Charge.retrieve(
self.stripe_id
).refund(
amount=self.calculate_refund_amount(amount=amount)
)
Charge.sync_from_stripe_data(charge_obj)
@classmethod
def sync_from_stripe_data(cls, data):
obj, _ = Charge.objects.get_or_create(
stripe_id=data["id"]
)
        customer_id = data.get("customer", None)
customer = Customer.objects.get(stripe_id=customer_id) if customer_id else None
obj.customer = customer
invoice_id = data.get("invoice", None)
if Invoice.objects.filter(stripe_id=invoice_id).exists():
obj.invoice = obj.customer.invoices.get(stripe_id=invoice_id)
obj.card_last_4 = data["card"]["last4"]
obj.card_kind = data["card"]["type"]
obj.amount = (data["amount"] / decimal.Decimal("100"))
obj.paid = data["paid"]
obj.refunded = data["refunded"]
obj.fee = (data["fee"] / decimal.Decimal("100"))
obj.disputed = data["dispute"] is not None
obj.charge_created = convert_tstamp(data, "created")
if data.get("description"):
obj.description = data["description"]
if data.get("amount_refunded"):
# pylint: disable=C0301
obj.amount_refunded = (data["amount_refunded"] / decimal.Decimal("100"))
if data["refunded"]:
obj.amount_refunded = (data["amount"] / decimal.Decimal("100"))
user_id = data.get("user_id", None)
if user_id and ConnectUser.objects.filter(account_id=user_id).exists():
obj.stripe_connect = ConnectUser.objects.get(account_id=user_id)
obj.save()
return obj
def send_receipt(self):
if not self.receipt_sent and self.customer:
site = Site.objects.get_current()
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
ctx = {
"charge": self,
"site": site,
"protocol": protocol,
}
subject = render_to_string("payments/email/subject.txt", ctx)
subject = subject.strip()
message = render_to_string("payments/email/body.txt", ctx)
num_sent = EmailMessage(
subject,
message,
to=[self.customer.user.email],
from_email=INVOICE_FROM_EMAIL
).send()
self.receipt_sent = num_sent > 0
self.save()
@classmethod
def create(cls, card, amount, currency="usd", description=None, application_fee=None, stripe_connect_user=None):
"""
        This method expects `amount` and `application_fee` (if supplied) to be Decimal
        values representing dollar amounts. They will be converted to cents, so any
        decimals beyond two places will be ignored.
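        Illustrative usage (not part of the original docs; "tok_visa" is a
        placeholder card token):
            Charge.create(card="tok_visa", amount=decimal.Decimal("10.50"),
                          description="One-off charge")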
"""
        if not isinstance(amount, decimal.Decimal) or (application_fee is not None and not isinstance(application_fee, decimal.Decimal)):
raise ValueError(
"You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
)
charge_args = {
'amount': int(amount * 100),
'currency': currency,
'description': description,
'card': card,
}
if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
charge_args['api_key'] = stripe_connect_user.stripe_access_token
elif stripe_connect_user:
charge_args['api_key'] = stripe_connect_user
if application_fee:
charge_args['application_fee'] = int(application_fee * 100)
resp = stripe.Charge.create(**charge_args)
return Charge.sync_from_stripe_data(resp) | en | 0.833848 | # pylint: disable=E0012,C1001 "account.updated", "account.application.deauthorized", "charge.succeeded", "charge.failed", "charge.refunded", "charge.dispute.created", "charge.dispute.updated", "chagne.dispute.closed", "customer.created", "customer.updated", "customer.deleted", "customer.subscription.created", "customer.subscription.updated", "customer.subscription.deleted", "customer.subscription.trial_will_end", "customer.discount.created", "customer.discount.updated", "customer.discount.deleted", "invoice.created", "invoice.updated", "invoice.payment_succeeded", "invoice.payment_failed", "invoiceitem.created", "invoiceitem.updated", "invoiceitem.deleted", "plan.created", "plan.updated", "plan.deleted", "coupon.created", "coupon.updated", "coupon.deleted", "transfer.created", "transfer.updated", "transfer.failed", "ping" # pylint: disable=C0301 # The exception was thrown because the customer was already # deleted on the stripe side, ignore the exception # The exception was raised for another reason, re-raise it # Only way to delete a customer is to use SQL # Always retry unpaid invoices # There was nothing to invoice # Test to make sure the card has changed, otherwise do not update it # (i.e. refrain from sending any signals) # Refetch the stripe customer so we have the updated card info This method expects `amount` and 'application_fee' to be a Decimal type representing a dollar amount. It will be converted to cents so any decimals beyond two will be ignored. A user in your system who you may be routing payments to through "Stripe Connect" # when a webhook is received for an action related to a ConnectUser, a 'user_id' will be provided # This is the same as an account id # trialing, active, past_due, canceled, or unpaid # pylint: disable=E1002 Set values to None while deleting the object so that any lingering references will not show previous values (such as when an Event signal is triggered after a subscription has been deleted) # pylint: disable=E0012,C1001 # pylint: disable=C0301 # pylint: disable=E1121 # pylint: disable=C0301 This method expects `amount` and 'application_fee' to be a Decimal type representing a dollar amount. It will be converted to cents so any decimals beyond two will be ignored. | 1.890476 | 2 |
mars/tensor/base/flip.py | tomzhang/mars-1 | 2 | 9007 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..datasource import tensor as astensor
def flip(m, axis):
"""
Reverse the order of elements in a tensor along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input tensor.
axis : integer
        Axis in the tensor whose entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip a tensor vertically (axis=0).
fliplr : Flip a tensor horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> import mars.tensor as mt
>>> A = mt.arange(8).reshape((2,2,2))
>>> A.execute()
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> mt.flip(A, 0).execute()
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> mt.flip(A, 1).execute()
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = mt.random.randn(3,4,5)
>>> mt.all(mt.flip(A,2) == A[:,:,::-1,...]).execute()
True
"""
m = astensor(m)
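    # Select every element along every axis, then replace the slice for `axis`
    # with ::-1 so that only that axis is reversed; indexing with the tuple
    # returns a view-like tensor without copying data.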
sl = [slice(None)] * m.ndim
try:
sl[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input tensor"
% (axis, m.ndim))
return m[tuple(sl)]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..datasource import tensor as astensor
def flip(m, axis):
"""
Reverse the order of elements in a tensor along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input tensor.
axis : integer
        Axis in the tensor whose entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip a tensor vertically (axis=0).
fliplr : Flip a tensor horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> import mars.tensor as mt
>>> A = mt.arange(8).reshape((2,2,2))
>>> A.execute()
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> mt.flip(A, 0).execute()
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> mt.flip(A, 1).execute()
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = mt.random.randn(3,4,5)
>>> mt.all(mt.flip(A,2) == A[:,:,::-1,...]).execute()
True
"""
m = astensor(m)
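    # Select every element along every axis, then replace the slice for `axis`
    # with ::-1 so that only that axis is reversed; indexing with the tuple
    # returns a view-like tensor without copying data.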
sl = [slice(None)] * m.ndim
try:
sl[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input tensor"
% (axis, m.ndim))
return m[tuple(sl)]
| en | 0.725855 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Reverse the order of elements in a tensor along the given axis. The shape of the array is preserved, but the elements are reordered. Parameters ---------- m : array_like Input tensor. axis : integer Axis in tensor, which entries are reversed. Returns ------- out : array_like A view of `m` with the entries of axis reversed. Since a view is returned, this operation is done in constant time. See Also -------- flipud : Flip a tensor vertically (axis=0). fliplr : Flip a tensor horizontally (axis=1). Notes ----- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. Examples -------- >>> import mars.tensor as mt >>> A = mt.arange(8).reshape((2,2,2)) >>> A.execute() array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> mt.flip(A, 0).execute() array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) >>> mt.flip(A, 1).execute() array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) >>> A = mt.random.randn(3,4,5) >>> mt.all(mt.flip(A,2) == A[:,:,::-1,...]).execute() True | 3.617157 | 4 |
tests/test_ops/test_upfirdn2d.py | imabackstabber/mmcv | 0 | 9008 | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
_USING_PARROTS = True
try:
from parrots.autograd import gradcheck
except ImportError:
from torch.autograd import gradcheck, gradgradcheck
_USING_PARROTS = False
class TestUpFirDn2d:
"""Unit test for UpFirDn2d.
    Here, we just test the basic case of the upsample version. More general tests
    will be included in other unit tests for the UpFirDnUpsample and
    UpFirDnDownSample modules.
"""
@classmethod
def setup_class(cls):
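        # Build a separable 4x4 smoothing kernel (outer product of the binomial
        # taps [1, 3, 3, 1]), normalize it to sum to 1, and precompute the
        # asymmetric padding used for 2x upsampling with upfirdn2d.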
kernel_1d = torch.tensor([1., 3., 3., 1.])
cls.kernel = kernel_1d[:, None] * kernel_1d[None, :]
cls.kernel = cls.kernel / cls.kernel.sum()
cls.factor = 2
pad = cls.kernel.shape[0] - cls.factor
cls.pad = ((pad + 1) // 2 + cls.factor - 1, pad // 2)
cls.input_tensor = torch.randn((2, 3, 4, 4), requires_grad=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_upfirdn2d(self):
from mmcv.ops import upfirdn2d
if _USING_PARROTS:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
delta=1e-4,
pt_atol=1e-3)
else:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
gradgradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
| # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
_USING_PARROTS = True
try:
from parrots.autograd import gradcheck
except ImportError:
from torch.autograd import gradcheck, gradgradcheck
_USING_PARROTS = False
class TestUpFirDn2d:
"""Unit test for UpFirDn2d.
    Here, we just test the basic case of the upsample version. More general tests
    will be included in other unit tests for the UpFirDnUpsample and
    UpFirDnDownSample modules.
"""
@classmethod
def setup_class(cls):
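        # Build a separable 4x4 smoothing kernel (outer product of the binomial
        # taps [1, 3, 3, 1]), normalize it to sum to 1, and precompute the
        # asymmetric padding used for 2x upsampling with upfirdn2d.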
kernel_1d = torch.tensor([1., 3., 3., 1.])
cls.kernel = kernel_1d[:, None] * kernel_1d[None, :]
cls.kernel = cls.kernel / cls.kernel.sum()
cls.factor = 2
pad = cls.kernel.shape[0] - cls.factor
cls.pad = ((pad + 1) // 2 + cls.factor - 1, pad // 2)
cls.input_tensor = torch.randn((2, 3, 4, 4), requires_grad=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_upfirdn2d(self):
from mmcv.ops import upfirdn2d
if _USING_PARROTS:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
delta=1e-4,
pt_atol=1e-3)
else:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
gradgradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
| en | 0.806097 | # Copyright (c) OpenMMLab. All rights reserved. Unit test for UpFirDn2d. Here, we just test the basic case of upsample version. More gerneal tests will be included in other unit test for UpFirDnUpsample and UpFirDnDownSample modules. | 2.313103 | 2 |
dataset_creation/description_task2.py | rmorain/kirby | 1 | 9009 | import pandas as pd
from tqdm import tqdm
data_list = []
def get_questions(row):
global data_list
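    # Build one multiple-choice item per row: the row's own description is the
    # correct answer and (num_choices - 1) randomly sampled descriptions serve
    # as distractors.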
random_samples = df.sample(n=num_choices - 1)
distractors = random_samples["description"].tolist()
data = {
"question": "What is " + row["label"] + "?",
"correct": row["description"],
"distractors": distractors,
"knowledge": "{" + row["label"] + " : " + row["description"] + "}",
}
data_list.append(data)
debug = False
num_choices = 4
tqdm.pandas(desc="Progress")
df = pd.read_pickle("data/augmented_datasets/pickle/label_description.pkl")
if debug:
df = df.iloc[:10]
df = df.progress_apply(get_questions, axis=1)
new_df = pd.DataFrame(data_list)
if not debug:
new_df.to_pickle("data/augmented_datasets/pickle/description_qa_knowledge.pkl")
else:
__import__("pudb").set_trace()
| import pandas as pd
from tqdm import tqdm
data_list = []
def get_questions(row):
global data_list
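    # Build one multiple-choice item per row: the row's own description is the
    # correct answer and (num_choices - 1) randomly sampled descriptions serve
    # as distractors.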
random_samples = df.sample(n=num_choices - 1)
distractors = random_samples["description"].tolist()
data = {
"question": "What is " + row["label"] + "?",
"correct": row["description"],
"distractors": distractors,
"knowledge": "{" + row["label"] + " : " + row["description"] + "}",
}
data_list.append(data)
debug = False
num_choices = 4
tqdm.pandas(desc="Progress")
df = pd.read_pickle("data/augmented_datasets/pickle/label_description.pkl")
if debug:
df = df.iloc[:10]
df = df.progress_apply(get_questions, axis=1)
new_df = pd.DataFrame(data_list)
if not debug:
new_df.to_pickle("data/augmented_datasets/pickle/description_qa_knowledge.pkl")
else:
__import__("pudb").set_trace()
| none | 1 | 2.653242 | 3 |
|
scarab/commands/attach.py | gonzoua/scarab | 5 | 9010 | <filename>scarab/commands/attach.py<gh_stars>1-10
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
'attach' command implementation
"""
from base64 import b64encode
import argparse
import magic
from ..bugzilla import BugzillaError
from ..context import bugzilla_instance
from .. import ui
from .base import Base
class Command(Base):
"""Attach file to the existing PR"""
def register(self, subparsers):
"""Register 'attach' parser"""
parser = subparsers.add_parser('attach')
parser.set_defaults(func=self.run)
parser.add_argument('attachment', type=str, help='path to the attachment')
parser.add_argument('pr', type=int, help='PR number')
parser.add_argument('-b', '--batch', action='store_true', \
help='batch mode, only print newly created attachment\'s id')
parser.add_argument('-s', '--summary', dest='summary', help='summary for the attachment')
comment_group = parser.add_mutually_exclusive_group()
comment_group.add_argument('-c', '--comment', dest='comment', help='comment text')
comment_group.add_argument('-F', '--comment-file', dest='comment_file', \
type=argparse.FileType('r'), help='file with comment text')
parser.add_argument('-t', '--content-type', dest='content_type', help='file content type')
def run(self, args):
"""Run 'attach' command"""
bugzilla = bugzilla_instance()
content_type = args.content_type
        # Read the attachment data
try:
with open(args.attachment, 'rb') as attach_file:
data = attach_file.read()
except IOError as ex:
ui.fatal('error reading file: {}'.format(str(ex)))
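        # Resolve the comment text: use -c or -F when given, fall back to an empty
        # string in batch mode, and otherwise open an interactive editor.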
comment = args.comment
if comment is None:
if args.comment_file:
comment = args.comment_file.read()
if comment is None:
if args.batch:
comment = ''
else:
comment = ui.edit_message()
# Try and guess file content type
if content_type is None:
mime = magic.Magic(mime=True)
content_type = mime.from_file(args.attachment)
try:
attachment = bugzilla.add_attachment(args.pr, args.attachment, data, \
summary=args.summary, comment=comment, content_type=content_type)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
if args.batch:
ui.output('{}'.format(attachment))
else:
ui.output('New attachment {} has been added to bug {}'.format(attachment, args.pr))
ui.output('Attachment URL: {}'.format(bugzilla.attachment_url(attachment)))
ui.output('Bug URL: {}'.format(bugzilla.bug_url(args.pr)))
| <filename>scarab/commands/attach.py<gh_stars>1-10
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
'attach' command implementation
"""
from base64 import b64encode
import argparse
import magic
from ..bugzilla import BugzillaError
from ..context import bugzilla_instance
from .. import ui
from .base import Base
class Command(Base):
"""Attach file to the existing PR"""
def register(self, subparsers):
"""Register 'attach' parser"""
parser = subparsers.add_parser('attach')
parser.set_defaults(func=self.run)
parser.add_argument('attachment', type=str, help='path to the attachment')
parser.add_argument('pr', type=int, help='PR number')
parser.add_argument('-b', '--batch', action='store_true', \
help='batch mode, only print newly created attachment\'s id')
parser.add_argument('-s', '--summary', dest='summary', help='summary for the attachment')
comment_group = parser.add_mutually_exclusive_group()
comment_group.add_argument('-c', '--comment', dest='comment', help='comment text')
comment_group.add_argument('-F', '--comment-file', dest='comment_file', \
type=argparse.FileType('r'), help='file with comment text')
parser.add_argument('-t', '--content-type', dest='content_type', help='file content type')
def run(self, args):
"""Run 'attach' command"""
bugzilla = bugzilla_instance()
content_type = args.content_type
        # Read the attachment data
try:
with open(args.attachment, 'rb') as attach_file:
data = attach_file.read()
except IOError as ex:
ui.fatal('error reading file: {}'.format(str(ex)))
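        # Resolve the comment text: use -c or -F when given, fall back to an empty
        # string in batch mode, and otherwise open an interactive editor.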
comment = args.comment
if comment is None:
if args.comment_file:
comment = args.comment_file.read()
if comment is None:
if args.batch:
comment = ''
else:
comment = ui.edit_message()
# Try and guess file content type
if content_type is None:
mime = magic.Magic(mime=True)
content_type = mime.from_file(args.attachment)
try:
attachment = bugzilla.add_attachment(args.pr, args.attachment, data, \
summary=args.summary, comment=comment, content_type=content_type)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
if args.batch:
ui.output('{}'.format(attachment))
else:
ui.output('New attachment {} has been added to bug {}'.format(attachment, args.pr))
ui.output('Attachment URL: {}'.format(bugzilla.attachment_url(attachment)))
ui.output('Bug URL: {}'.format(bugzilla.bug_url(args.pr)))
| en | 0.520267 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 'attach' command implementation''' Attach file to the existing PR Register 'attach' parser Run 'attach' command # Read data and encode it to base64 # Try and guess file content type | 2.590868 | 3 |
test/test_airfoil.py | chabotsi/pygmsh | 0 | 9011 | <reponame>chabotsi/pygmsh
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import numpy
import pygmsh
from helpers import compute_volume
def test():
# Airfoil coordinates
airfoil_coordinates = numpy.array([
[1.000000, 0.000000, 0.0],
[0.999023, 0.000209, 0.0],
[0.996095, 0.000832, 0.0],
[0.991228, 0.001863, 0.0],
[0.984438, 0.003289, 0.0],
[0.975752, 0.005092, 0.0],
[0.965201, 0.007252, 0.0],
[0.952825, 0.009744, 0.0],
[0.938669, 0.012538, 0.0],
[0.922788, 0.015605, 0.0],
[0.905240, 0.018910, 0.0],
[0.886092, 0.022419, 0.0],
[0.865417, 0.026096, 0.0],
[0.843294, 0.029903, 0.0],
[0.819807, 0.033804, 0.0],
[0.795047, 0.037760, 0.0],
[0.769109, 0.041734, 0.0],
[0.742094, 0.045689, 0.0],
[0.714107, 0.049588, 0.0],
[0.685258, 0.053394, 0.0],
[0.655659, 0.057071, 0.0],
[0.625426, 0.060584, 0.0],
[0.594680, 0.063897, 0.0],
[0.563542, 0.066977, 0.0],
[0.532136, 0.069789, 0.0],
[0.500587, 0.072303, 0.0],
[0.469022, 0.074486, 0.0],
[0.437567, 0.076312, 0.0],
[0.406350, 0.077752, 0.0],
[0.375297, 0.078743, 0.0],
[0.344680, 0.079180, 0.0],
[0.314678, 0.079051, 0.0],
[0.285418, 0.078355, 0.0],
[0.257025, 0.077096, 0.0],
[0.229618, 0.075287, 0.0],
[0.203313, 0.072945, 0.0],
[0.178222, 0.070096, 0.0],
[0.154449, 0.066770, 0.0],
[0.132094, 0.063005, 0.0],
[0.111248, 0.058842, 0.0],
[0.091996, 0.054325, 0.0],
[0.074415, 0.049504, 0.0],
[0.058573, 0.044427, 0.0],
[0.044532, 0.039144, 0.0],
[0.032343, 0.033704, 0.0],
[0.022051, 0.028152, 0.0],
[0.013692, 0.022531, 0.0],
[0.007292, 0.016878, 0.0],
[0.002870, 0.011224, 0.0],
[0.000439, 0.005592, 0.0],
[0.000000, 0.000000, 0.0],
[0.001535, -0.005395, 0.0],
[0.005015, -0.010439, 0.0],
[0.010421, -0.015126, 0.0],
[0.017725, -0.019451, 0.0],
[0.026892, -0.023408, 0.0],
[0.037880, -0.026990, 0.0],
[0.050641, -0.030193, 0.0],
[0.065120, -0.033014, 0.0],
[0.081257, -0.035451, 0.0],
[0.098987, -0.037507, 0.0],
[0.118239, -0.039185, 0.0],
[0.138937, -0.040493, 0.0],
[0.161004, -0.041444, 0.0],
[0.184354, -0.042054, 0.0],
[0.208902, -0.042343, 0.0],
[0.234555, -0.042335, 0.0],
[0.261221, -0.042058, 0.0],
[0.288802, -0.041541, 0.0],
[0.317197, -0.040817, 0.0],
[0.346303, -0.039923, 0.0],
[0.376013, -0.038892, 0.0],
[0.406269, -0.037757, 0.0],
[0.437099, -0.036467, 0.0],
[0.468187, -0.035009, 0.0],
[0.499413, -0.033414, 0.0],
[0.530654, -0.031708, 0.0],
[0.561791, -0.029917, 0.0],
[0.592701, -0.028066, 0.0],
[0.623264, -0.026176, 0.0],
[0.653358, -0.024269, 0.0],
[0.682867, -0.022360, 0.0],
[0.711672, -0.020466, 0.0],
[0.739659, -0.018600, 0.0],
[0.766718, -0.016774, 0.0],
[0.792738, -0.014999, 0.0],
[0.817617, -0.013284, 0.0],
[0.841253, -0.011637, 0.0],
[0.863551, -0.010068, 0.0],
[0.884421, -0.008583, 0.0],
[0.903777, -0.007191, 0.0],
[0.921540, -0.005900, 0.0],
[0.937637, -0.004717, 0.0],
[0.952002, -0.003650, 0.0],
[0.964576, -0.002708, 0.0],
[0.975305, -0.001896, 0.0],
[0.984145, -0.001222, 0.0],
[0.991060, -0.000691, 0.0],
[0.996020, -0.000308, 0.0],
[0.999004, -0.000077, 0.0]
])
# Scale airfoil to input coord
coord = 1.0
airfoil_coordinates *= coord
# Instantiate geometry object
geom = pygmsh.built_in.Geometry()
# Create polygon for airfoil
char_length = 1.0e-1
airfoil = geom.add_polygon(
airfoil_coordinates,
char_length,
make_surface=False
)
# Create surface for numerical domain with an airfoil-shaped hole
left_dist = 1.0
right_dist = 3.0
top_dist = 1.0
bottom_dist = 1.0
xmin = airfoil_coordinates[:, 0].min() - left_dist*coord
xmax = airfoil_coordinates[:, 0].max() + right_dist*coord
ymin = airfoil_coordinates[:, 1].min() - bottom_dist*coord
ymax = airfoil_coordinates[:, 1].max() + top_dist*coord
domainCoordinates = numpy.array([
[xmin, ymin, 0.0],
[xmax, ymin, 0.0],
[xmax, ymax, 0.0],
[xmin, ymax, 0.0],
])
polygon = geom.add_polygon(
domainCoordinates,
char_length,
holes=[airfoil]
)
geom.add_raw_code('Recombine Surface {%s};' % polygon.surface.id)
ref = 10.525891646546
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
assert abs(compute_volume(points, cells) - ref) < 1.0e-2 * ref
return points, cells
if __name__ == '__main__':
import meshio
meshio.write('airfoil.vtu', *test())
LeetCode/python3/287.py | ZintrulCre/LeetCode_Archiver | 279 | 9012 |
from typing import List


class Solution:
def findDuplicate(self, nums: List[int]) -> int:
p1, p2 = nums[0], nums[nums[0]]
while nums[p1] != nums[p2]:
p1 = nums[p1]
p2 = nums[nums[p2]]
p2 = 0
while nums[p1] != nums[p2]:
p1 = nums[p1]
p2 = nums[p2]
return nums[p1]
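The two loops above are a variant of Floyd's tortoise-and-hare cycle detection applied to LeetCode 287: each value is treated as a pointer into the list, the fast and slow pointers meet inside the cycle, and a second pass from the start locates the cycle entry, which is the duplicated value. A minimal, hypothetical driver for running the class outside the LeetCode harness; the input list is illustrative:

if __name__ == "__main__":
    # [1, 3, 4, 2, 2] holds the values 1..4 with 2 repeated, so the
    # expected result is 2.
    print(Solution().findDuplicate([1, 3, 4, 2, 2]))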
src/twisted/test/myrebuilder1.py | mathieui/twisted | 9,953 | 9013 |
class A:
def a(self):
return 'a'
class B(A, object):
def b(self):
return 'b'
class Inherit(A):
def a(self):
return 'c'
examples/test_yield_8.py | MateuszG/django_auth | 2 | 9014 |
import pytest
@pytest.yield_fixture
def passwd():
print ("\nsetup before yield")
f = open("/etc/passwd")
yield f.readlines()
print ("teardown after yield")
f.close()
def test_has_lines(passwd):
print ("test called")
assert passwd
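pytest.yield_fixture is deprecated in current pytest releases, where the plain fixture decorator handles yield-based setup/teardown directly. A sketch of the equivalent fixture under that newer API, renamed here so it does not clash with the one above:

import pytest


@pytest.fixture
def passwd_modern():
    # Same setup/teardown pattern as the passwd fixture above, using the
    # non-deprecated decorator.
    print("\nsetup before yield")
    f = open("/etc/passwd")
    yield f.readlines()
    print("teardown after yield")
    f.close()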
modules/google-earth-engine/docker/src/sepalinternal/gee.py | BuddyVolly/sepal | 153 | 9015 |
import json
from threading import Semaphore
import ee
from flask import request
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
service_account_credentials = None
import logging
export_semaphore = Semaphore(5)
get_info_semaphore = Semaphore(2)
def init_service_account_credentials(args):
global service_account_credentials
with open(args['gee_key_path'], 'r') as file_:
key_data = file_.read()
signer = crypt.RSASigner.from_string(key_data)
service_account_credentials = service_account.Credentials(
signer=signer,
service_account_email=args['gee_email'],
token_uri=ee.oauth.TOKEN_URI,
scopes=ee.oauth.SCOPES + ['https://www.googleapis.com/auth/drive']
)
def init_ee():
credentials = service_account_credentials
if 'sepal-user' in request.headers:
user = json.loads(request.headers['sepal-user'])
googleTokens = user.get('googleTokens', None)
if googleTokens:
credentials = Credentials(googleTokens['accessToken'])
ee.InitializeThread(credentials)
def to_asset_id(asset_path):
asset_roots = ee.data.getAssetRoots()
if not asset_roots:
raise Exception('User has no GEE asset roots')
return asset_roots[0]['id'] + '/' + asset_path
def delete_asset_collection(asset_id):
logging.info('Recursively deleting ' + asset_id)
if ee.data.getInfo(asset_id):
images = ee.data.getList({
'id': asset_id,
'fields': 'id'
})
for image in images:
ee.data.deleteAsset(image['id'])
logging.info('Deleted ' + image['id'])
ee.data.deleteAsset(asset_id)
logging.info('Deleted ' + asset_id)
def create_asset_image_collection(asset_id):
delete_asset_collection(asset_id)
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_IMAGE_COLL,
mk_parents=True
)
def create_asset_folder(asset_id):
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_FOLDER,
mk_parents=True
)
def get_info(ee_object):
try:
get_info_semaphore.acquire()
return ee_object.getInfo()
finally:
get_info_semaphore.release()
micropython/007_boat_sink.py | mirontoli/tolle-rasp | 2 | 9016 |
#https://microbit-micropython.readthedocs.io/en/latest/tutorials/images.html#animation
from microbit import *
boat1 = Image("05050:05050:05050:99999:09990")
boat2 = Image("00000:05050:05050:05050:99999")
boat3 = Image("00000:00000:05050:05050:05050")
boat4 = Image("00000:00000:00000:05050:05050")
boat5 = Image("00000:00000:00000:00000:05050")
boat6 = Image("00000:00000:00000:00000:00000")
all_boats = [boat1, boat2, boat3, boat4, boat5, boat6]
display.show(all_boats, delay=200) | #https://microbit-micropython.readthedocs.io/en/latest/tutorials/images.html#animation
from microbit import *
boat1 = Image("05050:05050:05050:99999:09990")
boat2 = Image("00000:05050:05050:05050:99999")
boat3 = Image("00000:00000:05050:05050:05050")
boat4 = Image("00000:00000:00000:05050:05050")
boat5 = Image("00000:00000:00000:00000:05050")
boat6 = Image("00000:00000:00000:00000:00000")
all_boats = [boat1, boat2, boat3, boat4, boat5, boat6]
display.show(all_boats, delay=200) | en | 0.615244 | #https://microbit-micropython.readthedocs.io/en/latest/tutorials/images.html#animation | 3.028838 | 3 |
examples/api-samples/inc_samples/convert_callback.py | groupdocs-legacy-sdk/python | 0 | 9017 |
import os
import json
import shutil
import time
from pyramid.renderers import render_to_response
from pyramid.response import Response
from groupdocs.ApiClient import ApiClient
from groupdocs.AsyncApi import AsyncApi
from groupdocs.StorageApi import StorageApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
# Checking value on null
def IsNotNull(value):
return value is not None and len(value) > 0
def convert_callback(request):
currentDir = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(currentDir + '/../user_info.txt'):
f = open(currentDir + '/../user_info.txt')
lines = f.readlines()
f.close()
clientId = lines[0].replace("\r\n", "")
privateKey = lines[1]
if IsNotNull(request.json_body):
jsonPostData = request.json_body
jobId = jsonPostData['SourceId']
# Create signer object
signer = GroupDocsRequestSigner(privateKey)
# Create apiClient object
apiClient = ApiClient(signer)
# Create AsyncApi object
async = AsyncApi(apiClient)
# Create Storage object
api = StorageApi(apiClient)
if jobId != '':
time.sleep(5)
# Make request to api for get document info by job id
jobs = async.GetJobDocuments(clientId, jobId)
if jobs.status == 'Ok':
# Get file guid
resultGuid = jobs.result.inputs[0].outputs[0].guid
name = jobs.result.inputs[0].outputs[0].name
currentDir = os.path.dirname(os.path.realpath(__file__))
downloadFolder = currentDir + '/../downloads/'
if not os.path.isdir(downloadFolder):
os.makedirs(downloadFolder)
                # Downloading the file
fs = api.GetFile(clientId, resultGuid);
if fs:
filePath = downloadFolder + name
with open(filePath, 'wb') as fp:
shutil.copyfileobj(fs.inputStream, fp)
PyIK/src/litearm.py | AliShug/EvoArm | 110 | 9018 |
from __future__ import print_function
import numpy as np
import struct
import solvers
import pid
from util import *
MOTORSPEED = 0.9
MOTORMARGIN = 1
MOTORSLOPE = 30
ERRORLIM = 5.0
class ArmConfig:
"""Holds an arm's proportions, limits and other configuration data"""
def __init__(self,
main_length = 148.4,
forearm_length = 160,
linkage_length = 155,
lower_actuator_length = 65,
upper_actuator_length = 54.4,
wrist_length = 90.52,
shoulder_offset = [-9.7, 18.71]):
self.main_length = main_length
self.forearm_length = forearm_length
self.linkage_length = linkage_length
self.lower_actuator_length = lower_actuator_length
self.upper_actuator_length = upper_actuator_length
self.wrist_length = wrist_length;
self.shoulder_offset = shoulder_offset
class ArmPose:
"""
Defines a physical configuration of a LiteArm robot arm.
Internal angles are relative to vertical (elevator/actuator) or straight
forward (swing), and are stored in radians. Extracted servo angles range
0-300 and are measured in degrees.
Provides methods for:
- finding the required servo angles to reach the pose
- checking the validity of the pose
"""
structFormat = 'fffff'
@staticmethod
def calcElevatorAngle(servoAngle):
return radians(178.21 - servoAngle)
@staticmethod
def calcSwingAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcActuatorAngle(servoAngle):
return radians(servoAngle - 204.78)
@staticmethod
def calcWristXAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcWristYAngle(servoAngle):
return radians(servoAngle - 147.0)
def __init__(self,
arm_config,
swing_angle,
shoulder_angle,
actuator_angle,
elbow_angle,
elbow2D,
wrist2D,
effector2D,
effector,
wrist_x,
wrist_y):
self.cfg = arm_config
self.swing_angle = swing_angle
self.shoulder_angle = shoulder_angle
self.actuator_angle = actuator_angle
self.elbow_angle = elbow_angle
# Joints in the arm
shoulder = rotate(self.cfg.shoulder_offset, swing_angle)
self.shoulder2D = [self.cfg.shoulder_offset[1], 0]
self.shoulder = [shoulder[0], 0, shoulder[1]]
self.wrist2D = wrist2D
self.effector2D = effector2D
self.effector = effector
# Construct the 3D elbow & wrist positions from the 2D (planar) IK
# solution
arm_vec = effector - self.shoulder
arm_vec[1] = 0
self.elbow2D = elbow2D
self.elbow = self.shoulder + normalize(arm_vec)*elbow2D[0]
self.elbow[1] = elbow2D[1]
self.wrist = self.effector - normalize(arm_vec)*arm_config.wrist_length
# Wrist pose
self.wristXAngle = wrist_x
self.wristYAngle = wrist_y
def getServoElevator(self):
return 178.21 - degrees(self.shoulder_angle)
def getServoActuator(self):
return degrees(self.actuator_angle) + 204.78
def getServoSwing(self):
return 150 - degrees(self.swing_angle)
def getServoWristX(self):
return 150 - degrees(self.wristXAngle)
def getServoWristY(self):
return 147 + degrees(self.wristYAngle)
def armDiffAngle(self):
return degrees(self.shoulder_angle - self.actuator_angle)
def checkActuator(self):
angle = self.getServoActuator()
return angle >= 95 and angle <= 250
def checkDiff(self):
angle = self.armDiffAngle()
return angle >= 44 and angle <= 175
def checkElevator(self):
angle = self.getServoElevator()
return angle >= 60 and angle <= 210
def checkForearm(self):
angle = degrees(self.elbow_angle + self.shoulder_angle)
return angle < 200 and angle > 80
def checkSwing(self):
angle = self.getServoSwing()
return angle >= 60 and angle <= 240
def checkWristX(self):
angle = self.getServoWristX()
return angle >= 60 and angle <= 240
def checkWristY(self):
angle = self.getServoWristY()
return angle >= 60 and angle <= 160
def checkPositioning(self):
# When Y>0 Forearm always faces outwards
if self.wrist2D[1] > 0 and self.wrist2D[0] < self.elbow2D[0]:
return False
# No valid positions X<=0
if self.wrist2D[0] <= 0:
return False
# Effector height range
if self.effector[1] > 180 or self.effector[1] < -200:
return False
return True
def checkClearance(self):
return (self.checkDiff() and self.checkActuator() and
self.checkElevator() and self.checkSwing() and
self.checkWristX() and self.checkWristY() and
self.checkPositioning() and self.checkForearm())
def serialize(self):
"""Returns a packed struct holding the pose information"""
return struct.pack(
ArmPose.structFormat,
self.swing_angle,
self.shoulder_angle,
self.elbow_angle,
self.wristXAngle,
self.wristYAngle
)
class ArmController:
def __init__(self,
servo_swing,
servo_shoulder,
servo_elbow,
servo_wrist_x,
servo_wrist_y,
arm_config,
motion_enable = False):
# Solvers are responsible for calculating the target servo positions to
# reach a given goal position
self.ik = solvers.IKSolver(
arm_config.main_length,
arm_config.forearm_length,
arm_config.wrist_length,
arm_config.shoulder_offset)
self.physsolver = solvers.PhysicalSolver(
arm_config.main_length,
arm_config.linkage_length,
arm_config.lower_actuator_length,
arm_config.upper_actuator_length)
# Servos
self.servos = {}
self.servos["swing"] = servo_swing
self.servos["shoulder"] = servo_shoulder
self.servos["elbow"] = servo_elbow
self.servos["wrist_x"] = servo_wrist_x
self.servos["wrist_y"] = servo_wrist_y
for key, servo in self.servos.iteritems():
if servo is None:
print ("Warning: {0} servo not connected".format(key))
else:
# Initialise a PID controller for the servo
if servo.protocol == 1:
servo.setGoalSpeed(-MOTORSPEED)
servo.data['pid'] = pid.PIDControl(2.4, 0, 0.4)
else:
servo.setGoalSpeed(0)
servo.data['error'] = 0.0
# Make sure the goal speed is set
servo.setTorqueEnable(1)
if servo.protocol == 1:
print("Setting slope")
servo.setCWMargin(MOTORMARGIN)
servo.setCCWMargin(MOTORMARGIN)
servo.setCWSlope(MOTORSLOPE)
servo.setCCWSlope(MOTORSLOPE)
# Store parameters
self.motion_enable = True
self.enableMovement(False)
self.cfg = arm_config
# Dirty flags for stored poses
self.ik_pose = None
self.ik_dirty = True
self.real_pose = None
self.real_dirty = True
# Current target pose
self.target_pose = None
def enableMovement(self, enable):
changed = False
if enable and not self.motion_enable:
print ("Warning: Arm enabled")
self.motion_enable = True
changed = True
elif not enable:
self.motion_enable = False
changed = True
if changed:
# Set servos on/off
if self.servos['swing'] is not None:
self.servos['swing'].setTorqueEnable(self.motion_enable)
if self.servos['shoulder'] is not None:
self.servos['shoulder'].setTorqueEnable(self.motion_enable)
if self.servos['elbow'] is not None:
self.servos['elbow'].setTorqueEnable(self.motion_enable)
if self.servos['wrist_x'] is not None:
self.servos['wrist_x'].setTorqueEnable(self.motion_enable)
if self.servos['wrist_y'] is not None:
self.servos['wrist_y'].setTorqueEnable(self.motion_enable)
def setWristGoalPosition(self, pos):
self.ik.setGoal(pos)
self.ik_dirty = True
def setWristGoalDirection(self, normal):
self.ik.setWristDir(normal)
self.ik_dirty = True
def getIKPose(self):
if self.ik_dirty and self.ik.valid:
# Construct geometry of arm from IK state
main_arm = self.ik.elbow - self.ik.originpl
arm_vert_angle = sigangle(main_arm, vertical)
forearm = self.ik.wristpl - self.ik.elbow
elbow_angle = angle_between(main_arm, forearm)
# Solve actuator angle for given elbow angle
# Base angle is between the main arm and actuator
base_angle = self.physsolver.inverse_forearm(elbow_angle)
actuator_angle = arm_vert_angle - base_angle
self.ik_pose = ArmPose(
self.cfg,
swing_angle = self.ik.swing,
# angles from vertical
shoulder_angle = arm_vert_angle,
actuator_angle = actuator_angle,
# angle between the main arm and forearm
elbow_angle = elbow_angle,
elbow2D = self.ik.elbow,
wrist2D = self.ik.wristpl,
effector2D = self.ik.goalpl,
effector = self.ik.goal,
wrist_x = self.ik.wrist_x,
wrist_y = self.ik.wrist_y
)
return self.ik_pose
def pollServos(self):
"""Poll the real-world servo positions"""
for servo in self.servos.itervalues():
if servo is not None:
newPos = servo.getPosition()
if type(newPos) is float:
servo.data['pos'] = newPos
def clearPositionError(self):
"""Clears the servo's position-error accumulators"""
for servo in self.servos.itervalues():
if servo is not None and servo.protocol == 1:
servo.data['error'] = 0.0
def getRealPose(self):
"""Retrieve the real-world arm pose, or None if not all servos are
connected.
"""
if any([servo is None for servo in self.servos.itervalues()]):
return None
# This whole function is essentially just FK based on the known servo
# angles
swing_servo = self.servos['swing'].data['pos']
elevator_servo = self.servos['shoulder'].data['pos']
actuator_servo = self.servos['elbow'].data['pos']
wrist_x_servo = self.servos['wrist_x'].data['pos']
wrist_y_servo = self.servos['wrist_y'].data['pos']
# Find the internal arm-pose angles for the given servo positions
swing_angle = ArmPose.calcSwingAngle(swing_servo)
elevator_angle = ArmPose.calcElevatorAngle(elevator_servo)
actuator_angle = ArmPose.calcActuatorAngle(actuator_servo)
wrist_x_angle = ArmPose.calcWristXAngle(wrist_x_servo)
wrist_y_angle = ArmPose.calcWristYAngle(wrist_y_servo)
# Solve elbow angle for given actuator and elevator angles
# (this is the angle from the elevator arm's direction to the forearm's)
elbow_angle = self.physsolver.solve_forearm(elevator_angle, actuator_angle)
# FK positions from config and angles
offset = self.cfg.shoulder_offset
shoulder2D = np.array([offset[1], 0])
elbow2D = shoulder2D + rotate(vertical, elevator_angle)*self.cfg.main_length
wrist2D = elbow2D + rotate(vertical, elevator_angle + elbow_angle)*self.cfg.forearm_length
effector2D = wrist2D + [self.cfg.wrist_length, 0]
# 3D Effector calculation is a little more involved
td = rotate([offset[0], effector2D[0]], swing_angle)
effector = np.array([td[0], effector2D[1], td[1]])
pose = ArmPose(
self.cfg,
swing_angle, elevator_angle, actuator_angle,
elbow_angle, elbow2D, wrist2D, effector2D,
effector, wrist_x_angle, wrist_y_angle)
return pose
def setTargetPose(self, new_pose):
self.target_pose = new_pose
def tick(self):
if self.target_pose is not None:
if self.motion_enable:
# Drive servos
gain = 0.1
if self.servos['swing'] is not None:
s = self.servos['swing']
pos = s.data['pos']
target = self.target_pose.getServoSwing()
# err = min(10, pos-target)
# s.data['error'] += err*gain
s.setGoalPosition(target)
if self.servos['shoulder'] is not None:
s = self.servos['shoulder']
# cumulative error
pos = s.data['pos']
target = self.target_pose.getServoElevator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['elbow'] is not None:
s = self.servos['elbow']
pos = s.data['pos']
target = self.target_pose.getServoActuator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['wrist_x'] is not None:
self.servos['wrist_x'].setGoalPosition(self.target_pose.getServoWristX())
if self.servos['wrist_y'] is not None:
self.servos['wrist_y'].setGoalPosition(self.target_pose.getServoWristY())
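The constants inside ArmPose's conversion helpers (178.21, 150.0, 204.78, 147.0) encode each servo's mounting offset, and the getServo* methods are their inverses. A small standalone sketch of that round trip for the swing axis, using plain math rather than the project's util helpers:

from math import radians, degrees

# Mirrors ArmPose.calcSwingAngle / getServoSwing above: 150 servo units is
# straight ahead (0 rad), smaller readings map to positive swing angles.
servo_reading = 120.0
swing_angle = radians(150.0 - servo_reading)            # ~0.524 rad
assert abs((150.0 - degrees(swing_angle)) - servo_reading) < 1e-9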
create_augmented_versions.py | jakobabesser/piano_aug | 0 | 9019 |
from pedalboard import Reverb, Compressor, Gain, LowpassFilter, Pedalboard
import soundfile as sf
if __name__ == '__main__':
# replace by path of unprocessed piano file if necessar
fn_wav_source = 'live_grand_piano.wav'
# augmentation settings using Pedalboard library
settings = {'rev-': [Reverb(room_size=.4)],
'rev+': [Reverb(room_size=.8)],
'comp+': [Compressor(threshold_db=-15, ratio=20)],
'comp-': [Compressor(threshold_db=-10, ratio=10)],
'gain+': [Gain(gain_db=15)], # clipping
'gain-': [Gain(gain_db=5)],
'lpf-': [LowpassFilter(cutoff_frequency_hz=50)],
'lpf+': [LowpassFilter(cutoff_frequency_hz=250)]}
# create augmented versions
for s in settings.keys():
# load unprocessed piano recording
audio, sample_rate = sf.read(fn_wav_source)
# create Pedalboard object
board = Pedalboard(settings[s])
# create augmented audio
effected = board(audio, sample_rate)
# save it
fn_target = fn_wav_source.replace('.wav', f'_{s}.wav')
sf.write(fn_target, effected, sample_rate)
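Each augmented version above applies one effect in isolation; Pedalboard also accepts a chain of plugins in a single board, which is useful when, say, compression and reverb should be rendered together. A sketch following the same call pattern as the script, with an illustrative output name:

# Sketch only: chain two of the effects above in one board.
chain = Pedalboard([Compressor(threshold_db=-15, ratio=20), Reverb(room_size=0.8)])
audio, sample_rate = sf.read(fn_wav_source)
sf.write(fn_wav_source.replace('.wav', '_comp_rev.wav'), chain(audio, sample_rate), sample_rate)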
flux/migrations/versions/9ba67b798fa_add_request_system.py | siq/flux | 0 | 9020 |
"""add_request_system
Revision: <KEY>
Revises: 31b92bf6506d
Created: 2013-07-23 02:49:09.342814
"""
revision = '<KEY>'
down_revision = '31b92bf6506d'
from alembic import op
from spire.schema.fields import *
from spire.mesh import SurrogateType
from sqlalchemy import (Column, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint,
CheckConstraint, UniqueConstraint)
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('request',
Column('id', UUIDType(), nullable=False),
Column('name', TextType(), nullable=False),
Column('status', EnumerationType(), nullable=False),
Column('originator', TokenType(), nullable=False),
Column('assignee', TokenType(), nullable=False),
PrimaryKeyConstraint('id'),
UniqueConstraint('name'),
)
op.create_table('request_slot',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('slot', TokenType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('request_attachment',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=True),
Column('title', TextType(), nullable=True),
Column('attachment', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
op.create_table('request_product',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('product', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('message',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('author', TokenType(), nullable=False),
Column('occurrence', DateTimeType(timezone=True), nullable=False),
Column('message', TextType(), nullable=True),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('message')
op.drop_table('request_product')
op.drop_table('request_attachment')
op.drop_table('request_slot')
op.drop_table('request')
| """add_request_system
Revision: <KEY>
Revises: 31b92bf6506d
Created: 2013-07-23 02:49:09.342814
"""
revision = '<KEY>'
down_revision = '31b92bf6506d'
from alembic import op
from spire.schema.fields import *
from spire.mesh import SurrogateType
from sqlalchemy import (Column, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint,
CheckConstraint, UniqueConstraint)
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('request',
Column('id', UUIDType(), nullable=False),
Column('name', TextType(), nullable=False),
Column('status', EnumerationType(), nullable=False),
Column('originator', TokenType(), nullable=False),
Column('assignee', TokenType(), nullable=False),
PrimaryKeyConstraint('id'),
UniqueConstraint('name'),
)
op.create_table('request_slot',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('slot', TokenType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('request_attachment',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=True),
Column('title', TextType(), nullable=True),
Column('attachment', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
op.create_table('request_product',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('product', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('message',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('author', TokenType(), nullable=False),
Column('occurrence', DateTimeType(timezone=True), nullable=False),
Column('message', TextType(), nullable=True),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('message')
op.drop_table('request_product')
op.drop_table('request_attachment')
op.drop_table('request_slot')
op.drop_table('request')
| en | 0.52525 | add_request_system Revision: <KEY> Revises: 31b92bf6506d Created: 2013-07-23 02:49:09.342814 | 1.604071 | 2 |
src/python/Vector2_TEST.py | clalancette/ign-math | 43 | 9021 |
# Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
from ignition.math import Vector2d
from ignition.math import Vector2f
class TestVector2(unittest.TestCase):
def test_construction(self):
v = Vector2d()
self.assertAlmostEqual(0.0, v.x())
self.assertAlmostEqual(0.0, v.y())
vec = Vector2d(1, 0)
self.assertEqual(vec.x(), 1)
self.assertEqual(vec.y(), 0)
vec2 = Vector2d(vec)
self.assertEqual(vec2, vec)
# Copy
vec3 = vec
self.assertEqual(vec3, vec)
# Inequality
vec4 = Vector2d()
self.assertNotEqual(vec, vec4)
def test_vector2(self):
v = Vector2d(1, 2)
# Distance
self.assertAlmostEqual(2.236, v.distance(Vector2d()), delta=1e-2)
# Normalize
v.normalize()
self.assertTrue(v.equal(Vector2d(0.447214, 0.894427), 1e-4))
# Set
v.set(4, 5)
self.assertTrue(v.equal(Vector2d(4, 5), 1e-4))
# Abs
v.set(-1, -2)
self.assertTrue(v.abs().equal(Vector2d(1, 2), 1e-4))
# _eq_
v = Vector2d(6, 7)
self.assertTrue(v.equal(Vector2d(6, 7), 1e-4))
# _add_
v = v + Vector2d(1, 2)
self.assertTrue(v.equal(Vector2d(7, 9), 1e-4))
v += Vector2d(5, 6)
self.assertTrue(v.equal(Vector2d(12, 15), 1e-4))
# __sub__
v = v - Vector2d(2, 4)
self.assertTrue(v.equal(Vector2d(10, 11), 1e-4))
v.set(2, 4)
v -= Vector2d(1, 6)
self.assertTrue(v.equal(Vector2d(1, -2), 1e-4))
# __truediv__
v.set(10, 6)
v = v / Vector2d(2, 3)
self.assertTrue(v.equal(Vector2d(5, 2), 1e-4))
v.set(10, 6)
v /= Vector2d(2, 3)
self.assertTrue(v.equal(Vector2d(5, 2), 1e-4))
# __truediv__ int
v.set(10, 6)
v = v / 2
self.assertTrue(v.equal(Vector2d(5, 3), 1e-4))
v.set(10, 6)
v /= 2
self.assertTrue(v.equal(Vector2d(5, 3), 1e-4))
# __mul__
v.set(10, 6)
v = v * Vector2d(2, 4)
self.assertTrue(v.equal(Vector2d(20, 24), 1e-4))
v.set(10, 6)
v *= Vector2d(2, 4)
self.assertTrue(v.equal(Vector2d(20, 24), 1e-4))
# __mul__ int
v.set(10, 6)
v = v * 2
self.assertTrue(v.equal(Vector2d(20, 12), 1e-4))
v.set(10, 6)
v *= 2
self.assertTrue(v.equal(Vector2d(20, 12), 1e-4))
# is_finite
self.assertTrue(v.is_finite())
def test_max(self):
vec1 = Vector2d(0.1, 0.2)
vec2 = Vector2d(0.3, 0.5)
vec3 = Vector2d(0.4, 0.2)
self.assertAlmostEqual(vec1.max(), 0.2)
self.assertAlmostEqual(vec3.max(), 0.4)
vec1.max(vec2)
self.assertAlmostEqual(vec1, Vector2d(0.3, 0.5))
vec1.max(vec3)
self.assertAlmostEqual(vec1, Vector2d(0.4, 0.5))
def test_min(self):
vec1 = Vector2d(0.3, 0.5)
vec2 = Vector2d(0.1, 0.2)
vec3 = Vector2d(0.05, 0.1)
self.assertAlmostEqual(vec1.min(), 0.3)
self.assertAlmostEqual(vec3.min(), 0.05)
vec1.min(vec2)
self.assertAlmostEqual(vec1, Vector2d(0.1, 0.2))
vec1.min(vec3)
self.assertAlmostEqual(vec1, Vector2d(0.05, 0.1))
def test_equal_tolerance(self):
# Test Equal function with specified tolerance
self.assertFalse(Vector2d.ZERO.equal(Vector2d.ONE, 1e-6))
self.assertFalse(Vector2d.ZERO.equal(Vector2d.ONE, 1e-3))
self.assertFalse(Vector2d.ZERO.equal(Vector2d.ONE, 1e-1))
self.assertTrue(Vector2d.ZERO.equal(Vector2d.ONE, 1))
self.assertTrue(Vector2d.ZERO.equal(Vector2d.ONE, 1.1))
def test_dot(self):
v = Vector2d(1, 2)
self.assertAlmostEqual(v.dot(Vector2d(3, 4)), 11.0)
self.assertAlmostEqual(v.dot(Vector2d(0, 0)), 0.0)
self.assertAlmostEqual(v.dot(Vector2d(1, 0)), 1.0)
self.assertAlmostEqual(v.dot(Vector2d(0, 1)), 2.0)
def test_correct(self):
vec1 = Vector2d(0, float("nan"))
vec2 = Vector2d(float("inf"), -1)
vec3 = Vector2d(10, -2)
vec1.correct()
vec2.correct()
vec3.correct()
self.assertAlmostEqual(vec1, Vector2d(0, 0))
self.assertAlmostEqual(vec2, Vector2d(0, -1))
self.assertAlmostEqual(vec3, Vector2d(10, -2))
def test_abs_dot(self):
v = Vector2d(1, -2)
self.assertAlmostEqual(v.abs_dot(Vector2d(3, 4)), 11.0)
self.assertAlmostEqual(v.abs_dot(Vector2d(0, 0)), 0.0)
self.assertAlmostEqual(v.abs_dot(Vector2d(1, 0)), 1.0)
self.assertAlmostEqual(v.abs_dot(Vector2d(0, 1)), 2.0)
def test_add(self):
vec1 = Vector2d(0.1, 0.2)
vec2 = Vector2d(1.1, 2.2)
vec3 = vec1
vec3 += vec2
self.assertAlmostEqual(vec1 + vec2, Vector2d(1.2, 2.4))
self.assertAlmostEqual(vec3, Vector2d(1.2, 2.4))
# Add zero
# Scalar right
self.assertEqual(vec1 + 0, vec1)
# Vector left and right
self.assertAlmostEqual(Vector2d.ZERO + vec1, vec1)
self.assertAlmostEqual(vec1 + Vector2d.ZERO, vec1)
# Addition assigment
vec4 = Vector2d(vec1)
vec4 += 0
self.assertEqual(vec4, vec1)
vec4 += Vector2d.ZERO
self.assertAlmostEqual(vec4, vec1)
# Add non-trivial scalar values left and right
self.assertEqual(vec1 + 2.5, Vector2d(2.6, 2.7))
vec1 = vec4
vec4 += 2.5
self.assertEqual(vec4, Vector2d(2.6, 2.7))
def test_sub(self):
vec1 = Vector2d(0.1, 0.2)
vec2 = Vector2d(1.1, 2.2)
vec3 = vec2
vec3 -= vec1
self.assertAlmostEqual(vec2 - vec1, Vector2d(1.0, 2.0))
self.assertAlmostEqual(vec3, Vector2d(1.0, 2.0))
# Subtraction with zeros
# Scalar right
self.assertEqual(vec1 - 0, vec1)
# Vector left and right
self.assertAlmostEqual(Vector2d.ZERO - vec1, -vec1)
self.assertAlmostEqual(vec1 - Vector2d.ZERO, vec1)
# Subtraction assignment
vec4 = Vector2d(vec1)
vec4 -= 0
self.assertEqual(vec4, vec1)
vec4 -= Vector2d.ZERO
self.assertAlmostEqual(vec4, vec1)
# Subtract non-trivial scalar values left and right
self.assertEqual(vec1 - 2.5, -Vector2d(2.4, 2.3))
vec4 = vec1
vec4 -= 2.5
self.assertEqual(vec4, -Vector2d(2.4, 2.3))
def test_multiply(self):
v = Vector2d(0.1, -4.2)
vec2 = v * 2.0
self.assertEqual(vec2, Vector2d(0.2, -8.4))
vec2 *= 4.0
self.assertEqual(vec2, Vector2d(0.8, -33.6))
# Multiply by zero
# Scalar right
self.assertEqual(v * 0, Vector2d.ZERO)
# Element-wise vector multiplication
self.assertEqual(v * Vector2d.ZERO, Vector2d.ZERO)
# Multiply by one
# Scalar right
self.assertEqual(v * 1, v)
# Element-wise vector multiplication
self.assertEqual(v * Vector2d.ONE, v)
# Multiply by non-trivial scalar value
scalar = 2.5
expect = Vector2d(0.25, -10.5)
self.assertEqual(v * scalar, expect)
# Multiply by itself element-wise
v.set(0.1, 0.5)
self.assertAlmostEqual(v * v, Vector2d(0.01, 0.25))
    def test_length(self):
# Zero vector
self.assertAlmostEqual(Vector2d.ZERO.length(), 0.0)
self.assertAlmostEqual(Vector2d.ZERO.squared_length(), 0.0)
# One vector
self.assertAlmostEqual(Vector2d.ONE.length(),
math.sqrt(2), delta=1e-10)
self.assertAlmostEqual(Vector2d.ONE.squared_length(), 2.0)
# Arbitrary vector
v = Vector2d(0.1, -4.2)
self.assertAlmostEqual(v.length(), 4.20119030752, delta=1e-10)
self.assertAlmostEqual(v.squared_length(), 17.65)
# Integer vector
v = Vector2d(3, 4)
self.assertAlmostEqual(v.length(), 5)
self.assertAlmostEqual(v.squared_length(), 25)
def test_nan(self):
nanVec = Vector2d.NAN
self.assertFalse(nanVec.is_finite())
self.assertTrue(math.isnan(nanVec.x()))
self.assertTrue(math.isnan(nanVec.y()))
nanVec.correct()
self.assertEqual(Vector2d.ZERO, nanVec)
self.assertTrue(nanVec.is_finite())
nanVecF = Vector2f.NAN
self.assertFalse(nanVecF.is_finite())
self.assertTrue(math.isnan(nanVecF.x()))
self.assertTrue(math.isnan(nanVecF.y()))
nanVecF.correct()
self.assertEqual(Vector2f.ZERO, nanVecF)
self.assertTrue(nanVecF.is_finite())
if __name__ == '__main__':
unittest.main()
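Outside the test harness, the API exercised above reduces to a handful of calls. A minimal interactive sketch, with the expected values taken from the assertions in this file:

from ignition.math import Vector2d

v = Vector2d(3, 4)
print(v.length())             # 5.0
print(v.dot(Vector2d(1, 0)))  # 3.0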
self.assertAlmostEqual(vec1 + Vector2d.ZERO, vec1)
# Addition assigment
vec4 = Vector2d(vec1)
vec4 += 0
self.assertEqual(vec4, vec1)
vec4 += Vector2d.ZERO
self.assertAlmostEqual(vec4, vec1)
# Add non-trivial scalar values left and right
self.assertEqual(vec1 + 2.5, Vector2d(2.6, 2.7))
vec1 = vec4
vec4 += 2.5
self.assertEqual(vec4, Vector2d(2.6, 2.7))
def test_sub(self):
vec1 = Vector2d(0.1, 0.2)
vec2 = Vector2d(1.1, 2.2)
vec3 = vec2
vec3 -= vec1
self.assertAlmostEqual(vec2 - vec1, Vector2d(1.0, 2.0))
self.assertAlmostEqual(vec3, Vector2d(1.0, 2.0))
# Subtraction with zeros
# Scalar right
self.assertEqual(vec1 - 0, vec1)
# Vector left and right
self.assertAlmostEqual(Vector2d.ZERO - vec1, -vec1)
self.assertAlmostEqual(vec1 - Vector2d.ZERO, vec1)
# Subtraction assignment
vec4 = Vector2d(vec1)
vec4 -= 0
self.assertEqual(vec4, vec1)
vec4 -= Vector2d.ZERO
self.assertAlmostEqual(vec4, vec1)
# Subtract non-trivial scalar values left and right
self.assertEqual(vec1 - 2.5, -Vector2d(2.4, 2.3))
vec4 = vec1
vec4 -= 2.5
self.assertEqual(vec4, -Vector2d(2.4, 2.3))
def test_multiply(self):
v = Vector2d(0.1, -4.2)
vec2 = v * 2.0
self.assertEqual(vec2, Vector2d(0.2, -8.4))
vec2 *= 4.0
self.assertEqual(vec2, Vector2d(0.8, -33.6))
# Multiply by zero
# Scalar right
self.assertEqual(v * 0, Vector2d.ZERO)
# Element-wise vector multiplication
self.assertEqual(v * Vector2d.ZERO, Vector2d.ZERO)
# Multiply by one
# Scalar right
self.assertEqual(v * 1, v)
# Element-wise vector multiplication
self.assertEqual(v * Vector2d.ONE, v)
# Multiply by non-trivial scalar value
scalar = 2.5
expect = Vector2d(0.25, -10.5)
self.assertEqual(v * scalar, expect)
# Multiply by itself element-wise
v.set(0.1, 0.5)
self.assertAlmostEqual(v * v, Vector2d(0.01, 0.25))
    def test_length(self):
# Zero vector
self.assertAlmostEqual(Vector2d.ZERO.length(), 0.0)
self.assertAlmostEqual(Vector2d.ZERO.squared_length(), 0.0)
# One vector
self.assertAlmostEqual(Vector2d.ONE.length(),
math.sqrt(2), delta=1e-10)
self.assertAlmostEqual(Vector2d.ONE.squared_length(), 2.0)
# Arbitrary vector
v = Vector2d(0.1, -4.2)
self.assertAlmostEqual(v.length(), 4.20119030752, delta=1e-10)
self.assertAlmostEqual(v.squared_length(), 17.65)
# Integer vector
v = Vector2d(3, 4)
self.assertAlmostEqual(v.length(), 5)
self.assertAlmostEqual(v.squared_length(), 25)
def test_nan(self):
nanVec = Vector2d.NAN
self.assertFalse(nanVec.is_finite())
self.assertTrue(math.isnan(nanVec.x()))
self.assertTrue(math.isnan(nanVec.y()))
nanVec.correct()
self.assertEqual(Vector2d.ZERO, nanVec)
self.assertTrue(nanVec.is_finite())
nanVecF = Vector2f.NAN
self.assertFalse(nanVecF.is_finite())
self.assertTrue(math.isnan(nanVecF.x()))
self.assertTrue(math.isnan(nanVecF.y()))
nanVecF.correct()
self.assertEqual(Vector2f.ZERO, nanVecF)
self.assertTrue(nanVecF.is_finite())
if __name__ == '__main__':
unittest.main()
| en | 0.730648 | # Copyright (C) 2021 Open Source Robotics Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copy # Inequality # Distance # Normalize # Set # Abs # _eq_ # _add_ # __sub__ # __truediv__ # __truediv__ int # __mul__ # __mul__ int # is_finite # Test Equal function with specified tolerance # Add zero # Scalar right # Vector left and right # Addition assigment # Add non-trivial scalar values left and right # Subtraction with zeros # Scalar right # Vector left and right # Subtraction assignment # Subtract non-trivial scalar values left and right # Multiply by zero # Scalar right # Element-wise vector multiplication # Multiply by one # Scalar right # Element-wise vector multiplication # Multiply by non-trivial scalar value # Multiply by itself element-wise # Zero vector # One vector # Arbitrary vector # Integer vector | 2.647163 | 3 |
fsspec/tests/test_mapping.py | sodre/filesystem_spec | 0 | 9022 | <gh_stars>0
import os
import fsspec
from fsspec.implementations.memory import MemoryFileSystem
import pickle
import pytest
def test_mapping_prefix(tmpdir):
tmpdir = str(tmpdir)
os.makedirs(os.path.join(tmpdir, "afolder"))
open(os.path.join(tmpdir, "afile"), "w").write("test")
open(os.path.join(tmpdir, "afolder", "anotherfile"), "w").write("test2")
m = fsspec.get_mapper("file://" + tmpdir)
assert "afile" in m
assert m["afolder/anotherfile"] == b"test2"
fs = fsspec.filesystem("file")
m2 = fs.get_mapper(tmpdir)
m3 = fs.get_mapper("file://" + tmpdir)
assert m == m2 == m3
def test_ops():
MemoryFileSystem.store.clear()
m = fsspec.get_mapper("memory://")
assert not m
assert list(m) == []
with pytest.raises(KeyError):
m["hi"]
assert m.pop("key", 0) == 0
m["key0"] = b"data"
assert list(m) == ["key0"]
assert m["key0"] == b"data"
m.clear()
assert list(m) == []
def test_pickle():
m = fsspec.get_mapper("memory://")
assert isinstance(m.fs, MemoryFileSystem)
m["key"] = b"data"
m2 = pickle.loads(pickle.dumps(m))
assert list(m) == list(m2)
def test_keys_view():
# https://github.com/intake/filesystem_spec/issues/186
m = fsspec.get_mapper("memory://")
m["key"] = b"data"
keys = m.keys()
assert len(keys) == 1
# check that we don't consume the keys
assert len(keys) == 1
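# Illustrative note (added, not part of the original test module): fsspec.get_mapper
# returns a dict-like view of a filesystem, which is why the tests above exercise
# item access, pop, clear, pickling and keys() against the in-memory backend.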
| import os
import fsspec
from fsspec.implementations.memory import MemoryFileSystem
import pickle
import pytest
def test_mapping_prefix(tmpdir):
tmpdir = str(tmpdir)
os.makedirs(os.path.join(tmpdir, "afolder"))
open(os.path.join(tmpdir, "afile"), "w").write("test")
open(os.path.join(tmpdir, "afolder", "anotherfile"), "w").write("test2")
m = fsspec.get_mapper("file://" + tmpdir)
assert "afile" in m
assert m["afolder/anotherfile"] == b"test2"
fs = fsspec.filesystem("file")
m2 = fs.get_mapper(tmpdir)
m3 = fs.get_mapper("file://" + tmpdir)
assert m == m2 == m3
def test_ops():
MemoryFileSystem.store.clear()
m = fsspec.get_mapper("memory://")
assert not m
assert list(m) == []
with pytest.raises(KeyError):
m["hi"]
assert m.pop("key", 0) == 0
m["key0"] = b"data"
assert list(m) == ["key0"]
assert m["key0"] == b"data"
m.clear()
assert list(m) == []
def test_pickle():
m = fsspec.get_mapper("memory://")
assert isinstance(m.fs, MemoryFileSystem)
m["key"] = b"data"
m2 = pickle.loads(pickle.dumps(m))
assert list(m) == list(m2)
def test_keys_view():
# https://github.com/intake/filesystem_spec/issues/186
m = fsspec.get_mapper("memory://")
m["key"] = b"data"
keys = m.keys()
assert len(keys) == 1
# check that we don't consume the keys
assert len(keys) == 1 | en | 0.871978 | # https://github.com/intake/filesystem_spec/issues/186 # check that we don't consume the keys | 2.430238 | 2 |
testedome/questions/quest_5.py | EderReisS/pythonChallenges | 0 | 9023 | """
A
/ |
B C
'B, C'
"""
class CategoryTree:
def __init__(self):
self.root = {}
self.all_categories = []
def add_category(self, category, parent):
if category in self.all_categories:
raise KeyError(f"{category} exists")
if parent is None:
self.root[category] = set()
if parent:
if parent not in self.root:
raise KeyError(f"{parent} invalid")
self.root[category] = set()
self.root[parent].add(category)
self.all_categories.append(category)
def get_children(self, parent):
if parent and parent not in self.root:
raise KeyError(f"{parent} invalid")
return list(self.root[parent])
if __name__ == "__main__":
c = CategoryTree()
c.add_category('A', None)
c.add_category('B', 'A')
c.add_category('C', 'A')
print(','.join(c.get_children('A') or []))
print(','.join(c.get_children('E') or []))
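# Illustrative note (added): the first print emits "B,C" or "C,B" (children are kept
# in a set, so order is unspecified); get_children('E') then raises KeyError because
# 'E' was never added as a category, so the second print never completes.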
| """
A
/ |
B C
'B, C'
"""
class CategoryTree:
def __init__(self):
self.root = {}
self.all_categories = []
def add_category(self, category, parent):
if category in self.all_categories:
raise KeyError(f"{category} exists")
if parent is None:
self.root[category] = set()
if parent:
if parent not in self.root:
raise KeyError(f"{parent} invalid")
self.root[category] = set()
self.root[parent].add(category)
self.all_categories.append(category)
def get_children(self, parent):
if parent and parent not in self.root:
raise KeyError(f"{parent} invalid")
return list(self.root[parent])
if __name__ == "__main__":
c = CategoryTree()
c.add_category('A', None)
c.add_category('B', 'A')
c.add_category('C', 'A')
print(','.join(c.get_children('A') or []))
print(','.join(c.get_children('E') or []))
| en | 0.671334 | A / | B C 'B, C' | 3.663921 | 4 |
sppas/sppas/src/anndata/aio/__init__.py | mirfan899/MTTS | 0 | 9024 | <reponame>mirfan899/MTTS
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
anndata.aio
~~~~~~~~~~~
Readers and writers of annotated data.
:author: <NAME>
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
"""
from .annotationpro import sppasANT
from .annotationpro import sppasANTX
from .anvil import sppasAnvil
from .audacity import sppasAudacity
from .elan import sppasEAF
from .htk import sppasLab
from .phonedit import sppasMRK
from .phonedit import sppasSignaix
from .praat import sppasTextGrid
from .praat import sppasIntensityTier
from .praat import sppasPitchTier
from .sclite import sppasCTM
from .sclite import sppasSTM
from .subtitle import sppasSubRip
from .subtitle import sppasSubViewer
from .text import sppasRawText
from .text import sppasCSV
from .weka import sppasARFF
from .weka import sppasXRFF
from .xtrans import sppasTDF
from .xra import sppasXRA
# ----------------------------------------------------------------------------
# Variables
# ----------------------------------------------------------------------------
# TODO: get extension from the "default_extension" member of each class
ext_sppas = ['.xra', '.[Xx][Rr][Aa]']
ext_praat = ['.TextGrid', '.PitchTier', '.[Tt][eE][xX][tT][Gg][Rr][Ii][dD]','.[Pp][Ii][tT][cC][hH][Tt][Ii][Ee][rR]']
ext_transcriber = ['.trs','.[tT][rR][sS]']
ext_elan = ['.eaf', '[eE][aA][fF]']
ext_ascii = ['.txt', '.csv', '.[cC][sS][vV]', '.[tT][xX][Tt]', '.info']
ext_phonedit = ['.mrk', '.[mM][rR][kK]']
ext_signaix = ['.hz', '.[Hh][zZ]']
ext_sclite = ['.stm', '.ctm', '.[sScC][tT][mM]']
ext_htk = ['.lab', '.mlf']
ext_subtitles = ['.sub', '.srt', '.[sS][uU][bB]', '.[sS][rR][tT]']
ext_anvil = ['.anvil', '.[aA][aN][vV][iI][lL]']
ext_annotationpro = ['.antx', '.[aA][aN][tT][xX]']
ext_xtrans = ['.tdf', '.[tT][dD][fF]']
ext_audacity = ['.aup']
ext_weka = ['.arff', '.xrff']
primary_in = ['.hz', '.PitchTier']
annotations_in = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.anvil', '.aup', '.trs', '.tdf']
extensions = ['.xra', '.textgrid', '.pitchtier', '.hz', '.eaf', '.trs', '.csv', '.mrk', '.txt', '.mrk', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', 'anvil', '.antx', '.tdf', '.arff', '.xrff']
extensionsul = ext_sppas + ext_praat + ext_transcriber + ext_elan + ext_ascii + ext_phonedit + ext_signaix + ext_sclite + ext_htk + ext_subtitles + ext_anvil + ext_annotationpro + ext_xtrans + ext_audacity + ext_weka
extensions_in = primary_in + annotations_in
extensions_out = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.arff', '.xrff']
extensions_out_multitiers = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.antx', '.arff', '.xrff']
# ----------------------------------------------------------------------------
__all__ = (
"sppasANT",
"sppasANTX",
"sppasAnvil",
"sppasAudacity",
"sppasEAF",
"sppasLab",
"sppasMRK",
"sppasSignaix",
"sppasTextGrid",
"sppasIntensityTier",
"sppasPitchTier",
"sppasCTM",
"sppasSTM",
"sppasSubRip",
"sppasSubViewer",
"sppasRawText",
"sppasCSV",
"sppasARFF",
"sppasXRFF",
"sppasTDF",
"sppasXRA",
"extensions",
"extensions_in",
"extensions_out"
)
| # -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
anndata.aio
~~~~~~~~~~~
Readers and writers of annotated data.
:author: <NAME>
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
"""
from .annotationpro import sppasANT
from .annotationpro import sppasANTX
from .anvil import sppasAnvil
from .audacity import sppasAudacity
from .elan import sppasEAF
from .htk import sppasLab
from .phonedit import sppasMRK
from .phonedit import sppasSignaix
from .praat import sppasTextGrid
from .praat import sppasIntensityTier
from .praat import sppasPitchTier
from .sclite import sppasCTM
from .sclite import sppasSTM
from .subtitle import sppasSubRip
from .subtitle import sppasSubViewer
from .text import sppasRawText
from .text import sppasCSV
from .weka import sppasARFF
from .weka import sppasXRFF
from .xtrans import sppasTDF
from .xra import sppasXRA
# ----------------------------------------------------------------------------
# Variables
# ----------------------------------------------------------------------------
# TODO: get extension from the "default_extension" member of each class
ext_sppas = ['.xra', '.[Xx][Rr][Aa]']
ext_praat = ['.TextGrid', '.PitchTier', '.[Tt][eE][xX][tT][Gg][Rr][Ii][dD]','.[Pp][Ii][tT][cC][hH][Tt][Ii][Ee][rR]']
ext_transcriber = ['.trs','.[tT][rR][sS]']
ext_elan = ['.eaf', '[eE][aA][fF]']
ext_ascii = ['.txt', '.csv', '.[cC][sS][vV]', '.[tT][xX][Tt]', '.info']
ext_phonedit = ['.mrk', '.[mM][rR][kK]']
ext_signaix = ['.hz', '.[Hh][zZ]']
ext_sclite = ['.stm', '.ctm', '.[sScC][tT][mM]']
ext_htk = ['.lab', '.mlf']
ext_subtitles = ['.sub', '.srt', '.[sS][uU][bB]', '.[sS][rR][tT]']
ext_anvil = ['.anvil', '.[aA][aN][vV][iI][lL]']
ext_annotationpro = ['.antx', '.[aA][aN][tT][xX]']
ext_xtrans = ['.tdf', '.[tT][dD][fF]']
ext_audacity = ['.aup']
ext_weka = ['.arff', '.xrff']
primary_in = ['.hz', '.PitchTier']
annotations_in = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.anvil', '.aup', '.trs', '.tdf']
extensions = ['.xra', '.textgrid', '.pitchtier', '.hz', '.eaf', '.trs', '.csv', '.mrk', '.txt', '.mrk', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', 'anvil', '.antx', '.tdf', '.arff', '.xrff']
extensionsul = ext_sppas + ext_praat + ext_transcriber + ext_elan + ext_ascii + ext_phonedit + ext_signaix + ext_sclite + ext_htk + ext_subtitles + ext_anvil + ext_annotationpro + ext_xtrans + ext_audacity + ext_weka
extensions_in = primary_in + annotations_in
extensions_out = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.arff', '.xrff']
extensions_out_multitiers = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.antx', '.arff', '.xrff']
# ----------------------------------------------------------------------------
__all__ = (
"sppasANT",
"sppasANTX",
"sppasAnvil",
"sppasAudacity",
"sppasEAF",
"sppasLab",
"sppasMRK",
"sppasSignaix",
"sppasTextGrid",
"sppasIntensityTier",
"sppasPitchTier",
"sppasCTM",
"sppasSTM",
"sppasSubRip",
"sppasSubViewer",
"sppasRawText",
"sppasCSV",
"sppasARFF",
"sppasXRFF",
"sppasTDF",
"sppasXRA",
"extensions",
"extensions_in",
"extensions_out"
) | en | 0.656041 | # -*- coding: UTF-8 -*- .. --------------------------------------------------------------------- ___ __ __ __ ___ / | \ | \ | \ / the automatic \__ |__/ |__/ |___| \__ annotation and \ | | | | \ analysis ___/ | | | | ___/ of speech http://www.sppas.org/ Use of this software is governed by the GNU Public License, version 3. SPPAS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SPPAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SPPAS. If not, see <http://www.gnu.org/licenses/>. This banner notice must not be removed. --------------------------------------------------------------------- anndata.aio ~~~~~~~~~~~ Readers and writers of annotated data. :author: <NAME> :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: <EMAIL> :license: GPL, v3 :copyright: Copyright (C) 2011-2018 <NAME> # ---------------------------------------------------------------------------- # Variables # ---------------------------------------------------------------------------- # TODO: get extension from the "default_extension" member of each class # ---------------------------------------------------------------------------- | 1.307879 | 1 |
models/__init__.py | dapengchen123/hfsoftmax | 1 | 9025 | from .resnet import *
from .hynet import *
from .classifier import Classifier, HFClassifier, HNSWClassifier
from .ext_layers import ParameterClient
samplerClassifier = {
'hf': HFClassifier,
'hnsw': HNSWClassifier,
}
| from .resnet import *
from .hynet import *
from .classifier import Classifier, HFClassifier, HNSWClassifier
from .ext_layers import ParameterClient
samplerClassifier = {
'hf': HFClassifier,
'hnsw': HNSWClassifier,
}
| none | 1 | 1.36135 | 1 |
|
scripts/multiplayer/server.py | AgnirudraSil/tetris | 3 | 9026 | import pickle
import socket
import _thread
from scripts.multiplayer import game, board, tetriminos
server = "192.168.29.144"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
print(e)
s.listen()
print("Waiting for connection")
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
global idCount
conn.send(str.encode(str(p)))
reply = ""
while True:
try:
data = conn.recv(4096).decode()
if gameId in games:
game = games[gameId]
if not data:
break
else:
game.update(p, data)
reply = game
conn.sendall(pickle.dumps(reply))
else:
break
except:
break
print("Lost Connection!")
try:
del games[gameId]
print("Closing Game", gameId)
except:
pass
idCount -= 1
conn.close()
while True:
conn, addr = s.accept()
print("Connected to: ", addr)
idCount += 1
p = 0
game_id = (idCount - 1) // 2
if idCount % 2 == 1:
games[game_id] = game.Game((0, 0, 0), None, board)
else:
games[game_id].ready = True
p = 1
_thread.start_new_thread(threaded_client, (conn, p, game_id))
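# Illustrative note (added, not part of the original server script): each client
# first receives its player index as a plain string, then threaded_client exchanges
# the client's input strings for pickled Game objects on every request.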
| import pickle
import socket
import _thread
from scripts.multiplayer import game, board, tetriminos
server = "192.168.29.144"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
print(e)
s.listen()
print("Waiting for connection")
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
global idCount
conn.send(str.encode(str(p)))
reply = ""
while True:
try:
data = conn.recv(4096).decode()
if gameId in games:
game = games[gameId]
if not data:
break
else:
game.update(p, data)
reply = game
conn.sendall(pickle.dumps(reply))
else:
break
except:
break
print("Lost Connection!")
try:
del games[gameId]
print("Closing Game", gameId)
except:
pass
idCount -= 1
conn.close()
while True:
conn, addr = s.accept()
print("Connected to: ", addr)
idCount += 1
p = 0
game_id = (idCount - 1) // 2
if idCount % 2 == 1:
games[game_id] = game.Game((0, 0, 0), None, board)
else:
games[game_id].ready = True
p = 1
_thread.start_new_thread(threaded_client, (conn, p, game_id))
| none | 1 | 2.769664 | 3 |
|
solutions/6-sum-suqare-difference.py | smaranjitghose/PyProjectEuler | 1 | 9027 | <filename>solutions/6-sum-suqare-difference.py
def sum_of_squares(n):
return sum(i ** 2 for i in range(1, n+1))
def square_of_sum(n):
return sum(range(1, n+1)) ** 2
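# Illustrative usage note (added, not in the original solution): Project Euler 6
# asks for square_of_sum(100) - sum_of_squares(100), which evaluates to 25164150.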
| <filename>solutions/6-sum-suqare-difference.py
def sum_of_squares(n):
return sum(i ** 2 for i in range(1, n+1))
def square_of_sum(n):
return sum(range(1, n+1)) ** 2
| none | 1 | 3.638595 | 4 |
|
AdventOfCode/2018/src/day-03/app.py | AustinTSchaffer/DailyProgrammer | 1 | 9028 | <reponame>AustinTSchaffer/DailyProgrammer
import os
import re
from collections import defaultdict
class Claim(object):
def __init__(self, data_row):
match = re.match(r'#(\d+) @ (\d+),(\d+): (\d+)x(\d+)', data_row)
self.id = int(match[1])
self.x = int(match[2])
self.y = int(match[3])
self.width = int(match[4])
self.height = int(match[5])
def all_locations(self):
for x in range(self.width):
for y in range(self.height):
yield (self.x + x, self.y + y)
CURRENT_DIR, _ = os.path.split(__file__)
DATA_FLIE = os.path.join(CURRENT_DIR, 'data.txt')
def data_file_iter(data_file) -> Claim:
with open(data_file, 'r') as data:
for claim in data:
claim = claim.strip()
if (claim):
yield Claim(claim)
def part1(claims):
"""
This is basically a single-threaded collision detection method,
implemented in pure python. Computation complexity is obviously
not a consideration.
"""
# Determines how many times each locations was claimed
claimed_space_registry = defaultdict(int)
for claim in claims:
for location in claim.all_locations():
claimed_space_registry[location] += 1
# Generates the set of all locations that were claimed more than once
multi_claimed_spaces = {
location
for location,count in claimed_space_registry.items()
if count > 1
}
# Prints the number of locations that are claimed more than once
# and returns the set of locations that were claimed more than once
print('Multi-Claimed Spaces:', len(multi_claimed_spaces))
return multi_claimed_spaces
def part2(claims, multi_claimed_spaces):
"""
Might not be the optimal solution, but it runs fast enough, and uses
components that were already calculated in part 1.
"""
for claim in claims:
all_locations_are_non_overlapping = all(map(
lambda loc: loc not in multi_claimed_spaces,
claim.all_locations()
))
if all_locations_are_non_overlapping:
print('Non-overlapping claim:', claim.id)
return claim
if __name__ == '__main__':
claims = list(data_file_iter(DATA_FLIE))
mcs = part1(claims)
santas_suit_material = part2(claims, mcs)
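# Illustrative note (added, not part of the original script): a claim line such as
# "#123 @ 3,2: 5x4" is parsed by Claim.__init__ into id=123, x=3, y=2, width=5,
# height=4, and all_locations() then yields the 20 squares with x in 3..7, y in 2..5.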
| import os
import re
from collections import defaultdict
class Claim(object):
def __init__(self, data_row):
match = re.match(r'#(\d+) @ (\d+),(\d+): (\d+)x(\d+)', data_row)
self.id = int(match[1])
self.x = int(match[2])
self.y = int(match[3])
self.width = int(match[4])
self.height = int(match[5])
def all_locations(self):
for x in range(self.width):
for y in range(self.height):
yield (self.x + x, self.y + y)
CURRENT_DIR, _ = os.path.split(__file__)
DATA_FLIE = os.path.join(CURRENT_DIR, 'data.txt')
def data_file_iter(data_file) -> Claim:
with open(data_file, 'r') as data:
for claim in data:
claim = claim.strip()
if (claim):
yield Claim(claim)
def part1(claims):
"""
This is basically a single-threaded collision detection method,
implemented in pure python. Computation complexity is obviously
not a consideration.
"""
# Determines how many times each locations was claimed
claimed_space_registry = defaultdict(int)
for claim in claims:
for location in claim.all_locations():
claimed_space_registry[location] += 1
# Generates the set of all locations that were claimed more than once
multi_claimed_spaces = {
location
for location,count in claimed_space_registry.items()
if count > 1
}
# Prints the number of locations that are claimed more than once
# and returns the set of locations that were claimed more than once
print('Multi-Claimed Spaces:', len(multi_claimed_spaces))
return multi_claimed_spaces
def part2(claims, multi_claimed_spaces):
"""
Might not be the optimal solution, but it runs fast enough, and uses
components that were already calculated in part 1.
"""
for claim in claims:
all_locations_are_non_overlapping = all(map(
lambda loc: loc not in multi_claimed_spaces,
claim.all_locations()
))
if all_locations_are_non_overlapping:
print('Non-overlapping claim:', claim.id)
return claim
if __name__ == '__main__':
claims = list(data_file_iter(DATA_FLIE))
mcs = part1(claims)
santas_suit_material = part2(claims, mcs) | en | 0.982544 | This is basically a single-threaded collision detection method, implemented in pure python. Computation complexity is obviously not a consideration. # Determines how many times each locations was claimed # Generates the set of all locations that were claimed more than once # Prints the number of locations that are claimed more than once # and returns the set of locations that were claimed more than once Might not be the optimal solution, but it runs fast enough, and uses components that were already calculated in part 1. | 3.140856 | 3 |
facerec-master/py/facerec/distance.py | ArianeFire/HaniCam | 776 | 9029 | <reponame>ArianeFire/HaniCam<filename>facerec-master/py/facerec/distance.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
class AbstractDistance(object):
def __init__(self, name):
self._name = name
def __call__(self,p,q):
raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
@property
def name(self):
return self._name
def __repr__(self):
return self._name
class EuclideanDistance(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"EuclideanDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sqrt(np.sum(np.power((p-q),2)))
class CosineDistance(AbstractDistance):
"""
Negated Mahalanobis Cosine Distance.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"CosineDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return -np.dot(p.T,q) / (np.sqrt(np.dot(p,p.T)*np.dot(q,q.T)))
class NormalizedCorrelation(AbstractDistance):
"""
Calculates the NormalizedCorrelation Coefficient for two vectors.
Literature:
"Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey.
"""
def __init__(self):
AbstractDistance.__init__(self,"NormalizedCorrelation")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
pmu = p.mean()
qmu = q.mean()
pm = p - pmu
qm = q - qmu
return 1.0 - (np.dot(pm, qm) / (np.sqrt(np.dot(pm, pm)) * np.sqrt(np.dot(qm, qm))))
class ChiSquareDistance(AbstractDistance):
"""
    Calculates the Chi-Square distance between two vectors.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquareDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
bin_dists = (p-q)**2 / (p+q+np.finfo('float').eps)
return np.sum(bin_dists)
class HistogramIntersection(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"HistogramIntersection")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sum(np.minimum(p,q))
class BinRatioDistance(AbstractDistance):
"""
Calculates the Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q))/((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class L1BinRatioDistance(AbstractDistance):
"""
Calculates the L1-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"L1-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q)) * abs(p-q) / ((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class ChiSquareBRD(AbstractDistance):
"""
Calculates the ChiSquare-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquare-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q)) * (p-q)**2 / ((p+q)**3+np.finfo('float').eps)
return np.abs(np.sum(b))
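# Illustrative usage sketch (added, not part of the original facerec module); it only
# evaluates two of the distance callables on small vectors.
if __name__ == "__main__":
    p = np.asarray([1.0, 2.0, 3.0])
    q = np.asarray([2.0, 2.0, 4.0])
    print(EuclideanDistance()(p, q))   # sqrt(1 + 0 + 1) ~ 1.4142
    print(ChiSquareDistance()(p, q))   # 1/3 + 0 + 1/7 ~ 0.4762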
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
class AbstractDistance(object):
def __init__(self, name):
self._name = name
def __call__(self,p,q):
raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
@property
def name(self):
return self._name
def __repr__(self):
return self._name
class EuclideanDistance(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"EuclideanDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sqrt(np.sum(np.power((p-q),2)))
class CosineDistance(AbstractDistance):
"""
Negated Mahalanobis Cosine Distance.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"CosineDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return -np.dot(p.T,q) / (np.sqrt(np.dot(p,p.T)*np.dot(q,q.T)))
class NormalizedCorrelation(AbstractDistance):
"""
Calculates the NormalizedCorrelation Coefficient for two vectors.
Literature:
"Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey.
"""
def __init__(self):
AbstractDistance.__init__(self,"NormalizedCorrelation")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
pmu = p.mean()
qmu = q.mean()
pm = p - pmu
qm = q - qmu
return 1.0 - (np.dot(pm, qm) / (np.sqrt(np.dot(pm, pm)) * np.sqrt(np.dot(qm, qm))))
class ChiSquareDistance(AbstractDistance):
"""
    Calculates the Chi-Square distance between two vectors.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquareDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
bin_dists = (p-q)**2 / (p+q+np.finfo('float').eps)
return np.sum(bin_dists)
class HistogramIntersection(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"HistogramIntersection")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sum(np.minimum(p,q))
class BinRatioDistance(AbstractDistance):
"""
Calculates the Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q))/((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class L1BinRatioDistance(AbstractDistance):
"""
Calculates the L1-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"L1-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q)) * abs(p-q) / ((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class ChiSquareBRD(AbstractDistance):
"""
Calculates the ChiSquare-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquare-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
b = ((p-q)**2 + 2*a*(p*q)) * (p-q)**2 / ((p+q)**3+np.finfo('float').eps)
return np.abs(np.sum(b)) | en | 0.766589 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) <NAME>. All rights reserved. # Licensed under the BSD license. See LICENSE file in the project root for full license information. Negated Mahalanobis Cosine Distance. Literature: "Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang Calculates the NormalizedCorrelation Coefficient for two vectors. Literature: "Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey. Negated Mahalanobis Cosine Distance. Literature: "Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang Calculates the Bin Ratio Dissimilarity. Literature: "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al. # NumPy needs np.dot instead of * for reducing to tensor Calculates the L1-Bin Ratio Dissimilarity. Literature: "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al. # NumPy needs np.dot instead of * for reducing to tensor Calculates the ChiSquare-Bin Ratio Dissimilarity. Literature: "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al. # NumPy needs np.dot instead of * for reducing to tensor | 3.110069 | 3 |
pgyer_uploader.py | elina8013/android_demo | 666 | 9030 | <filename>pgyer_uploader.py
#!/usr/bin/python
#coding=utf-8
import os
import requests
import time
import re
from datetime import datetime
import urllib2
import json
import mimetypes
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# configuration for pgyer
USER_KEY = "f605b7c7826690f796078e3dd23a60d5"
API_KEY = "<KEY>"
PGYER_UPLOAD_URL = "https://www.pgyer.com/apiv1/app/upload"
repo_path = 'C:/Users/Administrator/.jenkins/workspace/Demo/app'
repo_url = 'https://github.com/r17171709/iite_test'
ipa_path = "C:/Users/Administrator/.jenkins/workspace/Demo/app/build/outputs/apk/app-release.apk"
update_description = "版本更新测试"
def parseUploadResult(jsonResult):
print 'post response: %s' % jsonResult
resultCode = jsonResult['code']
send_Email(jsonResult)
if resultCode != 0:
print "Upload Fail!"
raise Exception("Reason: %s" % jsonResult['message'])
print "Upload Success"
appKey = jsonResult['data']['appKey']
appDownloadPageURL = "https://www.pgyer.com/%s" % appKey
print "appDownloadPage: %s" % appDownloadPageURL
return appDownloadPageURL
def uploadIpaToPgyer(ipaPath, updateDescription):
print "Begin to upload ipa to Pgyer: %s" % ipaPath
headers = {'enctype': 'multipart/form-data'}
payload = {
'uKey': USER_KEY,
'_api_key': API_KEY,
'publishRange': '2', # 直接发布
'isPublishToPublic': '2', # 不发布到广场
'updateDescription': updateDescription # 版本更新描述
}
try_times = 0
while try_times < 5:
try:
print "uploading ... %s" % datetime.now()
ipa_file = {'file': open(ipaPath, 'rb')}
r = requests.post(PGYER_UPLOAD_URL,
headers = headers,
files = ipa_file,
data = payload
)
assert r.status_code == requests.codes.ok
result = r.json()
appDownloadPageURL = parseUploadResult(result)
return appDownloadPageURL
except requests.exceptions.ConnectionError:
print "requests.exceptions.ConnectionError occured!"
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
except Exception as e:
print "Exception occured: %s" % str(e)
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
if try_times >= 5:
raise Exception("Failed to upload ipa to Pgyer, retried 5 times.")
def parseQRCodeImageUrl(appDownloadPageURL):
try_times = 0
while try_times < 3:
try:
response = requests.get(appDownloadPageURL)
regex = '<img src=\"(.*?)\" style='
m = re.search(regex, response.content)
assert m is not None
appQRCodeURL = m.group(1)
print "appQRCodeURL: %s" % appQRCodeURL
return appQRCodeURL
except AssertionError:
try_times += 1
time.sleep(60)
print "Can not locate QRCode image. retry ... %s: %s" % (try_times, datetime.now())
if try_times >= 3:
raise Exception("Failed to locate QRCode image in download page, retried 3 times.")
def saveQRCodeImage(appDownloadPageURL, output_folder):
appQRCodeURL = parseQRCodeImageUrl(appDownloadPageURL)
response = requests.get(appQRCodeURL)
qr_image_file_path = os.path.join(output_folder, 'QRCode.png')
if response.status_code == 200:
with open(qr_image_file_path, 'wb') as f:
f.write(response.content)
print 'Save QRCode image to file: %s' % qr_image_file_path
def main():
appDownloadPageURL = uploadIpaToPgyer(ipa_path, update_description)
try:
output_folder = os.path.dirname(ipa_path)
saveQRCodeImage(appDownloadPageURL, output_folder)
except Exception as e:
print "Exception occured: %s" % str(e)
#获取 最后一次 提交git的信息
def getCommitInfo():
#方法一 使用 python 库 前提是 当前分支 在服务器上存在
# repo = Gittle(repo_path, origin_uri=repo_url)
# commitInfo = repo.commit_info(start=0, end=1)
# lastCommitInfo = commitInfo[0]
#方法二 直接 cd 到 目录下 git log -1 打印commit 信息
os.chdir(repo_path);
lastCommitInfo = run_cmd('git log -1')
return lastCommitInfo
#发送邮件
def send_Email(json_result):
print '*******start to send mail****'
appName = json_result['data']['appName']
appKey = json_result['data']['appKey']
appVersion = json_result['data']['appVersion']
appBuildVersion = json_result['data']['appBuildVersion']
appShortcutUrl = json_result['data']['appShortcutUrl']
#邮件接受者
mail_receiver = ['<EMAIL>']
#根据不同邮箱配置 host,user,和pwd
mail_host = 'smtp.139.com'
mail_port = 465
mail_user = '<EMAIL>'
mail_pwd = '<PASSWORD>'
mail_to = ','.join(mail_receiver)
msg = MIMEMultipart()
environsString = '<p><h3>本次打包相关信息</h3><p>'
# environsString += '<p>ipa 包下载地址 : ' + 'wudizhi' + '<p>'
environsString += '<p>蒲公英安装地址 : ' + 'http://www.pgyer.com/' + str(appShortcutUrl) + '<p><p><p><p>'
# environsString += '<li><a href="itms-services://?action=download-manifest&url=https://ssl.pgyer.com/app/plist/' + str(appKey) + '"></a>点击直接安装</li>'
environsString += '<p><h3>本次git提交相关信息</h3><p>'
#获取git最后一次提交信息
lastCommitInfo = getCommitInfo()
# #提交人
# committer = lastCommitInfo['committer']['raw']
# #提交信息
# description = lastCommitInfo['description']
environsString += '<p>' + '<font color="red">' + lastCommitInfo + '</font>' + '<p>'
# environsString += '<p>Description:' + '<font color="red">' + description + '</font>' + '<p>'
message = environsString
body = MIMEText(message, _subtype='html', _charset='utf-8')
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
msg.attach(body)
msg['To'] = mail_to
msg['from'] = '<EMAIL>'
msg['subject'] = 'Android APP 最新打包文件'
try:
s = smtplib.SMTP()
# 设置为调试模式,就是在会话过程中会有输出信息
s.set_debuglevel(1)
s.connect(mail_host)
s.starttls() # 创建 SSL 安全加密 链接
s.login(mail_user, mail_pwd)
s.sendmail(mail_user, mail_receiver, msg.as_string())
s.close()
print '*******mail send ok****'
except Exception, e:
print e
def run_cmd(cmd):
try:
import subprocess
except ImportError:
_, result_f, error_f = os.popen3(cmd)
else:
process = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result_f, error_f = process.stdout, process.stderr
errors = error_f.read()
if errors: pass
result_str = result_f.read().strip()
if result_f : result_f.close()
if error_f : error_f.close()
return result_str
if __name__ == '__main__':
main()
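# Illustrative note (added, not part of the original script): uploadIpaToPgyer() posts
# the apk with the pgyer user/API keys, parseUploadResult() mails the team via
# send_Email() and turns the returned appKey into the download page URL, and
# saveQRCodeImage() then scrapes that page for the QR code image.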
| <filename>pgyer_uploader.py
#!/usr/bin/python
#coding=utf-8
import os
import requests
import time
import re
from datetime import datetime
import urllib2
import json
import mimetypes
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# configuration for pgyer
USER_KEY = "f605b7c7826690f796078e3dd23a60d5"
API_KEY = "<KEY>"
PGYER_UPLOAD_URL = "https://www.pgyer.com/apiv1/app/upload"
repo_path = 'C:/Users/Administrator/.jenkins/workspace/Demo/app'
repo_url = 'https://github.com/r17171709/iite_test'
ipa_path = "C:/Users/Administrator/.jenkins/workspace/Demo/app/build/outputs/apk/app-release.apk"
update_description = "版本更新测试"
def parseUploadResult(jsonResult):
print 'post response: %s' % jsonResult
resultCode = jsonResult['code']
send_Email(jsonResult)
if resultCode != 0:
print "Upload Fail!"
raise Exception("Reason: %s" % jsonResult['message'])
print "Upload Success"
appKey = jsonResult['data']['appKey']
appDownloadPageURL = "https://www.pgyer.com/%s" % appKey
print "appDownloadPage: %s" % appDownloadPageURL
return appDownloadPageURL
def uploadIpaToPgyer(ipaPath, updateDescription):
print "Begin to upload ipa to Pgyer: %s" % ipaPath
headers = {'enctype': 'multipart/form-data'}
payload = {
'uKey': USER_KEY,
'_api_key': API_KEY,
'publishRange': '2', # 直接发布
'isPublishToPublic': '2', # 不发布到广场
'updateDescription': updateDescription # 版本更新描述
}
try_times = 0
while try_times < 5:
try:
print "uploading ... %s" % datetime.now()
ipa_file = {'file': open(ipaPath, 'rb')}
r = requests.post(PGYER_UPLOAD_URL,
headers = headers,
files = ipa_file,
data = payload
)
assert r.status_code == requests.codes.ok
result = r.json()
appDownloadPageURL = parseUploadResult(result)
return appDownloadPageURL
except requests.exceptions.ConnectionError:
print "requests.exceptions.ConnectionError occured!"
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
except Exception as e:
print "Exception occured: %s" % str(e)
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
if try_times >= 5:
raise Exception("Failed to upload ipa to Pgyer, retried 5 times.")
def parseQRCodeImageUrl(appDownloadPageURL):
try_times = 0
while try_times < 3:
try:
response = requests.get(appDownloadPageURL)
regex = '<img src=\"(.*?)\" style='
m = re.search(regex, response.content)
assert m is not None
appQRCodeURL = m.group(1)
print "appQRCodeURL: %s" % appQRCodeURL
return appQRCodeURL
except AssertionError:
try_times += 1
time.sleep(60)
print "Can not locate QRCode image. retry ... %s: %s" % (try_times, datetime.now())
if try_times >= 3:
raise Exception("Failed to locate QRCode image in download page, retried 3 times.")
def saveQRCodeImage(appDownloadPageURL, output_folder):
appQRCodeURL = parseQRCodeImageUrl(appDownloadPageURL)
response = requests.get(appQRCodeURL)
qr_image_file_path = os.path.join(output_folder, 'QRCode.png')
if response.status_code == 200:
with open(qr_image_file_path, 'wb') as f:
f.write(response.content)
print 'Save QRCode image to file: %s' % qr_image_file_path
def main():
appDownloadPageURL = uploadIpaToPgyer(ipa_path, update_description)
try:
output_folder = os.path.dirname(ipa_path)
saveQRCodeImage(appDownloadPageURL, output_folder)
except Exception as e:
print "Exception occured: %s" % str(e)
#获取 最后一次 提交git的信息
def getCommitInfo():
#方法一 使用 python 库 前提是 当前分支 在服务器上存在
# repo = Gittle(repo_path, origin_uri=repo_url)
# commitInfo = repo.commit_info(start=0, end=1)
# lastCommitInfo = commitInfo[0]
#方法二 直接 cd 到 目录下 git log -1 打印commit 信息
os.chdir(repo_path);
lastCommitInfo = run_cmd('git log -1')
return lastCommitInfo
#发送邮件
def send_Email(json_result):
print '*******start to send mail****'
appName = json_result['data']['appName']
appKey = json_result['data']['appKey']
appVersion = json_result['data']['appVersion']
appBuildVersion = json_result['data']['appBuildVersion']
appShortcutUrl = json_result['data']['appShortcutUrl']
#邮件接受者
mail_receiver = ['<EMAIL>']
#根据不同邮箱配置 host,user,和pwd
mail_host = 'smtp.139.com'
mail_port = 465
mail_user = '<EMAIL>'
mail_pwd = '<PASSWORD>'
mail_to = ','.join(mail_receiver)
msg = MIMEMultipart()
environsString = '<p><h3>本次打包相关信息</h3><p>'
# environsString += '<p>ipa 包下载地址 : ' + 'wudizhi' + '<p>'
environsString += '<p>蒲公英安装地址 : ' + 'http://www.pgyer.com/' + str(appShortcutUrl) + '<p><p><p><p>'
# environsString += '<li><a href="itms-services://?action=download-manifest&url=https://ssl.pgyer.com/app/plist/' + str(appKey) + '"></a>点击直接安装</li>'
environsString += '<p><h3>本次git提交相关信息</h3><p>'
#获取git最后一次提交信息
lastCommitInfo = getCommitInfo()
# #提交人
# committer = lastCommitInfo['committer']['raw']
# #提交信息
# description = lastCommitInfo['description']
environsString += '<p>' + '<font color="red">' + lastCommitInfo + '</font>' + '<p>'
# environsString += '<p>Description:' + '<font color="red">' + description + '</font>' + '<p>'
message = environsString
body = MIMEText(message, _subtype='html', _charset='utf-8')
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
msg.attach(body)
msg['To'] = mail_to
msg['from'] = '<EMAIL>'
msg['subject'] = 'Android APP 最新打包文件'
try:
s = smtplib.SMTP()
# 设置为调试模式,就是在会话过程中会有输出信息
s.set_debuglevel(1)
s.connect(mail_host)
s.starttls() # 创建 SSL 安全加密 链接
s.login(mail_user, mail_pwd)
s.sendmail(mail_user, mail_receiver, msg.as_string())
s.close()
print '*******mail send ok****'
except Exception, e:
print e
def run_cmd(cmd):
try:
import subprocess
except ImportError:
_, result_f, error_f = os.popen3(cmd)
else:
process = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result_f, error_f = process.stdout, process.stderr
errors = error_f.read()
if errors: pass
result_str = result_f.read().strip()
if result_f : result_f.close()
if error_f : error_f.close()
return result_str
if __name__ == '__main__':
main()
| zh | 0.611113 | #!/usr/bin/python #coding=utf-8 # configuration for pgyer # 直接发布 # 不发布到广场 # 版本更新描述 #获取 最后一次 提交git的信息 #方法一 使用 python 库 前提是 当前分支 在服务器上存在 # repo = Gittle(repo_path, origin_uri=repo_url) # commitInfo = repo.commit_info(start=0, end=1) # lastCommitInfo = commitInfo[0] #方法二 直接 cd 到 目录下 git log -1 打印commit 信息 #发送邮件 #邮件接受者 #根据不同邮箱配置 host,user,和pwd # environsString += '<p>ipa 包下载地址 : ' + 'wudizhi' + '<p>' # environsString += '<li><a href="itms-services://?action=download-manifest&url=https://ssl.pgyer.com/app/plist/' + str(appKey) + '"></a>点击直接安装</li>' #获取git最后一次提交信息 # #提交人 # committer = lastCommitInfo['committer']['raw'] # #提交信息 # description = lastCommitInfo['description'] # environsString += '<p>Description:' + '<font color="red">' + description + '</font>' + '<p>' # 设置为调试模式,就是在会话过程中会有输出信息 # 创建 SSL 安全加密 链接 | 2.258492 | 2 |
edit/editpublisher.py | lokal-profil/isfdb_site | 0 | 9031 | #!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 <NAME> and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdblib import *
from isfdblib_help import *
from isfdblib_print import *
from isfdb import *
from SQLparsing import *
from login import User
if __name__ == '__main__':
publisherID = SESSION.Parameter(0, 'int')
record = SQLGetPublisher(publisherID)
if not record:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Publisher Editor')
PrintNavBar('edit/editpublisher.cgi', publisherID)
help = HelpPublisher()
printHelpBox('publisher', 'EditPublisher')
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/edit/submitpublisher.cgi">'
print '<table border="0">'
print '<tbody id="tagBody">'
# Limit the ability to edit publisher names to moderators
user = User()
user.load()
display_only = 1
if SQLisUserModerator(user.id):
display_only = 0
printfield("Publisher Name", "publisher_name", help, record[PUBLISHER_NAME], display_only)
trans_publisher_names = SQLloadTransPublisherNames(record[PUBLISHER_ID])
printmultiple(trans_publisher_names, "Transliterated Name", "trans_publisher_names", help)
webpages = SQLloadPublisherWebpages(record[PUBLISHER_ID])
printWebPages(webpages, 'publisher', help)
printtextarea('Note', 'publisher_note', help, SQLgetNotes(record[PUBLISHER_NOTE]))
printtextarea('Note to Moderator', 'mod_note', help, '')
print '</tbody>'
print '</table>'
print '<p>'
print '<input NAME="publisher_id" VALUE="%d" TYPE="HIDDEN">' % publisherID
print '<input TYPE="SUBMIT" VALUE="Submit Data" tabindex="1">'
print '</form>'
print '<p>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
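# Illustrative note (added, not part of the original CGI script): display_only stays 1
# unless SQLisUserModerator(user.id) is true, so only moderators can change the
# publisher name while the remaining fields are rendered as normal inputs.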
| #!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 <NAME> and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdblib import *
from isfdblib_help import *
from isfdblib_print import *
from isfdb import *
from SQLparsing import *
from login import User
if __name__ == '__main__':
publisherID = SESSION.Parameter(0, 'int')
record = SQLGetPublisher(publisherID)
if not record:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Publisher Editor')
PrintNavBar('edit/editpublisher.cgi', publisherID)
help = HelpPublisher()
printHelpBox('publisher', 'EditPublisher')
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/edit/submitpublisher.cgi">'
print '<table border="0">'
print '<tbody id="tagBody">'
# Limit the ability to edit publisher names to moderators
user = User()
user.load()
display_only = 1
if SQLisUserModerator(user.id):
display_only = 0
printfield("Publisher Name", "publisher_name", help, record[PUBLISHER_NAME], display_only)
trans_publisher_names = SQLloadTransPublisherNames(record[PUBLISHER_ID])
printmultiple(trans_publisher_names, "Transliterated Name", "trans_publisher_names", help)
webpages = SQLloadPublisherWebpages(record[PUBLISHER_ID])
printWebPages(webpages, 'publisher', help)
printtextarea('Note', 'publisher_note', help, SQLgetNotes(record[PUBLISHER_NOTE]))
printtextarea('Note to Moderator', 'mod_note', help, '')
print '</tbody>'
print '</table>'
print '<p>'
print '<input NAME="publisher_id" VALUE="%d" TYPE="HIDDEN">' % publisherID
print '<input TYPE="SUBMIT" VALUE="Submit Data" tabindex="1">'
print '</form>'
print '<p>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
| en | 0.581942 | #!_PYTHONLOC # # (C) COPYRIGHT 2004-2021 <NAME> and Ahasuerus # ALL RIGHTS RESERVED # # The copyright notice above does not evidence any actual or # intended publication of such source code. # # Version: $Revision$ # Date: $Date$ # Limit the ability to edit publisher names to moderators | 2.104581 | 2 |
src/dispatch/incident_cost/views.py | vj-codes/dispatch | 1 | 9032 | from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from dispatch.database.core import get_db
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.auth.permissions import SensitiveProjectActionPermission, PermissionsDependency
from .models import (
IncidentCostCreate,
IncidentCostPagination,
IncidentCostRead,
IncidentCostUpdate,
)
from .service import create, delete, get, update
router = APIRouter()
@router.get("", response_model=IncidentCostPagination)
def get_incident_costs(*, common: dict = Depends(common_parameters)):
"""
Get all incident costs, or only those matching a given search term.
"""
return search_filter_sort_paginate(model="IncidentCost", **common)
@router.get("/{incident_cost_id}", response_model=IncidentCostRead)
def get_incident_cost(*, db_session: Session = Depends(get_db), incident_cost_id: int):
"""
Get an incident cost by id.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
return incident_cost
@router.post(
"",
response_model=IncidentCostRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def create_incident_cost(
*, db_session: Session = Depends(get_db), incident_cost_in: IncidentCostCreate
):
"""
Create an incident cost.
"""
incident_cost = create(db_session=db_session, incident_cost_in=incident_cost_in)
return incident_cost
@router.put(
"/{incident_cost_id}",
response_model=IncidentCostRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def update_incident_cost(
*,
db_session: Session = Depends(get_db),
incident_cost_id: int,
incident_cost_in: IncidentCostUpdate,
):
"""
Update an incident cost by id.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
incident_cost = update(
db_session=db_session,
incident_cost=incident_cost,
incident_cost_in=incident_cost_in,
)
return incident_cost
@router.delete(
"/{incident_cost_id}",
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def delete_incident_cost(*, db_session: Session = Depends(get_db), incident_cost_id: int):
"""
Delete an incident cost, returning only an HTTP 200 OK if successful.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
delete(db_session=db_session, incident_cost_id=incident_cost_id)
| from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from dispatch.database.core import get_db
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.auth.permissions import SensitiveProjectActionPermission, PermissionsDependency
from .models import (
IncidentCostCreate,
IncidentCostPagination,
IncidentCostRead,
IncidentCostUpdate,
)
from .service import create, delete, get, update
router = APIRouter()
@router.get("", response_model=IncidentCostPagination)
def get_incident_costs(*, common: dict = Depends(common_parameters)):
"""
Get all incident costs, or only those matching a given search term.
"""
return search_filter_sort_paginate(model="IncidentCost", **common)
@router.get("/{incident_cost_id}", response_model=IncidentCostRead)
def get_incident_cost(*, db_session: Session = Depends(get_db), incident_cost_id: int):
"""
Get an incident cost by id.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
return incident_cost
@router.post(
"",
response_model=IncidentCostRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def create_incident_cost(
*, db_session: Session = Depends(get_db), incident_cost_in: IncidentCostCreate
):
"""
Create an incident cost.
"""
incident_cost = create(db_session=db_session, incident_cost_in=incident_cost_in)
return incident_cost
@router.put(
"/{incident_cost_id}",
response_model=IncidentCostRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def update_incident_cost(
*,
db_session: Session = Depends(get_db),
incident_cost_id: int,
incident_cost_in: IncidentCostUpdate,
):
"""
Update an incident cost by id.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
incident_cost = update(
db_session=db_session,
incident_cost=incident_cost,
incident_cost_in=incident_cost_in,
)
return incident_cost
@router.delete(
"/{incident_cost_id}",
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def delete_incident_cost(*, db_session: Session = Depends(get_db), incident_cost_id: int):
"""
Delete an incident cost, returning only an HTTP 200 OK if successful.
"""
incident_cost = get(db_session=db_session, incident_cost_id=incident_cost_id)
if not incident_cost:
raise HTTPException(status_code=404, detail="An incident cost with this id does not exist.")
delete(db_session=db_session, incident_cost_id=incident_cost_id)
| en | 0.917487 | Get all incident costs, or only those matching a given search term. Get an incident cost by id. Create an incident cost. Update an incident cost by id. Delete an incident cost, returning only an HTTP 200 OK if successful. | 2.242516 | 2 |
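The record above defines a conventional FastAPI CRUD router for incident costs: list, get, create, update and delete, each gated by database-session and permission dependencies. The sketch below shows one common way such a router is exercised with FastAPI's TestClient; the app wiring, the route prefix and the idea of overriding the dependencies are illustrative assumptions, not code from the dispatch repository.

# Illustrative harness only; import path, prefix and overrides are assumptions.
from fastapi import FastAPI
from fastapi.testclient import TestClient

from dispatch.incident_cost.views import router  # assumed import path for the module above

app = FastAPI()
app.include_router(router, prefix="/incident_costs")
# In a real test the get_db and permission dependencies would be overridden here,
# e.g. app.dependency_overrides[get_db] = lambda: test_session

client = TestClient(app)
print(client.get("/incident_costs").status_code)  # handled by get_incident_costs()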
tests/views/test_admin_committee_questions.py | Lunga001/pmg-cms-2 | 2 | 9033 | import os
from urllib.parse import urlparse, parse_qs
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee, CommitteeQuestion
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from flask import escape
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super().tearDown()
def test_upload_committee_question_document_with_old_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW190-200303.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"Whether her Office has initiated the drafting of a Bill that seeks to protect and promote the rights of persons with disabilities; if not, (a) why not and (b) what steps does her Office intend taking in this regard; if so, on what date does she envisage that the Bill will be introduced in the National Assembly?",
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertEqual(
question.answer,
"<p>Yes</p><p>(b) The Department is in the process of preparing the drafting of a Bill which will be submitted to Cabinet for approval before it will be tabled in Parliament during the 2021/2022 financial year.</p>",
)
self.assertEqual(question.code, "NW190")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_new_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW104-2020-02-28.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW104-2020-02-28.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"What (a) is the number of (i) residential properties, (ii) business erven’, (iii) government buildings and (iv) agricultural properties owned by her department in the Lephalale Local Municipality which are (aa) vacant, (bb) occupied and (cc) earmarked for disposal and (b) total amount does her department owe the municipality in outstanding rates and services?",
)
self.assertEqual(
question.minister.name, "Minister of Public Works and Infrastructure",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertEqual(
question.answer,
"<p><strong>The Minister of Public Works and</strong><strong> Infrastructure: </strong></p><ol><li>The Department of Public Works and Infrastructure (DPWI) has informed me that in the Lephalale Local Municipality the Department owns (i) 183 residential properties (ii) one business erven (iii) 132 government buildings and (iv) 5 agricultural properties. DPWI informed me that (aa) 8 land parcels are vacant and (bb) only one property is unutilised. </li></ol><p>(cc) DPWI has not earmarked any properties for disposal in the Lephalale Local Municipality.</p><ol><li>In August 2019 the Department started a Government Debt Project engaging directly with municipalities and Eskom to verify and reconcile accounts and the project. DPWI, on behalf of client departments, owed the Lephalale Local Municipality, as per accounts received on 17 February 2020, R 334,989.69 which relates current consumption. </li></ol>",
)
self.assertEqual(question.code, "NW104")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_navigable_string_error(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW1153-200619.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW1153-200619.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertIn(
"(1)Whether, with reference to her reply to question 937 on 4 June 2020",
question.question,
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertIn(
"There were no deviations from the standard supply chain management procedures",
question.answer,
)
self.assertEqual(question.code, "NW1153")
# Delete the question that was created
self.created_objects.append(question)
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
| import os
from urllib.parse import urlparse, parse_qs
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee, CommitteeQuestion
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from flask import escape
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super().tearDown()
def test_upload_committee_question_document_with_old_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW190-200303.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"Whether her Office has initiated the drafting of a Bill that seeks to protect and promote the rights of persons with disabilities; if not, (a) why not and (b) what steps does her Office intend taking in this regard; if so, on what date does she envisage that the Bill will be introduced in the National Assembly?",
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertEqual(
question.answer,
"<p>Yes</p><p>(b) The Department is in the process of preparing the drafting of a Bill which will be submitted to Cabinet for approval before it will be tabled in Parliament during the 2021/2022 financial year.</p>",
)
self.assertEqual(question.code, "NW190")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_new_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW104-2020-02-28.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW104-2020-02-28.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"What (a) is the number of (i) residential properties, (ii) business erven’, (iii) government buildings and (iv) agricultural properties owned by her department in the Lephalale Local Municipality which are (aa) vacant, (bb) occupied and (cc) earmarked for disposal and (b) total amount does her department owe the municipality in outstanding rates and services?",
)
self.assertEqual(
question.minister.name, "Minister of Public Works and Infrastructure",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertEqual(
question.answer,
"<p><strong>The Minister of Public Works and</strong><strong> Infrastructure: </strong></p><ol><li>The Department of Public Works and Infrastructure (DPWI) has informed me that in the Lephalale Local Municipality the Department owns (i) 183 residential properties (ii) one business erven (iii) 132 government buildings and (iv) 5 agricultural properties. DPWI informed me that (aa) 8 land parcels are vacant and (bb) only one property is unutilised. </li></ol><p>(cc) DPWI has not earmarked any properties for disposal in the Lephalale Local Municipality.</p><ol><li>In August 2019 the Department started a Government Debt Project engaging directly with municipalities and Eskom to verify and reconcile accounts and the project. DPWI, on behalf of client departments, owed the Lephalale Local Municipality, as per accounts received on 17 February 2020, R 334,989.69 which relates current consumption. </li></ol>",
)
self.assertEqual(question.code, "NW104")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_navigable_string_error(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW1153-200619.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW1153-200619.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertIn(
"(1)Whether, with reference to her reply to question 937 on 4 June 2020",
question.question,
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "<NAME>")
self.assertIn(
"There were no deviations from the standard supply chain management procedures",
question.answer,
)
self.assertEqual(question.code, "NW1153")
# Delete the question that was created
self.created_objects.append(question)
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
| en | 0.982116 | Upload committee question document (/admin/committee-question/upload) # Test that the question that was created contains the correct data # Delete the question that was created Upload committee question document (/admin/committee-question/upload) # Test that the question that was created contains the correct data # Delete the question that was created Upload committee question document (/admin/committee-question/upload) # Test that the question that was created contains the correct data # Delete the question that was created | 2.367907 | 2 |
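Each test above repeats the same sequence: open a .docx fixture, POST it as multipart form data, assert the 302 redirect, pull the new question id out of the redirect query string, then fetch the created record. A small refactoring sketch of that shared sequence follows; it reuses only helpers already defined in the test class and is offered as an illustration, not as code from pmg-cms-2.

# Hypothetical helper method for TestAdminCommitteeQuestions (illustration only).
from urllib.parse import urlparse, parse_qs

def upload_question_docx(self, relative_path, filename):
    """Upload a .docx fixture through the admin endpoint and return the new question id."""
    path = self.get_absolute_file_path(relative_path)
    with open(path, "rb") as f:
        response = self.make_request(
            "/admin/committee-question/upload",
            self.user,
            data={"file": (f, filename)},
            method="POST",
            headers={"Referer": "/admin/committee-question/"},
            content_type="multipart/form-data",
        )
    assert response.status_code == 302
    query = parse_qs(urlparse(response.location).query)
    return int(query["id"][0])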
audioanalysis_demo/test_audio_analysis.py | tiaotiao/applets | 0 | 9034 | <filename>audioanalysis_demo/test_audio_analysis.py
import sys, wave
import AudioAnalysis
FILE_NAME = "snippet.wav"
def testWavWrite():
try:
f = wave.open(FILE_NAME, "rb")
except Exception, e:
print e
print "File type is not wav!"
return
c = wave.open("cnv_" + FILE_NAME, "wb")
print f.getnchannels()
print f.getsampwidth()
print f.getframerate()
print f.getnframes()
#print f.getparams()
total = f.getnframes()
read_count = total / 2
c.setnchannels(f.getnchannels())
c.setsampwidth(f.getsampwidth())
c.setframerate(f.getframerate())
c.setnframes(read_count)
c.setcomptype(f.getcomptype(), f.getcompname())
frames = f.readframes(read_count)
print len(frames)
print "bytes per frame: ", len(frames) / read_count
#for b in frames:
# i = int(b.encode("hex"), 16)
# print b.encode("hex")
#print '#' * (i / 10)
c.writeframes(frames)
print "----------"
f.close()
c.close()
def process(p):
print p
def testAudioAnalysis():
a = AudioAnalysis.AudioAnalysis(FILE_NAME)
print a.getFilename()
print a.getFileType()
a.setFrameInterval(0.01)
print a.analysePower(process)
print a.getPowerMin(), "\tgetPowerMin"
print a.getPowerMax(), "\tgetPowerMax"
print a.getSamplePowerMin(), "\tgetSamplePowerMin"
print a.getSamplePowerMax(), "\tgetSamplePowerMax"
print a.getFrameRate(), "\tgetFrameRate"
print a.getSampleWidth(), "\tgetSampleWidth"
print a.getDuration(), "\tgetDuration"
print a.getFrameInterval(), "\tgetFrameInterval"
print a.getSamples(), "\tgetSamples"
powers = a.getFramePower()
for p in powers:
print "%04lf" % p[0], "%-6d" % p[1] ,'#' * (p[1] / 100)
def main():
f = open(FILE_NAME, "rb")
if not f:
print "Open file failed!"
return
try:
w = wave.open(f)
except Exception, e:
print e
print "File type is not wav!"
return
print "get channels\t", w.getnchannels() # channels, single or double
print "frame rate\t", w.getframerate() # rate, frames per sec
print "samp width\t", w.getsampwidth() # maybe: channels * width = bytes per frame
print "get n frames\t", w.getnframes() # total frames
print "comp type\t", w.getcomptype() # compress
print "params\t", w.getparams()
total = w.getnframes()
read_count = 100
frames = w.readframes(read_count)
print "len(frames)\t", len(frames)
print "bytes per frame\t", len(frames) / read_count
#for b in frames:
#i = int(b.encode("hex"), 16)
#print b.encode("hex")
#print '#' * (i / 10)
print "----------"
w.close()
f.close()
if __name__ == "__main__":
main()
#testAudioAnalysis()
#testWavWrite()
| <filename>audioanalysis_demo/test_audio_analysis.py
import sys, wave
import AudioAnalysis
FILE_NAME = "snippet.wav"
def testWavWrite():
try:
f = wave.open(FILE_NAME, "rb")
except Exception, e:
print e
print "File type is not wav!"
return
c = wave.open("cnv_" + FILE_NAME, "wb")
print f.getnchannels()
print f.getsampwidth()
print f.getframerate()
print f.getnframes()
#print f.getparams()
total = f.getnframes()
read_count = total / 2
c.setnchannels(f.getnchannels())
c.setsampwidth(f.getsampwidth())
c.setframerate(f.getframerate())
c.setnframes(read_count)
c.setcomptype(f.getcomptype(), f.getcompname())
frames = f.readframes(read_count)
print len(frames)
print "bytes per frame: ", len(frames) / read_count
#for b in frames:
# i = int(b.encode("hex"), 16)
# print b.encode("hex")
#print '#' * (i / 10)
c.writeframes(frames)
print "----------"
f.close()
c.close()
def process(p):
print p
def testAudioAnalysis():
a = AudioAnalysis.AudioAnalysis(FILE_NAME)
print a.getFilename()
print a.getFileType()
a.setFrameInterval(0.01)
print a.analysePower(process)
print a.getPowerMin(), "\tgetPowerMin"
print a.getPowerMax(), "\tgetPowerMax"
print a.getSamplePowerMin(), "\tgetSamplePowerMin"
print a.getSamplePowerMax(), "\tgetSamplePowerMax"
print a.getFrameRate(), "\tgetFrameRate"
print a.getSampleWidth(), "\tgetSampleWidth"
print a.getDuration(), "\tgetDuration"
print a.getFrameInterval(), "\tgetFrameInterval"
print a.getSamples(), "\tgetSamples"
powers = a.getFramePower()
for p in powers:
print "%04lf" % p[0], "%-6d" % p[1] ,'#' * (p[1] / 100)
def main():
f = open(FILE_NAME, "rb")
if not f:
print "Open file failed!"
return
try:
w = wave.open(f)
except Exception, e:
print e
print "File type is not wav!"
return
print "get channels\t", w.getnchannels() # channels, single or double
print "frame rate\t", w.getframerate() # rate, frames per sec
print "samp width\t", w.getsampwidth() # maybe: channels * width = bytes per frame
print "get n frames\t", w.getnframes() # total frames
print "comp type\t", w.getcomptype() # compress
print "params\t", w.getparams()
total = w.getnframes()
read_count = 100
frames = w.readframes(read_count)
print "len(frames)\t", len(frames)
print "bytes per frame\t", len(frames) / read_count
#for b in frames:
#i = int(b.encode("hex"), 16)
#print b.encode("hex")
#print '#' * (i / 10)
print "----------"
w.close()
f.close()
if __name__ == "__main__":
main()
#testAudioAnalysis()
#testWavWrite()
| en | 0.318007 | #print f.getparams() #for b in frames: # i = int(b.encode("hex"), 16) # print b.encode("hex") #print '#' * (i / 10) # channels, single or double # rate, frames per sec # maybe: channels * width = bytes per frame # total frames # compress #for b in frames: #i = int(b.encode("hex"), 16) #print b.encode("hex") #print '#' * (i / 10) #testAudioAnalysis() #testWavWrite() | 3.083511 | 3 |
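The script above is Python 2 and depends on an AudioAnalysis class that is not part of this record, so its frame-power output cannot be reproduced from the record alone. For orientation, here is a small Python 3 sketch of per-frame power over a WAV file using only the standard wave module and numpy; treating the samples as 16-bit mono PCM and defining "power" as RMS are my assumptions, not details taken from the applets repository.

# Illustrative only: per-frame RMS of a WAV file, assuming 16-bit mono PCM.
import wave
import numpy as np

def frame_rms(path, frame_interval=0.01):
    with wave.open(path, "rb") as w:
        rate = w.getframerate()
        samples = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
    hop = max(1, int(rate * frame_interval))
    return [(i / rate, float(np.sqrt(np.mean(samples[i:i + hop].astype(np.float64) ** 2))))
            for i in range(0, len(samples), hop)]

# for t, p in frame_rms("snippet.wav"): print("%.2fs" % t, "#" * int(p // 100))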
syloga/transform/evaluation.py | xaedes/python-symbolic-logic-to-gate | 0 | 9035 |
from syloga.core.map_expression_args import map_expression_args
from syloga.utils.identity import identity
from syloga.ast.BooleanNot import BooleanNot
from syloga.ast.BooleanValue import BooleanValue
from syloga.ast.BooleanOr import BooleanOr
from syloga.ast.BooleanAnd import BooleanAnd
from syloga.ast.BooleanNand import BooleanNand
from syloga.ast.BooleanNor import BooleanNor
from syloga.ast.BooleanXor import BooleanXor
from syloga.ast.BreakOut import BreakOut
# from syloga.core.assert_equality_by_table import assert_equality_by_table
def evaluate_expr(expression):
recurse = evaluate_expr
# result = assert_equality_by_table
result = identity
#arg_is_value = lambda arg: isinstance(arg, (BooleanValue, bool))
arg_is_value = lambda arg: type(arg) in [BooleanValue, bool]
def arg_is_value(arg):
is_value = type(arg) in [BooleanValue, bool]
#print("is arg a value? " + str(type(arg)) + " " + str(arg))
#print("is_value", is_value)
return is_value
args_are_values = lambda args: all(map(arg_is_value, args))
get_value = lambda arg: arg if type(arg) == bool else arg.value
is_true = lambda val: val == True
is_false = lambda val: val == False
#print("looking at " + str(type(expression)))
if type(expression) == BooleanNot:
assert(len(expression.args) == 1)
arg = recurse(expression.args[0]);
if arg_is_value(arg):
return result(BooleanValue(not get_value(arg)))
else:
return result(BooleanNot(arg))
elif type(expression) == BooleanOr:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_false(get_value(x))),args))
if args_are_values(args):
return result(BooleanValue(any(arg_values)))
elif any(map(is_true,arg_values)):
return result(BooleanValue(True))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanOr(*args_wo_neutral)))
else:
return result(BooleanOr(*args))
elif type(expression) == BooleanAnd:
args = list(map(recurse, expression.args))
#print(expression.args)
#print(args)
#negated_atom_values = [not get_value(arg) for arg in args if arg_is_value(arg)]
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_true(get_value(x))),args))
#print(arg_values)
if args_are_values(args):
return result(BooleanValue(all(map(is_true,arg_values))))
elif any(map(is_false,arg_values)):
return result(BooleanValue(False))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanAnd(*args_wo_neutral)))
else:
return result(BooleanAnd(*args))
elif type(expression) == BooleanNand:
return result(recurse(BooleanNot(BooleanAnd(*expression.args))))
elif type(expression) == BooleanNor:
return result(recurse(BooleanNot(BooleanOr(*expression.args))))
elif type(expression) == BooleanXor:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
non_value_args = [arg for arg in args if not arg_is_value(arg)]
if len(args) == 0:
raise ValueError("args are missing")
elif len(args) == 1:
return result(args[0])
elif len(arg_values) == 0:
return result(BooleanXor(*non_value_args))
elif len(arg_values) == 1:
if is_true(arg_values[0]):
return result(BooleanXor(arg_values[0], *non_value_args))
else:
return result(recurse(BooleanXor(*non_value_args)))
elif len(arg_values) > 1:
evaluated = is_true(arg_values[0])
for a in arg_values[1:]:
evaluated ^= is_true(a)
evaluated = bool(evaluated)
return result(recurse(BooleanXor(evaluated, *non_value_args)))
elif type(expression) == BreakOut:
expr = recurse(expression.expr)
if arg_is_value(expr):
return result(BooleanValue(expr))
else:
return result(BreakOut(expr))
else:
return result(map_expression_args(recurse, expression, recurse_collection=True))
|
from syloga.core.map_expression_args import map_expression_args
from syloga.utils.identity import identity
from syloga.ast.BooleanNot import BooleanNot
from syloga.ast.BooleanValue import BooleanValue
from syloga.ast.BooleanOr import BooleanOr
from syloga.ast.BooleanAnd import BooleanAnd
from syloga.ast.BooleanNand import BooleanNand
from syloga.ast.BooleanNor import BooleanNor
from syloga.ast.BooleanXor import BooleanXor
from syloga.ast.BreakOut import BreakOut
# from syloga.core.assert_equality_by_table import assert_equality_by_table
def evaluate_expr(expression):
recurse = evaluate_expr
# result = assert_equality_by_table
result = identity
#arg_is_value = lambda arg: isinstance(arg, (BooleanValue, bool))
arg_is_value = lambda arg: type(arg) in [BooleanValue, bool]
def arg_is_value(arg):
is_value = type(arg) in [BooleanValue, bool]
#print("is arg a value? " + str(type(arg)) + " " + str(arg))
#print("is_value", is_value)
return is_value
args_are_values = lambda args: all(map(arg_is_value, args))
get_value = lambda arg: arg if type(arg) == bool else arg.value
is_true = lambda val: val == True
is_false = lambda val: val == False
#print("looking at " + str(type(expression)))
if type(expression) == BooleanNot:
assert(len(expression.args) == 1)
arg = recurse(expression.args[0]);
if arg_is_value(arg):
return result(BooleanValue(not get_value(arg)))
else:
return result(BooleanNot(arg))
elif type(expression) == BooleanOr:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_false(get_value(x))),args))
if args_are_values(args):
return result(BooleanValue(any(arg_values)))
elif any(map(is_true,arg_values)):
return result(BooleanValue(True))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanOr(*args_wo_neutral)))
else:
return result(BooleanOr(*args))
elif type(expression) == BooleanAnd:
args = list(map(recurse, expression.args))
#print(expression.args)
#print(args)
#negated_atom_values = [not get_value(arg) for arg in args if arg_is_value(arg)]
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_true(get_value(x))),args))
#print(arg_values)
if args_are_values(args):
return result(BooleanValue(all(map(is_true,arg_values))))
elif any(map(is_false,arg_values)):
return result(BooleanValue(False))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanAnd(*args_wo_neutral)))
else:
return result(BooleanAnd(*args))
elif type(expression) == BooleanNand:
return result(recurse(BooleanNot(BooleanAnd(*expression.args))))
elif type(expression) == BooleanNor:
return result(recurse(BooleanNot(BooleanOr(*expression.args))))
elif type(expression) == BooleanXor:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
non_value_args = [arg for arg in args if not arg_is_value(arg)]
if len(args) == 0:
raise ValueError("args are missing")
elif len(args) == 1:
return result(args[0])
elif len(arg_values) == 0:
return result(BooleanXor(*non_value_args))
elif len(arg_values) == 1:
if is_true(arg_values[0]):
return result(BooleanXor(arg_values[0], *non_value_args))
else:
return result(recurse(BooleanXor(*non_value_args)))
elif len(arg_values) > 1:
evaluated = is_true(arg_values[0])
for a in arg_values[1:]:
evaluated ^= is_true(a)
evaluated = bool(evaluated)
return result(recurse(BooleanXor(evaluated, *non_value_args)))
elif type(expression) == BreakOut:
expr = recurse(expression.expr)
if arg_is_value(expr):
return result(BooleanValue(expr))
else:
return result(BreakOut(expr))
else:
return result(map_expression_args(recurse, expression, recurse_collection=True))
| en | 0.330722 | # from syloga.core.assert_equality_by_table import assert_equality_by_table # result = assert_equality_by_table #arg_is_value = lambda arg: isinstance(arg, (BooleanValue, bool)) #print("is arg a value? " + str(type(arg)) + " " + str(arg)) #print("is_value", is_value) #print("looking at " + str(type(expression))) #print(expression.args) #print(args) #negated_atom_values = [not get_value(arg) for arg in args if arg_is_value(arg)] #print(arg_values) | 3.034837 | 3 |
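evaluate_expr above is a constant-folding pass over the boolean AST: operators whose arguments are all literals collapse into a single BooleanValue, literal arguments that short-circuit or act as neutral elements are stripped, Nand/Nor/Xor are rewritten in terms of Not/And/Or, and purely symbolic sub-trees are rebuilt unchanged. The usage sketch below infers the node constructors from how the function itself rebuilds them, so treat it as illustrative rather than authoritative.

# Usage sketch; constructor signatures are inferred from the code above.
from syloga.ast.BooleanAnd import BooleanAnd
from syloga.ast.BooleanNot import BooleanNot
from syloga.ast.BooleanValue import BooleanValue
from syloga.transform.evaluation import evaluate_expr

# All-literal input folds completely: And(True, Not(False)) -> BooleanValue(True)
print(evaluate_expr(BooleanAnd(BooleanValue(True), BooleanNot(BooleanValue(False)))))

# A literal False argument short-circuits And() to BooleanValue(False) even when the
# remaining arguments are symbolic sub-expressions rather than values.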
oscar/apps/customer/mixins.py | Idematica/django-oscar | 1 | 9036 | <reponame>Idematica/django-oscar<filename>oscar/apps/customer/mixins.py
from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.sites.models import get_current_site
from django.db.models import get_model
from oscar.apps.customer.signals import user_registered
from oscar.core.loading import get_class
from oscar.core.compat import get_user_model
User = get_user_model()
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class PageTitleMixin(object):
"""
Passes page_title and active_tab into context, which makes it quite useful
for the accounts views.
Dynamic page titles are possible by overriding get_page_title.
"""
page_title = None
active_tab = None
# Use a method that can be overridden and customised
def get_page_title(self):
return self.page_title
def get_context_data(self, **kwargs):
ctx = super(PageTitleMixin, self).get_context_data(**kwargs)
ctx.setdefault('page_title', self.get_page_title())
ctx.setdefault('active_tab', self.active_tab)
return ctx
class RegisterUserMixin(object):
communication_type_code = 'REGISTRATION'
def register_user(self, form):
"""
Create a user instance and send a new registration email (if configured
to).
"""
user = form.save()
if getattr(settings, 'OSCAR_SEND_REGISTRATION_EMAIL', True):
self.send_registration_email(user)
# Raise signal
user_registered.send_robust(sender=self, user=user)
# We have to authenticate before login
try:
user = authenticate(
username=user.email,
password=form.cleaned_data['<PASSWORD>'])
except User.MultipleObjectsReturned:
# Handle race condition where the registration request is made
# multiple times in quick succession. This leads to both requests
# passing the uniqueness check and creating users (as the first one
# hasn't committed when the second one runs the check). We retain
# the first one and delete the dupes.
users = User.objects.filter(email=user.email)
user = users[0]
for u in users[1:]:
u.delete()
auth_login(self.request, user)
return user
def send_registration_email(self, user):
code = self.communication_type_code
ctx = {'user': user,
'site': get_current_site(self.request)}
messages = CommunicationEventType.objects.get_and_render(
code, ctx)
if messages and messages['body']:
Dispatcher().dispatch_user_messages(user, messages)
| from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.sites.models import get_current_site
from django.db.models import get_model
from oscar.apps.customer.signals import user_registered
from oscar.core.loading import get_class
from oscar.core.compat import get_user_model
User = get_user_model()
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class PageTitleMixin(object):
"""
Passes page_title and active_tab into context, which makes it quite useful
for the accounts views.
Dynamic page titles are possible by overriding get_page_title.
"""
page_title = None
active_tab = None
# Use a method that can be overridden and customised
def get_page_title(self):
return self.page_title
def get_context_data(self, **kwargs):
ctx = super(PageTitleMixin, self).get_context_data(**kwargs)
ctx.setdefault('page_title', self.get_page_title())
ctx.setdefault('active_tab', self.active_tab)
return ctx
class RegisterUserMixin(object):
communication_type_code = 'REGISTRATION'
def register_user(self, form):
"""
Create a user instance and send a new registration email (if configured
to).
"""
user = form.save()
if getattr(settings, 'OSCAR_SEND_REGISTRATION_EMAIL', True):
self.send_registration_email(user)
# Raise signal
user_registered.send_robust(sender=self, user=user)
# We have to authenticate before login
try:
user = authenticate(
username=user.email,
password=form.cleaned_data['<PASSWORD>'])
except User.MultipleObjectsReturned:
# Handle race condition where the registration request is made
# multiple times in quick succession. This leads to both requests
# passing the uniqueness check and creating users (as the first one
# hasn't committed when the second one runs the check). We retain
# the first one and delete the dupes.
users = User.objects.filter(email=user.email)
user = users[0]
for u in users[1:]:
u.delete()
auth_login(self.request, user)
return user
def send_registration_email(self, user):
code = self.communication_type_code
ctx = {'user': user,
'site': get_current_site(self.request)}
messages = CommunicationEventType.objects.get_and_render(
code, ctx)
if messages and messages['body']:
Dispatcher().dispatch_user_messages(user, messages) | en | 0.902315 | Passes page_title and active_tab into context, which makes it quite useful for the accounts views. Dynamic page titles are possible by overriding get_page_title. # Use a method that can be overridden and customised Create a user instance and send a new registration email (if configured to). # Raise signal # We have to authenticate before login # Handle race condition where the registration request is made # multiple times in quick succession. This leads to both requests # passing the uniqueness check and creating users (as the first one # hasn't committed when the second one runs the check). We retain # the first one and delete the dupes. | 2.11751 | 2 |
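RegisterUserMixin above wraps four steps behind a single register_user(form) call: save the form, optionally send the registration e-mail, fire the user_registered signal, and authenticate plus log in while cleaning up duplicate users created by racing requests. The sketch below shows how a registration view could lean on that contract; the form import location, template and URL are assumptions, and this is not oscar's actual view.

# Hypothetical view built on the mixin above; form location, template and URL are assumed.
from django.views.generic.edit import FormView
from oscar.apps.customer.forms import EmailUserCreationForm  # assumed location of a suitable form
from oscar.apps.customer.mixins import RegisterUserMixin

class RegistrationView(RegisterUserMixin, FormView):
    form_class = EmailUserCreationForm
    template_name = "customer/registration.html"
    success_url = "/accounts/"

    def form_valid(self, form):
        self.register_user(form)  # create, e-mail (if enabled) and log the user in
        return super(RegistrationView, self).form_valid(form)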
plot_integral.py | vfloeser/TumorDelivery | 0 | 9037 | from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
mplt.rc('text', usetex=True)
mplt.rcParams.update({'font.size': 16})
import logging, getopt, sys
import time
import os
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
# activate ylim for w
var1 = w1
var3 = w3
var5 = w5
var10 = w10
var25 = w25
mode = "w" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == "__main__":
if not os.path.exists('plots'):
os.makedirs('plots')
print('Created folder plots!')
if not os.path.exists('plots/integral'):
os.makedirs('plots/integral')
print('Created folder plots/integral!')
t = np.linspace(tmin, tmax, Nt)
r = np.linspace(0,R,Nr)
Ivar1 = np.zeros(Nt)
Ivar3 = np.zeros(Nt)
Ivar5 = np.zeros(Nt)
Ivar10 = np.zeros(Nt)
Ivar25 = np.zeros(Nt)
for i in range(Nt):
# /1000000 because of units
Ivar1[i] = integrate(var1, i,r, Nt)/1000000
Ivar3[i] = integrate(var3, i,r, Nt)/1000000
Ivar5[i] = integrate(var5, i,r, Nt)/1000000
Ivar10[i] = integrate(var10, i,r, Nt)/1000000
Ivar25[i] = integrate(var25, i,r, Nt)/1000000
mplt.plot(t, Ivar1, label=r'$\alpha = 1$')
mplt.plot(t, Ivar3, label=r'$\alpha = 3$')
mplt.plot(t, Ivar5, label=r'$\alpha = 5$')
mplt.plot(t, Ivar10, label=r'$\alpha = 10$')
mplt.plot(t, Ivar25, label=r'$\alpha = 25$')
mplt.xlim(tmin, tmax)
mplt.yscale('log')
mplt.xlabel(r'$t\quad [h]$')
mplt.ylabel(r'$\bar{'+mode+'}\quad [\mu mol]$')
##########################################################################################
# lim for w, because some values don't make sense
mplt.ylim(1e-11, 3e2)
# lim for w, because some values don't make sense
##########################################################################################
mplt.legend(loc=1, bbox_to_anchor=(1, 0.9))
mplt.tight_layout()
mplt.savefig('plots/integral/int'+mode+'.pdf', format='pdf')
mplt.show() | from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
mplt.rc('text', usetex=True)
mplt.rcParams.update({'font.size': 16})
import logging, getopt, sys
import time
import os
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
# activate ylim for w
var1 = w1
var3 = w3
var5 = w5
var10 = w10
var25 = w25
mode = "w" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == "__main__":
if not os.path.exists('plots'):
os.makedirs('plots')
print('Created folder plots!')
if not os.path.exists('plots/integral'):
os.makedirs('plots/integral')
print('Created folder plots/integral!')
t = np.linspace(tmin, tmax, Nt)
r = np.linspace(0,R,Nr)
Ivar1 = np.zeros(Nt)
Ivar3 = np.zeros(Nt)
Ivar5 = np.zeros(Nt)
Ivar10 = np.zeros(Nt)
Ivar25 = np.zeros(Nt)
for i in range(Nt):
# /1000000 because of units
Ivar1[i] = integrate(var1, i,r, Nt)/1000000
Ivar3[i] = integrate(var3, i,r, Nt)/1000000
Ivar5[i] = integrate(var5, i,r, Nt)/1000000
Ivar10[i] = integrate(var10, i,r, Nt)/1000000
Ivar25[i] = integrate(var25, i,r, Nt)/1000000
mplt.plot(t, Ivar1, label=r'$\alpha = 1$')
mplt.plot(t, Ivar3, label=r'$\alpha = 3$')
mplt.plot(t, Ivar5, label=r'$\alpha = 5$')
mplt.plot(t, Ivar10, label=r'$\alpha = 10$')
mplt.plot(t, Ivar25, label=r'$\alpha = 25$')
mplt.xlim(tmin, tmax)
mplt.yscale('log')
mplt.xlabel(r'$t\quad [h]$')
mplt.ylabel(r'$\bar{'+mode+'}\quad [\mu mol]$')
##########################################################################################
# lim for w, because some values don't make sense
mplt.ylim(1e-11, 3e2)
# lim for w, because some values don't make sense
##########################################################################################
mplt.legend(loc=1, bbox_to_anchor=(1, 0.9))
mplt.tight_layout()
mplt.savefig('plots/integral/int'+mode+'.pdf', format='pdf')
mplt.show() | de | 0.781621 | ########################################################################################## # C O N F I G U R A T I O N ########################################################################################## # activate ylim for w # u or w ########################################################################################## # M A I N ########################################################################################## # /1000000 because of units ########################################################################################## # lim for w, because some values dont make sense # lim for w, because some values dont make sense ########################################################################################## | 1.839002 | 2 |
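The script integrates each concentration field w over the radius at every time step (dividing by 1e6 for unit conversion) and plots the totals on a log scale. The integrate() helper lives in library_time, which is not part of this record; the sketch below is only a guess at what such a routine typically does, namely a spherical-shell trapezoid rule over the radial grid.

# Assumed shape of library_time.integrate(): spherical-shell trapezoid rule (illustration only).
import numpy as np

def integrate(var, time_index, r, Nt):
    """Integrate var over a sphere of radius R: 4*pi * trapz(r**2 * var(r, t_i), r)."""
    profile = var[:, time_index] if var.ndim == 2 and var.shape[1] == Nt else var[time_index]
    return 4.0 * np.pi * np.trapz(r ** 2 * profile, r)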
tests/unit/combiner/Try.py | wangjeaf/CSSCheckStyle | 21 | 9038 | <reponame>wangjeaf/CSSCheckStyle
from helper import *
def doTest():
msg = doCssFileCompress('_test.css')
equal(msg, '@import (url-here);.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px}.test6{display:none;_width:100px;*height:100px}', 'totally compressed')
msg = doCssFileCompress('_test_different_order.css')
equal(msg, '.test1,.test2,.test3,.test4,.test5{*display:none;_display:inline-block;width:100px;height:200px;border:1px solid #FFF}', 'totally compressed')
msg = doCssFileCompress('_with_margin.css')
equal(msg, '.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px;margin:20px 10px 10px}.test6{display:none;_width:100px;*height:100px}', 'margin compress ok')
msg = doCssFileCompress('_just_margin.css')
equal(msg, '.test,.test2,.test3,.test4{margin:20px 10px 10px}', 'just margin compress ok')
msg = doCssFileCompress('_with_padding.css')
equal(msg, '.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px;padding:20px 10px 10px}.test6{display:none;_width:100px;*height:100px}', 'padding compress ok')
msg = doCssFileCompress('_just_padding.css')
equal(msg, '.test,.test2,.test3,.test4{padding:20px 10px 10px}', 'just padding compress ok')
| from helper import *
def doTest():
msg = doCssFileCompress('_test.css')
equal(msg, '@import (url-here);.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px}.test6{display:none;_width:100px;*height:100px}', 'totally compressed')
msg = doCssFileCompress('_test_different_order.css')
equal(msg, '.test1,.test2,.test3,.test4,.test5{*display:none;_display:inline-block;width:100px;height:200px;border:1px solid #FFF}', 'totally compressed')
msg = doCssFileCompress('_with_margin.css')
equal(msg, '.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px;margin:20px 10px 10px}.test6{display:none;_width:100px;*height:100px}', 'margin compress ok')
msg = doCssFileCompress('_just_margin.css')
equal(msg, '.test,.test2,.test3,.test4{margin:20px 10px 10px}', 'just margin compress ok')
msg = doCssFileCompress('_with_padding.css')
equal(msg, '.test,.test2,.test3,.test4,.test5{_width:100px;*height:100px;padding:20px 10px 10px}.test6{display:none;_width:100px;*height:100px}', 'padding compress ok')
msg = doCssFileCompress('_just_padding.css')
equal(msg, '.test,.test2,.test3,.test4{padding:20px 10px 10px}', 'just padding compress ok') | en | 0.364677 | #FFF}', 'totally compressed') | 2.149843 | 2 |
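The expected strings in these tests encode two behaviours: rule sets with identical declarations are merged under a combined selector list, and four-sided margin/padding values are rewritten with the shortest CSS shorthand. The snippet below merely restates that shorthand rule so the expected 'margin:20px 10px 10px' output is easier to read; it is not CSSCheckStyle code.

# Plain restatement of the box shorthand seen in the expected output (not CSSCheckStyle code).
def collapse_box_shorthand(top, right, bottom, left):
    """Return the shortest margin/padding value list for the four sides."""
    if top == right == bottom == left:
        return [top]
    if top == bottom and right == left:
        return [top, right]
    if right == left:
        return [top, right, bottom]    # 20px 10px 10px 10px -> "20px 10px 10px"
    return [top, right, bottom, left]

assert collapse_box_shorthand("20px", "10px", "10px", "10px") == ["20px", "10px", "10px"]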
tests/tests.py | desdelgado/rheology-data-toolkit | 0 | 9039 | import sys, os
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata/extractors")
import h5py
import pandas as pd
from antonpaar import AntonPaarExtractor as APE
from ARES_G2 import ARES_G2Extractor
# %%
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata")
from data_converter import rheo_data_transformer
import unittest
extractor = APE()
#converter = data_converter()
class TestAntonPaar(unittest.TestCase):
def setUp(self):
self.multi_file_test = "C:/Users/Delgado/Documents/Research/rheology-data-toolkit/tests/test_data/Anton_Paar/excel_test_data/two_tests_Steady State Viscosity Curve-LO50C_excel.xlsx"
self.modified_dict, self.raw_data_dict, self.cols, self.units = extractor.import_rheo_data(self.multi_file_test)
# Initialize the converter class
self.converter = rheo_data_transformer(self.modified_dict, self.raw_data_dict, self.cols, self.units)
self.converter.load_to_hdf("test")
def test_modified_output_isdictionary(self):
self.assertIsInstance(self.modified_dict, dict)
def test_modified_output_dictionary_contains_pandas(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for value in self.modified_dict.values():
self.assertIsInstance(value, pd.DataFrame)
def test_raw_output_isdictionary(self):
self.assertIsInstance(self.raw_data_dict, dict)
def test_raw_output_dictionary_contains_pandas(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for value in self.raw_data_dict.values():
self.assertIsInstance(value, pd.DataFrame)
def test_project_name_added_raw_data(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for df in self.raw_data_dict.values():
self.assertEqual(df.iloc[0,0], "Project:")
def test_hdf5_created(self):
name, ext = os.path.splitext("test.hdf5")
self.assertEqual(ext, ".hdf5")
def test_project_subfolders_added(self):
f = h5py.File('test.hdf5', "r")
project_keys = list(f['Project'].keys())
f.close()
self.assertListEqual(project_keys, ['Steady State Viscosity Curve-75C','Steady State Viscosity Curve-LO80C', ])
def test_analyze_cols(self):
temp_df = extractor.make_analyze_dataframes(self.multi_file_test)
for test_key in temp_df.keys():
test_cols = list(temp_df[test_key].columns)
parsed_cols = list(self.cols[test_key])
self.assertListEqual(test_cols, parsed_cols)
# TODO Write test for saving a file
if __name__ == '__main__':
unittest.main()
| import sys, os
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata/extractors")
import h5py
import pandas as pd
from antonpaar import AntonPaarExtractor as APE
from ARES_G2 import ARES_G2Extractor
# %%
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata")
from data_converter import rheo_data_transformer
import unittest
extractor = APE()
#converter = data_converter()
class TestAntonPaar(unittest.TestCase):
def setUp(self):
self.multi_file_test = "C:/Users/Delgado/Documents/Research/rheology-data-toolkit/tests/test_data/Anton_Paar/excel_test_data/two_tests_Steady State Viscosity Curve-LO50C_excel.xlsx"
self.modified_dict, self.raw_data_dict, self.cols, self.units = extractor.import_rheo_data(self.multi_file_test)
# Initialize the converter class
self.converter = rheo_data_transformer(self.modified_dict, self.raw_data_dict, self.cols, self.units)
self.converter.load_to_hdf("test")
def test_modified_output_isdictionary(self):
self.assertIsInstance(self.modified_dict, dict)
def test_modified_output_dictionary_contains_pandas(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for value in self.modified_dict.values():
self.assertIsInstance(value, pd.DataFrame)
def test_raw_output_isdictionary(self):
self.assertIsInstance(self.raw_data_dict, dict)
def test_raw_output_dictionary_contains_pandas(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for value in self.raw_data_dict.values():
self.assertIsInstance(value, pd.DataFrame)
def test_project_name_added_raw_data(self):
""" Test if the output is a dictonary of pandas dataframes'"""
for df in self.raw_data_dict.values():
self.assertEqual(df.iloc[0,0], "Project:")
def test_hdf5_created(self):
name, ext = os.path.splitext("test.hdf5")
self.assertEqual(ext, ".hdf5")
def test_project_subfolders_added(self):
f = h5py.File('test.hdf5', "r")
project_keys = list(f['Project'].keys())
f.close()
self.assertListEqual(project_keys, ['Steady State Viscosity Curve-75C','Steady State Viscosity Curve-LO80C', ])
def test_analyze_cols(self):
temp_df = extractor.make_analyze_dataframes(self.multi_file_test)
for test_key in temp_df.keys():
test_cols = list(temp_df[test_key].columns)
parsed_cols = list(self.cols[test_key])
self.assertListEqual(test_cols, parsed_cols)
# TODO Write test for saving a file
if __name__ == '__main__':
unittest.main()
| en | 0.529939 | # %% #converter = data_converter() # Inilize the class to convert Test if the output is a dictonary of pandas dataframes' Test if the output is a dictonary of pandas dataframes' Test if the output is a dictonary of pandas dataframes' # TODO Write test for saving a file | 2.457703 | 2 |
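The tests above only assert that test.hdf5 is created and that its 'Project' group holds one sub-group per rheology test, so the layout below each test group is not pinned down by this record. For orientation, a short h5py sketch for inspecting that structure follows; everything beyond the group listing is an assumption about what rheo_data_transformer writes.

# Illustrative inspection of the expected HDF5 layout (contents below each group are assumed).
import h5py

with h5py.File("test.hdf5", "r") as f:
    for test_name, group in f["Project"].items():
        print(test_name)      # e.g. "Steady State Viscosity Curve-75C"
        group.visit(print)    # walk whatever datasets/groups the converter stored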
openslides_backend/action/topic/delete.py | reiterl/openslides-backend | 0 | 9040 | from ...models.models import Topic
from ..default_schema import DefaultSchema
from ..generics import DeleteAction
from ..register import register_action
@register_action("topic.delete")
class TopicDelete(DeleteAction):
"""
Action to delete simple topics that can be shown in the agenda.
"""
model = Topic()
schema = DefaultSchema(Topic()).get_delete_schema()
| from ...models.models import Topic
from ..default_schema import DefaultSchema
from ..generics import DeleteAction
from ..register import register_action
@register_action("topic.delete")
class TopicDelete(DeleteAction):
"""
Action to delete simple topics that can be shown in the agenda.
"""
model = Topic()
schema = DefaultSchema(Topic()).get_delete_schema()
| en | 0.92193 | Action to delete simple topics that can be shown in the agenda. | 2.112637 | 2 |
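The record above is the minimal openslides-backend action: pick a model, build its delete schema with DefaultSchema, inherit the generic DeleteAction, and register the action name with the decorator. The sketch below repeats the same pattern for a hypothetical model purely to highlight the convention; it does not claim such an action or model exists in the repository.

# Same registration pattern applied to a hypothetical model (illustration only).
from ...models.models import Tag            # hypothetical model for this sketch
from ..default_schema import DefaultSchema
from ..generics import DeleteAction
from ..register import register_action

@register_action("tag.delete")
class TagDelete(DeleteAction):
    """Action to delete tags."""
    model = Tag()
    schema = DefaultSchema(Tag()).get_delete_schema()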
main.py | Dr3xler/CookieConsentChecker | 0 | 9041 | <filename>main.py
from core import file_handling as file_h, driver_handling as driver_h
from website_handling import website_check as wc
from cookie_handling import cookie_compare
websites = file_h.website_reader()
driver = driver_h.webdriver_setup()
try:
wc.load_with_addon(driver, websites)
except:
print('ERROR: IN FIREFOX USAGE WITH ADDONS')
finally:
wc.close_driver_session(driver)
# driver need to be reloaded because we need a new session without addons
driver = driver_h.webdriver_setup()
try:
wc.load_without_addon(driver, websites)
except:
print('ERROR: IN VANILLA FIREFOX VERSION')
finally:
wc.close_driver_session(driver)
cookie_compare.compare(websites)
| <filename>main.py
from core import file_handling as file_h, driver_handling as driver_h
from website_handling import website_check as wc
from cookie_handling import cookie_compare
websites = file_h.website_reader()
driver = driver_h.webdriver_setup()
try:
wc.load_with_addon(driver, websites)
except:
print('ERROR: IN FIREFOX USAGE WITH ADDONS')
finally:
wc.close_driver_session(driver)
# driver need to be reloaded because we need a new session without addons
driver = driver_h.webdriver_setup()
try:
wc.load_without_addon(driver, websites)
except:
print('ERROR: IN VANILLA FIREFOX VERSION')
finally:
wc.close_driver_session(driver)
cookie_compare.compare(websites)
| en | 0.90483 | # driver need to be reloaded because we need a new session without addons | 2.324811 | 2 |
PyPBEC/OpticalMedium.py | photonbec/PyPBEC | 1 | 9042 | <reponame>photonbec/PyPBEC
import numpy as np
from scipy import constants as sc
from scipy.interpolate import interp1d
from pathlib import Path
from scipy.special import erf as Erf
import pandas as pd
import sys
import os
import csv
class OpticalMedium():
available_media = list()
available_media.append("Rhodamine6G")
def __init__(self, optical_medium):
"""
Initializes an optical medium object.
Parameters:
optical_medium (str): Optical medium
"""
if not type(optical_medium) == str:
raise Exception("optical_medium is expected to be a string")
if not optical_medium in self.available_media:
raise Exception(optical_medium+" is an unknown optical medium")
if optical_medium == "Rhodamine6G":
self.medium = Rhodamine6G()
def get_rates(self, lambdas, **kwargs):
"""
Calculates the rates of absorption and emission, for a specific optical medium.
Parameters:
lambdas (list, or other iterable): Wavelength points where the rates are to be calculated. Wavelength is in meters
other medium specific arguments
"""
return self.medium.get_rates(lambdas=lambdas, **kwargs)
class Rhodamine6G(OpticalMedium):
def __init__(self):
pass
def get_rates(self, lambdas, dye_concentration, n):
"""
Rates for Rhodamine 6G
Parameters:
lambdas (list, or other iterable): Wavelength points where the rates are to be calculated. Wavelength is in meters
dye_concentration (float): In mM (millimolar) 1 mM = 1 mol / m^3
n (float): index of refraction
"""
# absorption data
min_wavelength = 480
max_wavelength = 650
absorption_spectrum_datafile = Path("data") / 'absorption_cross_sections_R6G_in_EthyleneGlycol_corrected.csv'
absorption_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / absorption_spectrum_datafile
raw_data2 = pd.read_csv(absorption_spectrum_datafile)
initial_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
raw_data2 = raw_data2.iloc[initial_index:].reset_index(drop=True)
final_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
raw_data2 = raw_data2.iloc[:final_index].reset_index(drop=True)
absorption_data = raw_data2
absorption_data_normalized = absorption_data['absorption cross-section (m^2)'].values / np.max(absorption_data['absorption cross-section (m^2)'].values)
absorption_spectrum = np.squeeze(np.array([[absorption_data['wavelength (nm)'].values], [absorption_data_normalized]], dtype=float))
interpolated_absorption_spectrum = interp1d(absorption_spectrum[0,:], absorption_spectrum[1,:], kind='cubic')
# emission data
fluorescence_spectrum_datafile = Path("data") / 'fluorescence_spectrum_R6G_in_EthyleneGlycol_corrected.csv'
fluorescence_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / fluorescence_spectrum_datafile
raw_data = pd.read_csv(fluorescence_spectrum_datafile)
initial_index = raw_data.iloc[(raw_data['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
raw_data = raw_data.iloc[initial_index:].reset_index(drop=True)
final_index = raw_data.iloc[(raw_data['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
raw_data = raw_data.iloc[:final_index].reset_index(drop=True)
fluorescence_data = raw_data
fluorescence_data_normalized = fluorescence_data['fluorescence (arb. units)'].values / np.max(fluorescence_data['fluorescence (arb. units)'].values)
emission_spectrum = np.squeeze(np.array([[fluorescence_data['wavelength (nm)'].values], [fluorescence_data_normalized]], dtype=float))
interpolated_emission_spectrum = interp1d(emission_spectrum[0,:], emission_spectrum[1,:], kind='cubic')
# Uses both datasets
if np.min(1e9*np.array(lambdas)) < 480 or np.max(1e9*np.array(lambdas)) > 650:
raise Exception('*** Restrict wavelength to the range between 480 and 650 nm ***')
temperature = 300
lamZPL = 545e-9
n_mol_per_vol= dye_concentration*sc.Avogadro
peak_Xsectn = 2.45e-20*n_mol_per_vol*sc.c/n
wpzl = 2*np.pi*sc.c/lamZPL/1e12
def freq(wl):
return 2*np.pi*sc.c/wl/1e12
def single_exp_func(det):
f_p = 2*np.pi*sc.c/(wpzl+det)*1e-3
f_m = 2*np.pi*sc.c/(wpzl-det)*1e-3
return (0.5*interpolated_absorption_spectrum(f_p)) + (0.5*interpolated_emission_spectrum(f_m))
def Err(det):
return Erf(det*1e12)
def single_adjust_func(det):
return ((1+Err(det))/2.0*single_exp_func(det)) + ((1-Err(det))/2.0*single_exp_func(-1.0*det)*np.exp(sc.h/(2*np.pi*sc.k*temperature)*det*1e12))
emission_rates = np.array([single_adjust_func(-1.0*freq(a_l)+wpzl) for a_l in lambdas])*peak_Xsectn
absorption_rates = np.array([single_adjust_func(freq(a_l)-wpzl) for a_l in lambdas])*peak_Xsectn
        return absorption_rates, emission_rates
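# --- Hedged usage sketch (added for illustration; not part of the original PyPBEC file) ---
# The wavelength grid, dye concentration (in mM) and refractive index below are assumed
# example values; running this also requires the packaged absorption/fluorescence CSV data.
if __name__ == "__main__":
    example_lambdas = np.linspace(500e-9, 620e-9, 121)  # meters, inside the 480-650 nm data range
    medium = OpticalMedium(optical_medium="Rhodamine6G")
    absorption, emission = medium.get_rates(example_lambdas, dye_concentration=1.0, n=1.44)
    print(absorption.shape, emission.shape)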
corehq/apps/appstore/urls.py | dslowikowski/commcare-hq | 1 | 9043 | from django.conf.urls.defaults import url, include, patterns
from corehq.apps.appstore.dispatcher import AppstoreDispatcher
store_urls = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore_default', name="appstore_interfaces_default"),
AppstoreDispatcher.url_pattern(),
)
urlpatterns = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore', name='appstore'),
url(r'^api/', 'appstore_api', name='appstore_api'),
url(r'^store/', include(store_urls)),
url(r'^(?P<domain>[\w\.-]+)/info/$', 'project_info', name='project_info'),
url(r'^deployments/$', 'deployments', name='deployments'),
url(r'^deployments/api/$', 'deployments_api', name='deployments_api'),
url(r'^deployments/(?P<domain>[\w\.-]+)/info/$', 'deployment_info', name='deployment_info'),
url(r'^(?P<domain>[\w\.-]+)/approve/$', 'approve_app', name='approve_appstore_app'),
url(r'^(?P<domain>[\w\.-]+)/copy/$', 'copy_snapshot', name='domain_copy_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/importapp/$', 'import_app', name='import_app_from_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/image/$', 'project_image', name='appstore_project_image'),
url(r'^(?P<domain>[\w\.-]+)/multimedia/$', 'media_files', name='media_files'),
)
faster-rcnn-vgg16-fpn/model/fpn.py | fengkaibit/faster-rcnn_vgg16_fpn | 13 | 9044 | from __future__ import absolute_import
import torch
from torch.nn import functional
class FPN(torch.nn.Module):
def __init__(self, out_channels):
super(FPN, self).__init__()
self.out_channels = out_channels
self.P5 = torch.nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
self.P4_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P4_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P3_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P3_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P2_conv1 = torch.nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P2_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
normal_init(self.P4_conv1, 0, 0.01)
normal_init(self.P4_conv2, 0, 0.01)
normal_init(self.P3_conv1, 0, 0.01)
normal_init(self.P3_conv2, 0, 0.01)
normal_init(self.P2_conv1, 0, 0.01)
normal_init(self.P2_conv2, 0, 0.01)
def forward(self, C2, C3, C4):
p4_out = self.P4_conv1(C4)
p5_out = self.P5(p4_out)
p3_out = self._upsample_add(p4_out, self.P3_conv1(C3))
p2_out = self._upsample_add(p3_out, self.P2_conv1(C2))
p4_out = self.P4_conv2(p4_out)
p3_out = self.P3_conv2(p3_out)
p2_out = self.P2_conv2(p2_out)
return p2_out, p3_out, p4_out, p5_out
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
        may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_,_,H,W = y.size()
return functional.interpolate(x, size=(H,W), mode='bilinear') + y
def normal_init(m, mean, stddev, truncated=False):
"""
    weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
    m.bias.data.zero_()
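# --- Hedged usage sketch (added for illustration; not part of the original repository) ---
# Channel counts follow the Conv2d definitions above (C2: 256 channels, C3/C4: 512 channels);
# the batch size and spatial sizes are arbitrary example values.
if __name__ == "__main__":
    fpn = FPN(out_channels=256)
    C2 = torch.randn(1, 256, 160, 240)
    C3 = torch.randn(1, 512, 80, 120)
    C4 = torch.randn(1, 512, 40, 60)
    p2, p3, p4, p5 = fpn(C2, C3, C4)
    print(p2.shape, p3.shape, p4.shape, p5.shape)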
test/setups/finders/finders_test.py | bowlofstew/client | 40 | 9045 | <reponame>bowlofstew/client
import unittest
from biicode.common.settings.version import Version
from mock import patch
from biicode.client.setups.finders.finders import gnu_version
from biicode.client.setups.rpi_cross_compiler import find_gnu_arm
from biicode.client.workspace.bii_paths import get_biicode_env_folder_path
GCC_VERSION_MAC = '''Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/usr/include/c++/4.2.1
Apple LLVM version 5.1 (clang-503.0.38) (based on LLVM 3.4svn)
Target: x86_64-apple-darwin13.1.0
Thread model: posix'''
GCC_VERSION_UBUNTU = '''gcc (Ubuntu/Linaro 4.8.1-10ubuntu9) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
GCC_VERSION_WIN = '''gcc (GCC) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'''
class FindersTest(unittest.TestCase):
@patch('biicode.client.setups.finders.finders.execute')
def test_gnu_version_detection(self, execute_mock):
execute_mock.return_value = ("", GCC_VERSION_MAC)
self.assertEquals(gnu_version('gnu'), Version('4.2.1'))
execute_mock.return_value = ("", GCC_VERSION_UBUNTU)
self.assertEquals(gnu_version('gnu'), Version('4.8.1'))
execute_mock.return_value = ("", GCC_VERSION_WIN)
self.assertEquals(gnu_version('gnu'), Version('4.8.1'))
@patch('os.path.exists')
def test_find_gnu_arm(self, exists):
exists.return_value = False
self.assertEqual((None, None), find_gnu_arm())
exists.return_value = True
c_path, cpp_path = find_gnu_arm()
inst_path = get_biicode_env_folder_path().replace('\\', '/')
c_path = c_path.replace('\\', '/')
cpp_path = cpp_path.replace('\\', '/')
inst_path = '%s/raspberry_cross_compilers/arm-bcm2708/'\
'arm-bcm2708hardfp-linux-gnueabi/bin/'\
'arm-bcm2708hardfp-linux-gnueabi' % inst_path
self.assertTrue(cpp_path.endswith('%s-g++' % inst_path))
        self.assertTrue(c_path.endswith('%s-gcc' % inst_path))
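# Hedged addition (illustration only): the original file defines the test case without a runner
# entry point; when executed directly, the standard unittest runner could be used.
if __name__ == "__main__":
    unittest.main()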
setup.py | mintmachine/arweave-python-client | 63 | 9046 | from distutils.core import setup
setup(
name="arweave-python-client",
packages = ['arweave'], # this must be the same as the name above
version="1.0.15.dev0",
description="Client interface for sending transactions on the Arweave permaweb",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/MikeHibbert/arweave-python-client",
download_url="https://github.com/MikeHibbert/arweave-python-client",
keywords=['arweave', 'crypto'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'arrow',
'python-jose',
'pynacl',
'pycryptodome',
'cryptography',
'requests',
'psutil'
],
)
| from distutils.core import setup
setup(
name="arweave-python-client",
packages = ['arweave'], # this must be the same as the name above
version="1.0.15.dev0",
description="Client interface for sending transactions on the Arweave permaweb",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/MikeHibbert/arweave-python-client",
download_url="https://github.com/MikeHibbert/arweave-python-client",
keywords=['arweave', 'crypto'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'arrow',
'python-jose',
'pynacl',
'pycryptodome',
'cryptography',
'requests',
'psutil'
],
)
| en | 0.955069 | # this must be the same as the name above | 1.299459 | 1 |
exchange_calendars/extensions/exchange_calendar_krx.py | syonoki/exchange_calendars | 0 | 9047 | <reponame>syonoki/exchange_calendars<filename>exchange_calendars/extensions/exchange_calendar_krx.py
"""
Last update: 2018-10-26
"""
from exchange_calendars.extensions.calendar_extension import ExtendedExchangeCalendar
from pandas import (
Timestamp,
)
from pandas.tseries.holiday import (
Holiday,
previous_friday,
)
from exchange_calendars.exchange_calendar import HolidayCalendar
from datetime import time
from itertools import chain
from pytz import timezone
KRNewYearsDay = Holiday(
'New Years Day',
month=1,
day=1)
KRIndependenceDay = Holiday(
'Independence Day',
month=3,
day=1
)
KRArbourDay = Holiday(
'Arbour Day',
month=4,
day=5,
end_date=Timestamp('2006-01-01'),
)
KRLabourDay = Holiday(
'Labour Day',
month=5,
day=1
)
KRChildrensDay = Holiday(
    'Childrens Day',
month=5,
day=5
)
# 현충일
KRMemorialDay = Holiday(
'Memorial Day',
month=6,
day=6
)
# 제헌절
KRConstitutionDay = Holiday(
'Constitution Day',
month=7,
day=17,
end_date=Timestamp('2008-01-01')
)
# 광복절
KRLiberationDay = Holiday(
'Liberation Day',
month=8,
day=15
)
# 개천절
KRNationalFoundationDay = Holiday(
'NationalFoundationDay',
month=10,
day=3
)
Christmas = Holiday(
'Christmas',
month=12,
day=25
)
# 한글날
KRHangulProclamationDay = Holiday(
'Hangul Proclamation Day',
month=10,
day=9,
start_date=Timestamp('2013-01-01')
)
# KRX 연말 휴장
KRXEndOfYearClosing = Holiday(
'KRX Year-end Closing',
month=12,
day=31,
observance=previous_friday,
start_date=Timestamp('2001-01-01')
)
KRXEndOfYearClosing2000 = [
Timestamp('2000-12-27', tz='UTC'),
Timestamp('2000-12-28', tz='UTC'),
Timestamp('2000-12-29', tz='UTC'),
Timestamp('2000-12-30', tz='UTC'),
]
# Lunar New Year
KRLunarNewYear = [
# 2000
Timestamp('2000-02-04', tz='UTC'),
# 2001
Timestamp('2001-01-23', tz='UTC'),
Timestamp('2001-01-24', tz='UTC'),
Timestamp('2001-01-25', tz='UTC'),
# 2002
Timestamp('2002-02-11', tz='UTC'),
Timestamp('2002-02-12', tz='UTC'),
Timestamp('2002-02-13', tz='UTC'),
# 2003
Timestamp('2003-01-31', tz='UTC'),
# 2004
Timestamp('2004-01-21', tz='UTC'),
Timestamp('2004-01-22', tz='UTC'),
Timestamp('2004-01-23', tz='UTC'),
# 2005
Timestamp('2005-02-08', tz='UTC'),
Timestamp('2005-02-09', tz='UTC'),
Timestamp('2005-02-10', tz='UTC'),
# 2006
Timestamp('2006-01-28', tz='UTC'),
Timestamp('2006-01-29', tz='UTC'),
Timestamp('2006-01-30', tz='UTC'),
# 2007
Timestamp('2007-02-19', tz='UTC'),
# 2008
Timestamp('2008-02-06', tz='UTC'),
Timestamp('2008-02-07', tz='UTC'),
Timestamp('2008-02-08', tz='UTC'),
# 2009
Timestamp('2009-01-25', tz='UTC'),
Timestamp('2009-01-26', tz='UTC'),
Timestamp('2009-01-27', tz='UTC'),
# 2010
Timestamp('2010-02-13', tz='UTC'),
Timestamp('2010-02-14', tz='UTC'),
Timestamp('2010-02-15', tz='UTC'),
# 2011
Timestamp('2011-02-02', tz='UTC'),
Timestamp('2011-02-03', tz='UTC'),
Timestamp('2011-02-04', tz='UTC'),
# 2012
Timestamp('2012-01-23', tz='UTC'),
Timestamp('2012-01-24', tz='UTC'),
# 2013
Timestamp('2013-02-11', tz='UTC'),
# 2014
Timestamp('2014-01-30', tz='UTC'),
Timestamp('2014-01-31', tz='UTC'),
# 2015
Timestamp('2015-02-18', tz='UTC'),
Timestamp('2015-02-19', tz='UTC'),
Timestamp('2015-02-20', tz='UTC'),
# 2016
Timestamp('2016-02-07', tz='UTC'),
Timestamp('2016-02-08', tz='UTC'),
Timestamp('2016-02-09', tz='UTC'),
Timestamp('2016-02-10', tz='UTC'),
# 2017
Timestamp('2017-01-27', tz='UTC'),
Timestamp('2017-01-28', tz='UTC'),
Timestamp('2017-01-29', tz='UTC'),
Timestamp('2017-01-30', tz='UTC'),
# 2018
Timestamp('2018-02-15', tz='UTC'),
Timestamp('2018-02-16', tz='UTC'),
Timestamp('2018-02-17', tz='UTC'),
# 2019
Timestamp('2019-02-04', tz='UTC'),
Timestamp('2019-02-05', tz='UTC'),
Timestamp('2019-02-06', tz='UTC'),
# 2020
Timestamp('2020-01-24', tz='UTC'),
Timestamp('2020-01-25', tz='UTC'),
Timestamp('2020-01-26', tz='UTC'),
Timestamp('2020-01-27', tz='UTC'),
# 2021
Timestamp('2021-02-11', tz='UTC'),
Timestamp('2021-02-12', tz='UTC'),
# 2022
Timestamp('2022-01-31', tz='UTC'),
Timestamp('2022-02-01', tz='UTC'),
Timestamp('2022-02-02', tz='UTC'),
]
# Election Days
KRElectionDays = [
Timestamp('2000-04-13', tz='UTC'), # National Assembly
Timestamp('2002-06-13', tz='UTC'), # Regional election
Timestamp('2002-12-19', tz='UTC'), # Presidency
Timestamp('2004-04-15', tz='UTC'), # National Assembly
Timestamp('2006-05-31', tz='UTC'), # Regional election
Timestamp('2007-12-19', tz='UTC'), # Presidency
Timestamp('2008-04-09', tz='UTC'), # National Assembly
Timestamp('2010-06-02', tz='UTC'), # Local election
Timestamp('2012-04-11', tz='UTC'), # National Assembly
Timestamp('2012-12-19', tz='UTC'), # Presidency
Timestamp('2014-06-04', tz='UTC'), # Local election
Timestamp('2016-04-13', tz='UTC'), # National Assembly
Timestamp('2017-05-09', tz='UTC'), # Presidency
Timestamp('2018-06-13', tz='UTC'), # Local election
Timestamp('2020-04-15', tz='UTC'), # National Assembly
Timestamp('2022-03-09', tz='UTC'), # Presidency
Timestamp('2022-06-01', tz='UTC'), # Local election
]
# Buddha's birthday
KRBuddhasBirthday = [
Timestamp('2000-05-11', tz='UTC'),
Timestamp('2001-05-01', tz='UTC'),
Timestamp('2003-05-08', tz='UTC'),
Timestamp('2004-05-26', tz='UTC'),
Timestamp('2005-05-15', tz='UTC'),
Timestamp('2006-05-05', tz='UTC'),
Timestamp('2007-05-24', tz='UTC'),
Timestamp('2008-05-12', tz='UTC'),
Timestamp('2009-05-02', tz='UTC'),
Timestamp('2010-05-21', tz='UTC'),
Timestamp('2011-05-10', tz='UTC'),
Timestamp('2012-05-28', tz='UTC'),
Timestamp('2013-05-17', tz='UTC'),
Timestamp('2014-05-06', tz='UTC'),
Timestamp('2015-05-25', tz='UTC'),
Timestamp('2016-05-14', tz='UTC'),
Timestamp('2017-05-03', tz='UTC'),
Timestamp('2018-05-22', tz='UTC'),
Timestamp('2020-04-30', tz='UTC'),
Timestamp('2021-05-19', tz='UTC'),
]
# Harvest Moon Day
KRHarvestMoonDay = [
# 2000
Timestamp('2000-09-11', tz='UTC'),
Timestamp('2000-09-12', tz='UTC'),
Timestamp('2000-09-13', tz='UTC'),
# 2001
Timestamp('2001-10-01', tz='UTC'),
Timestamp('2001-10-02', tz='UTC'),
# 2002
Timestamp('2002-09-20', tz='UTC'),
# 2003
Timestamp('2003-09-10', tz='UTC'),
Timestamp('2003-09-11', tz='UTC'),
Timestamp('2003-09-12', tz='UTC'),
# 2004
Timestamp('2004-09-27', tz='UTC'),
Timestamp('2004-09-28', tz='UTC'),
Timestamp('2004-09-29', tz='UTC'),
# 2005
Timestamp('2005-09-17', tz='UTC'),
Timestamp('2005-09-18', tz='UTC'),
Timestamp('2005-09-19', tz='UTC'),
# 2006
Timestamp('2006-10-05', tz='UTC'),
Timestamp('2006-10-06', tz='UTC'),
Timestamp('2006-10-07', tz='UTC'),
# 2007
Timestamp('2007-09-24', tz='UTC'),
Timestamp('2007-09-25', tz='UTC'),
Timestamp('2007-09-26', tz='UTC'),
# 2008
Timestamp('2008-09-13', tz='UTC'),
Timestamp('2008-09-14', tz='UTC'),
Timestamp('2008-09-15', tz='UTC'),
# 2009
Timestamp('2009-10-02', tz='UTC'),
Timestamp('2009-10-03', tz='UTC'),
Timestamp('2009-10-04', tz='UTC'),
# 2010
Timestamp('2010-09-21', tz='UTC'),
Timestamp('2010-09-22', tz='UTC'),
Timestamp('2010-09-23', tz='UTC'),
# 2011
Timestamp('2011-09-12', tz='UTC'),
Timestamp('2011-09-13', tz='UTC'),
# 2012
Timestamp('2012-10-01', tz='UTC'),
# 2013
Timestamp('2013-09-18', tz='UTC'),
Timestamp('2013-09-19', tz='UTC'),
Timestamp('2013-09-20', tz='UTC'),
# 2014
Timestamp('2014-09-08', tz='UTC'),
Timestamp('2014-09-09', tz='UTC'),
Timestamp('2014-09-10', tz='UTC'),
# 2015
Timestamp('2015-09-28', tz='UTC'),
Timestamp('2015-09-29', tz='UTC'),
# 2016
Timestamp('2016-09-14', tz='UTC'),
Timestamp('2016-09-15', tz='UTC'),
Timestamp('2016-09-16', tz='UTC'),
# 2017
Timestamp('2017-10-03', tz='UTC'),
Timestamp('2017-10-04', tz='UTC'),
Timestamp('2017-10-05', tz='UTC'),
Timestamp('2017-10-06', tz='UTC'),
# 2018
Timestamp('2018-09-23', tz='UTC'),
Timestamp('2018-09-24', tz='UTC'),
Timestamp('2018-09-25', tz='UTC'),
Timestamp('2018-09-26', tz='UTC'),
# 2019
Timestamp('2019-09-12', tz='UTC'),
Timestamp('2019-09-13', tz='UTC'),
# 2020
Timestamp('2020-09-30', tz='UTC'),
Timestamp('2020-10-01', tz='UTC'),
Timestamp('2020-10-02', tz='UTC'),
# 2021
Timestamp('2021-09-20', tz='UTC'),
Timestamp('2021-09-21', tz='UTC'),
Timestamp('2021-09-22', tz='UTC'),
# 2022
Timestamp('2022-09-09', tz='UTC'),
Timestamp('2022-09-12', tz='UTC'), # 대체휴일
]
# 대체휴일
KRSubstitutionHolidayForChildrensDay2018 = [
Timestamp('2018-05-07', tz='UTC')
]
# 임시공휴일
KRCelebrationForWorldCupHosting = [
Timestamp('2002-07-01', tz='UTC')
]
KRSeventyYearsFromIndependenceDay = [
Timestamp('2015-08-14', tz='UTC')
]
KRTemporaryHolidayForChildrensDay2016 = [
Timestamp('2016-05-06', tz='UTC')
]
KRTemporaryHolidayForHarvestMoonDay2017 = [
Timestamp('2017-10-02', tz='UTC')
]
KRTemporaryHolidayForChildrenDay2018 = [
Timestamp('2018-05-07', tz='UTC')
]
KRTemporaryHolidayForChildrenDay2019 = [
Timestamp('2019-05-06', tz='UTC')
]
KRTemporaryHolidayForLiberationDay2020 = [
Timestamp('2020-08-17', tz='UTC')
]
KRTemporaryHoliday2021 = [
Timestamp('2021-08-16', tz='UTC'), # 광복절 대체휴일
Timestamp('2021-10-04', tz='UTC'), # 개천절 대체휴일
Timestamp('2021-10-11', tz='UTC'), # 한글날 대체휴일
]
KRTemporaryHoliday2022 = [
Timestamp('2022-10-10', tz='UTC'), # 한글날 대체휴일
]
# 잘 모르겠는 휴장일
HolidaysNeedToCheck = [
Timestamp('2000-01-03', tz='UTC')
]
HolidaysBefore1999 = [
Timestamp('1990-01-01', tz='UTC'),
Timestamp('1990-01-02', tz='UTC'),
Timestamp('1990-01-03', tz='UTC'),
Timestamp('1990-01-29', tz='UTC'),
Timestamp('1990-03-01', tz='UTC'),
Timestamp('1990-04-05', tz='UTC'),
Timestamp('1990-05-02', tz='UTC'),
Timestamp('1990-06-06', tz='UTC'),
Timestamp('1990-07-17', tz='UTC'),
Timestamp('1990-08-15', tz='UTC'),
Timestamp('1990-09-03', tz='UTC'),
Timestamp('1990-10-01', tz='UTC'),
Timestamp('1990-10-03', tz='UTC'),
Timestamp('1990-10-09', tz='UTC'),
Timestamp('1990-12-25', tz='UTC'),
Timestamp('1991-01-01', tz='UTC'),
Timestamp('1991-01-02', tz='UTC'),
Timestamp('1991-02-14', tz='UTC'),
Timestamp('1991-02-15', tz='UTC'),
Timestamp('1991-03-01', tz='UTC'),
Timestamp('1991-04-05', tz='UTC'),
Timestamp('1991-05-21', tz='UTC'),
Timestamp('1991-06-06', tz='UTC'),
Timestamp('1991-07-17', tz='UTC'),
Timestamp('1991-08-15', tz='UTC'),
Timestamp('1991-09-23', tz='UTC'),
Timestamp('1991-10-03', tz='UTC'),
Timestamp('1991-12-25', tz='UTC'),
Timestamp('1991-12-30', tz='UTC'),
Timestamp('1992-01-01', tz='UTC'),
Timestamp('1992-09-10', tz='UTC'),
Timestamp('1992-09-11', tz='UTC'),
Timestamp('1992-10-03', tz='UTC'),
Timestamp('1992-12-25', tz='UTC'),
Timestamp('1992-12-29', tz='UTC'),
Timestamp('1992-12-30', tz='UTC'),
Timestamp('1992-12-31', tz='UTC'),
Timestamp('1993-01-01', tz='UTC'),
Timestamp('1993-01-22', tz='UTC'),
Timestamp('1993-03-01', tz='UTC'),
Timestamp('1993-04-05', tz='UTC'),
Timestamp('1993-05-05', tz='UTC'),
Timestamp('1993-05-28', tz='UTC'),
Timestamp('1993-07-17', tz='UTC'),
Timestamp('1993-09-29', tz='UTC'),
Timestamp('1993-09-30', tz='UTC'),
Timestamp('1993-10-01', tz='UTC'),
Timestamp('1993-12-29', tz='UTC'),
Timestamp('1993-12-30', tz='UTC'),
Timestamp('1993-12-31', tz='UTC'),
Timestamp('1994-01-02', tz='UTC'),
Timestamp('1994-02-09', tz='UTC'),
Timestamp('1994-02-10', tz='UTC'),
Timestamp('1994-02-11', tz='UTC'),
Timestamp('1994-03-01', tz='UTC'),
Timestamp('1994-04-05', tz='UTC'),
Timestamp('1994-05-05', tz='UTC'),
Timestamp('1994-06-06', tz='UTC'),
Timestamp('1994-07-17', tz='UTC'),
Timestamp('1994-08-15', tz='UTC'),
Timestamp('1994-09-19', tz='UTC'),
Timestamp('1994-09-20', tz='UTC'),
Timestamp('1994-09-21', tz='UTC'),
Timestamp('1994-10-03', tz='UTC'),
Timestamp('1994-12-29', tz='UTC'),
Timestamp('1994-12-30', tz='UTC'),
Timestamp('1995-01-02', tz='UTC'),
Timestamp('1995-01-30', tz='UTC'),
Timestamp('1995-01-31', tz='UTC'),
Timestamp('1995-02-01', tz='UTC'),
Timestamp('1995-03-01', tz='UTC'),
Timestamp('1995-05-01', tz='UTC'),
Timestamp('1995-05-05', tz='UTC'),
Timestamp('1995-06-06', tz='UTC'),
Timestamp('1995-06-27', tz='UTC'),
Timestamp('1995-07-17', tz='UTC'),
Timestamp('1995-08-15', tz='UTC'),
Timestamp('1995-09-08', tz='UTC'),
Timestamp('1995-09-09', tz='UTC'),
Timestamp('1995-10-03', tz='UTC'),
Timestamp('1995-12-22', tz='UTC'),
Timestamp('1995-12-25', tz='UTC'),
Timestamp('1995-12-28', tz='UTC'),
Timestamp('1995-12-29', tz='UTC'),
Timestamp('1995-12-30', tz='UTC'),
Timestamp('1995-12-31', tz='UTC'),
Timestamp('1996-01-01', tz='UTC'),
Timestamp('1996-01-02', tz='UTC'),
Timestamp('1996-02-19', tz='UTC'),
Timestamp('1996-02-20', tz='UTC'),
Timestamp('1996-03-01', tz='UTC'),
Timestamp('1996-04-05', tz='UTC'),
Timestamp('1996-04-11', tz='UTC'),
Timestamp('1996-05-01', tz='UTC'),
Timestamp('1996-05-05', tz='UTC'),
Timestamp('1996-05-24', tz='UTC'),
Timestamp('1996-06-06', tz='UTC'),
Timestamp('1996-07-17', tz='UTC'),
Timestamp('1996-08-15', tz='UTC'),
Timestamp('1996-09-26', tz='UTC'),
Timestamp('1996-09-27', tz='UTC'),
Timestamp('1996-09-28', tz='UTC'),
Timestamp('1996-10-03', tz='UTC'),
Timestamp('1996-12-25', tz='UTC'),
Timestamp('1996-12-30', tz='UTC'),
Timestamp('1996-12-31', tz='UTC'),
Timestamp('1997-01-01', tz='UTC'),
Timestamp('1997-01-02', tz='UTC'),
Timestamp('1997-02-07', tz='UTC'),
Timestamp('1997-02-08', tz='UTC'),
Timestamp('1997-03-01', tz='UTC'),
Timestamp('1997-04-05', tz='UTC'),
Timestamp('1997-05-05', tz='UTC'),
Timestamp('1997-05-14', tz='UTC'),
Timestamp('1997-06-06', tz='UTC'),
Timestamp('1997-07-17', tz='UTC'),
Timestamp('1997-08-15', tz='UTC'),
Timestamp('1997-09-16', tz='UTC'),
Timestamp('1997-09-17', tz='UTC'),
Timestamp('1997-10-03', tz='UTC'),
Timestamp('1997-12-25', tz='UTC'),
Timestamp('1998-01-01', tz='UTC'),
Timestamp('1998-01-02', tz='UTC'),
Timestamp('1998-01-27', tz='UTC'),
Timestamp('1998-01-28', tz='UTC'),
Timestamp('1998-01-29', tz='UTC'),
Timestamp('1998-03-01', tz='UTC'),
Timestamp('1998-04-05', tz='UTC'),
Timestamp('1998-05-01', tz='UTC'),
Timestamp('1998-05-03', tz='UTC'),
Timestamp('1998-05-05', tz='UTC'),
Timestamp('1998-06-04', tz='UTC'),
Timestamp('1998-06-06', tz='UTC'),
Timestamp('1998-07-17', tz='UTC'),
Timestamp('1998-08-15', tz='UTC'),
Timestamp('1998-10-03', tz='UTC'),
Timestamp('1998-10-04', tz='UTC'),
Timestamp('1998-10-05', tz='UTC'),
Timestamp('1998-10-06', tz='UTC'),
Timestamp('1998-12-25', tz='UTC'),
Timestamp('1998-12-31', tz='UTC'),
Timestamp('1999-01-01', tz='UTC'),
Timestamp('1999-02-15', tz='UTC'),
Timestamp('1999-02-16', tz='UTC'),
Timestamp('1999-02-17', tz='UTC'),
Timestamp('1999-03-01', tz='UTC'),
Timestamp('1999-04-05', tz='UTC'),
Timestamp('1999-05-05', tz='UTC'),
Timestamp('1999-05-22', tz='UTC'),
Timestamp('1999-06-06', tz='UTC'),
Timestamp('1999-07-17', tz='UTC'),
Timestamp('1999-09-23', tz='UTC'),
Timestamp('1999-09-24', tz='UTC'),
Timestamp('1999-09-25', tz='UTC'),
Timestamp('1999-10-03', tz='UTC'),
Timestamp('1999-12-29', tz='UTC'),
Timestamp('1999-12-30', tz='UTC'),
Timestamp('1999-12-31', tz='UTC'),
]
class KRXExchangeCalendar(ExtendedExchangeCalendar):
"""
Exchange calendars for KRX
Open Time: 9:00 AM, Asia/Seoul
Close Time: 3:30 PM, Asia/Seoul (3:00 PM until 2016/07/31)
"""
@property
def name(self):
return "KRX"
@property
def tz(self):
# return timezone('Asia/Seoul')
return timezone('UTC')
@property
def open_time(self):
return time(9, 0)
@property
def open_times(self):
return [(None, time(9, 0))]
@property
def close_time(self):
return time(15, 30)
@property
def close_times(self):
return [(None, time(15, 30))]
@property
def regular_holidays(self):
return HolidayCalendar([
KRNewYearsDay,
KRIndependenceDay,
KRArbourDay,
KRLabourDay,
KRChildrensDay,
KRMemorialDay,
KRConstitutionDay,
KRLiberationDay,
KRNationalFoundationDay,
Christmas,
KRHangulProclamationDay,
KRXEndOfYearClosing
])
@property
def special_closes(self):
return []
@property
def adhoc_holidays(self):
return list(chain(
KRXEndOfYearClosing2000,
KRLunarNewYear,
KRElectionDays,
KRBuddhasBirthday,
KRHarvestMoonDay,
KRSubstitutionHolidayForChildrensDay2018,
KRCelebrationForWorldCupHosting,
KRSeventyYearsFromIndependenceDay,
KRTemporaryHolidayForChildrensDay2016,
KRTemporaryHolidayForHarvestMoonDay2017,
KRTemporaryHolidayForChildrenDay2018,
KRTemporaryHolidayForChildrenDay2019,
HolidaysNeedToCheck,
KRTemporaryHolidayForLiberationDay2020,
            KRTemporaryHoliday2021,
            KRTemporaryHoliday2022,
HolidaysBefore1999,
))
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
        return self.__class__ == other.__class__
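# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# It only exercises the pandas holiday rules defined above, avoiding assumptions about the
# ExtendedExchangeCalendar constructor; the 2019 date range is an arbitrary example.
if __name__ == "__main__":
    example_rules = HolidayCalendar([KRNewYearsDay, KRLiberationDay, Christmas])
    print(example_rules.holidays(start=Timestamp("2019-01-01"), end=Timestamp("2019-12-31")))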
utilities.py | ameldocena/StratifiedAggregation | 0 | 9048 | <reponame>ameldocena/StratifiedAggregation
import random
import numpy
#import tensorflow as tf
#import torch
from abc import abstractmethod
from sklearn.decomposition import PCA
from aggregators import FedAvg, MultiKrum, AlignedAvg, TrimmedMean, Median, StratifiedAggr
class SelectionStrategy:
# Unchanged from original work
@abstractmethod
def select_round_workers(self, workers, poisoned_workers, kwargs):
"""
:param workers: list(int). All workers available for learning
:param poisoned_workers: list(int). All workers that are poisoned
:param kwargs: dict
"""
raise NotImplementedError("select_round_workers() not implemented")
class RandomSelectionStrategy(SelectionStrategy):
# Unchanged from original work
"""
Randomly selects workers out of the list of all workers
"""
def select_round_workers(self, workers, poisoned_workers, kwargs):
#The poisoned_workers here are not used
return random.sample(workers, kwargs["NUM_WORKERS_PER_ROUND"])
#returns a list of sampled worker ids
# class StratifiedRandomSelection(SelectionStrategy):
# #We first stratify: Each stratum will be a list of workers
# #Then within each stratum, we randomly select
# #We would need the list of workers and the information about their skews
def select_aggregator(args, name, KWARGS={}):
#Creates an Aggregator object as selected
if name == "FedAvg":
return FedAvg(args, name, KWARGS)
elif name == "AlignedAvg":
return AlignedAvg(args, name, KWARGS)
elif name == "AlignedAvgImpute":
KWARGS.update({"use_impute":"filter","align":"fusion"})
return AlignedAvg(args, name, **KWARGS)
elif name == "MultiKrum":
return MultiKrum(args, name, KWARGS)
elif name == "TrimmedMean":
return TrimmedMean(args, name, KWARGS)
elif name == "Median":
return Median(args, name, KWARGS)
elif (name == "StratKrum") or (name == "StratTrimMean") or (name == "StratMedian") or (name == "StratFedAvg"):
#We may have to change the class name to StratifiedAggregation
return StratifiedAggr(args, name, KWARGS)
else:
raise NotImplementedError(f"Unrecognized Aggregator Name: {name}")
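# Hedged example (illustration only): a caller would obtain an aggregator by name, e.g.
#   aggregator = select_aggregator(args, "FedAvg")
#   aggregator = select_aggregator(args, "StratMedian", KWARGS={"some_option": 1})
# where `args` and the KWARGS keys are whatever the chosen aggregator class expects
# ("some_option" above is a made-up placeholder).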
def calculate_pca_of_gradients(logger, gradients, num_components):
# Unchanged from original work
pca = PCA(n_components=num_components)
logger.info("Computing {}-component PCA of gradients".format(num_components))
return pca.fit_transform(gradients)
#So this is here after all
def calculate_model_gradient( model_1, model_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two Torch models.
:param logger: loguru.logger (NOW REMOVED)
:param model_1: torch.nn
:param model_2: torch.nn
"""
model_1_parameters = list(dict(model_1.state_dict()))
model_2_parameters = list(dict(model_2.state_dict()))
return calculate_parameter_gradients(model_1_parameters, model_2_parameters)
def calculate_parameter_gradients(params_1, params_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two sets of Torch parameters.
:param logger: loguru.logger (NOW REMOVED)
:param params_1: dict
:param params_2: dict
"""
#logger.debug("Shape of model_1_parameters: {}".format(str(len(params_1))))
#logger.debug("Shape of model_2_parameters: {}".format(str(len(params_2))))
return numpy.array([x for x in numpy.subtract(params_1, params_2)])
# #Inserted
# def convert2TF(torch_tensor):
# # Converts a pytorch tensor into a Tensorflow.
# # We first convert torch into numpy, then to tensorflow.
# # Arg: torch_tensor - a Pytorch tensor object
# np_tensor = torch_tensor.numpy().astype(float)
# return tf.convert_to_tensor(np_tensor)
#
# def convert2Torch(tf_tensor):
# #Converts a TF tensor to Torch
# #Arg: tf_tensor - a TF tensor
# np_tensor = tf.make_ndarray(tf_tensor)
# return torch.from_numpy(np_tensor)
def count_poisoned_stratum(stratified_workers, poisoned_workers):
if len(poisoned_workers) > 0:
print("\nPoisoned workers:", len(poisoned_workers), poisoned_workers)
for stratum in stratified_workers:
intersect = list(set(stratified_workers[stratum]).intersection(poisoned_workers))
print("Count poisoned workers per stratum:", len(intersect), intersect)
print("Stratum: {}. Propn to total poisoned: {}. Propn to subpopn in stratum: {}".format(stratum, len(intersect)/len(poisoned_workers),
len(intersect)/len(stratified_workers[stratum])))
else:
print("No poisoned workers")
def generate_uniform_weights(random_workers):
"""
This function generates uniform weights for each stratum in random_workers
:param random_workers:
:return:
"""
strata_weights = dict()
weight = 1.0 / len(list(random_workers.keys()))
for stratum in random_workers:
strata_weights[stratum] = weight
return strata_weights | import random
import numpy
#import tensorflow as tf
#import torch
from abc import abstractmethod
from sklearn.decomposition import PCA
from aggregators import FedAvg, MultiKrum, AlignedAvg, TrimmedMean, Median, StratifiedAggr
class SelectionStrategy:
# Unchanged from original work
@abstractmethod
def select_round_workers(self, workers, poisoned_workers, kwargs):
"""
:param workers: list(int). All workers available for learning
:param poisoned_workers: list(int). All workers that are poisoned
:param kwargs: dict
"""
raise NotImplementedError("select_round_workers() not implemented")
class RandomSelectionStrategy(SelectionStrategy):
# Unchanged from original work
"""
Randomly selects workers out of the list of all workers
"""
def select_round_workers(self, workers, poisoned_workers, kwargs):
#The poisoned_workers here are not used
return random.sample(workers, kwargs["NUM_WORKERS_PER_ROUND"])
#returns a list of sampled worker ids
# class StratifiedRandomSelection(SelectionStrategy):
# #We first stratify: Each stratum will be a list of workers
# #Then within each stratum, we randomly select
# #We would need the list of workers and the information about their skews
def select_aggregator(args, name, KWARGS={}):
#Creates an Aggregator object as selected
if name == "FedAvg":
return FedAvg(args, name, KWARGS)
elif name == "AlignedAvg":
return AlignedAvg(args, name, KWARGS)
elif name == "AlignedAvgImpute":
KWARGS.update({"use_impute":"filter","align":"fusion"})
return AlignedAvg(args, name, **KWARGS)
elif name == "MultiKrum":
return MultiKrum(args, name, KWARGS)
elif name == "TrimmedMean":
return TrimmedMean(args, name, KWARGS)
elif name == "Median":
return Median(args, name, KWARGS)
elif (name == "StratKrum") or (name == "StratTrimMean") or (name == "StratMedian") or (name == "StratFedAvg"):
#We may have to change the class name to StratifiedAggregation
return StratifiedAggr(args, name, KWARGS)
else:
raise NotImplementedError(f"Unrecognized Aggregator Name: {name}")
def calculate_pca_of_gradients(logger, gradients, num_components):
# Unchanged from original work
pca = PCA(n_components=num_components)
logger.info("Computing {}-component PCA of gradients".format(num_components))
return pca.fit_transform(gradients)
#So this is here after all
def calculate_model_gradient( model_1, model_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two Torch models.
:param logger: loguru.logger (NOW REMOVED)
:param model_1: torch.nn
:param model_2: torch.nn
"""
model_1_parameters = list(dict(model_1.state_dict()))
model_2_parameters = list(dict(model_2.state_dict()))
return calculate_parameter_gradients(model_1_parameters, model_2_parameters)
def calculate_parameter_gradients(params_1, params_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two sets of Torch parameters.
:param logger: loguru.logger (NOW REMOVED)
:param params_1: dict
:param params_2: dict
"""
#logger.debug("Shape of model_1_parameters: {}".format(str(len(params_1))))
#logger.debug("Shape of model_2_parameters: {}".format(str(len(params_2))))
return numpy.array([x for x in numpy.subtract(params_1, params_2)])
# #Inserted
# def convert2TF(torch_tensor):
# # Converts a pytorch tensor into a Tensorflow.
# # We first convert torch into numpy, then to tensorflow.
# # Arg: torch_tensor - a Pytorch tensor object
# np_tensor = torch_tensor.numpy().astype(float)
# return tf.convert_to_tensor(np_tensor)
#
# def convert2Torch(tf_tensor):
# #Converts a TF tensor to Torch
# #Arg: tf_tensor - a TF tensor
# np_tensor = tf.make_ndarray(tf_tensor)
# return torch.from_numpy(np_tensor)
def count_poisoned_stratum(stratified_workers, poisoned_workers):
if len(poisoned_workers) > 0:
print("\nPoisoned workers:", len(poisoned_workers), poisoned_workers)
for stratum in stratified_workers:
intersect = list(set(stratified_workers[stratum]).intersection(poisoned_workers))
print("Count poisoned workers per stratum:", len(intersect), intersect)
print("Stratum: {}. Propn to total poisoned: {}. Propn to subpopn in stratum: {}".format(stratum, len(intersect)/len(poisoned_workers),
len(intersect)/len(stratified_workers[stratum])))
else:
print("No poisoned workers")
def generate_uniform_weights(random_workers):
"""
This function generates uniform weights for each stratum in random_workers
:param random_workers:
:return:
"""
strata_weights = dict()
weight = 1.0 / len(list(random_workers.keys()))
for stratum in random_workers:
strata_weights[stratum] = weight
return strata_weights | en | 0.693368 | #import tensorflow as tf #import torch # Unchanged from original work :param workers: list(int). All workers available for learning :param poisoned_workers: list(int). All workers that are poisoned :param kwargs: dict # Unchanged from original work Randomly selects workers out of the list of all workers #The poisoned_workers here are not used #returns a list of sampled worker ids # class StratifiedRandomSelection(SelectionStrategy): # #We first stratify: Each stratum will be a list of workers # #Then within each stratum, we randomly select # #We would need the list of workers and the information about their skews #Creates an Aggregator object as selected #We may have to change the class name to StratifiedAggregation # Unchanged from original work #So this is here after all # Minor change from original work Calculates the gradient (parameter difference) between two Torch models. :param logger: loguru.logger (NOW REMOVED) :param model_1: torch.nn :param model_2: torch.nn # Minor change from original work Calculates the gradient (parameter difference) between two sets of Torch parameters. :param logger: loguru.logger (NOW REMOVED) :param params_1: dict :param params_2: dict #logger.debug("Shape of model_1_parameters: {}".format(str(len(params_1)))) #logger.debug("Shape of model_2_parameters: {}".format(str(len(params_2)))) # #Inserted # def convert2TF(torch_tensor): # # Converts a pytorch tensor into a Tensorflow. # # We first convert torch into numpy, then to tensorflow. # # Arg: torch_tensor - a Pytorch tensor object # np_tensor = torch_tensor.numpy().astype(float) # return tf.convert_to_tensor(np_tensor) # # def convert2Torch(tf_tensor): # #Converts a TF tensor to Torch # #Arg: tf_tensor - a TF tensor # np_tensor = tf.make_ndarray(tf_tensor) # return torch.from_numpy(np_tensor) This function generates uniform weights for each stratum in random_workers :param random_workers: :return: | 2.595923 | 3 |
game/player.py | b1naryth1ef/mmo | 7 | 9049 | <reponame>b1naryth1ef/mmo
from sprites import PlayerSprite
import time
class Player(object):
def __init__(self, name, game):
self.name = name
self.pos = [50, 50]
self.do_blit = False
self.game = game
self.surf = game.SCREEN
self.lastMove = 99999999999
self.velo_def = [0, 0]
self.velo_x = 0
self.velo_y = 0
self.sprite = PlayerSprite(self)
self.moving = [False, False, False, False]
def tick(self):
if self.do_blit:
self.game.reDraw = True
self.sprite.display(self.surf.screen)
#self.surface.screen.blit(self.image, self.pos)
self.do_blit = False
# print self.lastMove - time.time()
if True in self.moving and abs(self.lastMove - time.time()) >= .08:
self.lastMove = time.time()
if self.moving[0]: self.move(x=-1)
if self.moving[1]: self.move(x=1)#down
if self.moving[2]: self.move(y=-1)#left
if self.moving[3]: self.move(y=1)#right
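# Note on the guard above: with a 0.08 s threshold, held-down movement keys trigger
# move() at most ~12.5 times per second, i.e. roughly 125 px/s at 10 px per step.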
def move(self, x=0, y=0):
self.pos[1]+=x*10
self.pos[0]+=y*10
self.do_blit = True
if y < 0 and self.sprite.dir == 1:
self.sprite.flip()
elif y > 0 and self.sprite.dir == -1:
self.sprite.flip() | from sprites import PlayerSprite
import time
class Player(object):
def __init__(self, name, game):
self.name = name
self.pos = [50, 50]
self.do_blit = False
self.game = game
self.surf = game.SCREEN
self.lastMove = 99999999999
self.velo_def = [0, 0]
self.velo_x = 0
self.velo_y = 0
self.sprite = PlayerSprite(self)
self.moving = [False, False, False, False]
def tick(self):
if self.do_blit:
self.game.reDraw = True
self.sprite.display(self.surf.screen)
#self.surface.screen.blit(self.image, self.pos)
self.do_blit = False
# print self.lastMove - time.time()
if True in self.moving and abs(self.lastMove - time.time()) >= .08:
self.lastMove = time.time()
if self.moving[0]: self.move(x=-1)
if self.moving[1]: self.move(x=1)#down
if self.moving[2]: self.move(y=-1)#left
if self.moving[3]: self.move(y=1)#right
def move(self, x=0, y=0):
self.pos[1]+=x*10
self.pos[0]+=y*10
self.do_blit = True
if y < 0 and self.sprite.dir == 1:
self.sprite.flip()
elif y > 0 and self.sprite.dir == -1:
self.sprite.flip() | en | 0.159253 | #self.surface.screen.blit(self.image, self.pos) # print self.lastMove - time.time() #down #left #right | 3.146375 | 3 |
toys/layers/pool.py | cbarrick/toys | 1 | 9050 | <filename>toys/layers/pool.py
from typing import Sequence
import torch
from torch import nn
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size,
stride=stride, padding=padding, dilation=dilation,
return_indices=return_indices, ceil_mode=ceil_mode)
def forward(self, x):
(*batch, height, width, channels) = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
(_, new_height, new_width, _) = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
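# A minimal usage sketch, assuming channels-last input (this wrapper, unlike
# torch.nn.MaxPool2d, expects (..., H, W, C) tensors and restores any extra leading
# batch dimensions after pooling):
#
# pool = MaxPool2d(kernel_size=2)
# x = torch.randn(8, 32, 32, 3) # (batch, height, width, channels)
# y = pool(x) # -> shape (8, 16, 16, 3)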
| <filename>toys/layers/pool.py
from typing import Sequence
import torch
from torch import nn
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size,
stride=stride, padding=padding, dilation=dilation,
return_indices=return_indices, ceil_mode=ceil_mode)
def forward(self, x):
(*batch, height, width, channels) = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
(_, new_height, new_width, _) = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
| none | 1 | 2.628106 | 3 |
|
src/forecastmgmt/ui/masterdata/person_window.py | vvladych/forecastmgmt | 0 | 9051 | <reponame>vvladych/forecastmgmt
from gi.repository import Gtk
from masterdata_abstract_window import MasterdataAbstractWindow
from person_add_mask import PersonAddMask
from person_list_mask import PersonListMask
class PersonWindow(MasterdataAbstractWindow):
def __init__(self, main_window):
super(PersonWindow, self).__init__(main_window, PersonListMask(), PersonAddMask(main_window, self.add_working_area))
| from gi.repository import Gtk
from masterdata_abstract_window import MasterdataAbstractWindow
from person_add_mask import PersonAddMask
from person_list_mask import PersonListMask
class PersonWindow(MasterdataAbstractWindow):
def __init__(self, main_window):
super(PersonWindow, self).__init__(main_window, PersonListMask(), PersonAddMask(main_window, self.add_working_area)) | none | 1 | 1.838144 | 2 |
|
fastseg/model/utils.py | SeockHwa/Segmentation_mobileV3 | 274 | 9052 | <filename>fastseg/model/utils.py<gh_stars>100-1000
import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
"""Retrieve the pretrained network trunk and channel counts"""
if trunk_name == 'efficientnet_b4':
backbone = EfficientNet_B4(pretrained=True)
s2_ch = 24
s4_ch = 32
high_level_ch = 1792
elif trunk_name == 'efficientnet_b0':
backbone = EfficientNet_B0(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 1280
elif trunk_name == 'mobilenetv3_large':
backbone = MobileNetV3_Large(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 960
elif trunk_name == 'mobilenetv3_small':
backbone = MobileNetV3_Small(pretrained=True)
s2_ch = 16
s4_ch = 16
high_level_ch = 576
else:
raise ValueError('unknown backbone {}'.format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
class ConvBnRelu(nn.Module):
"""Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation.
Original source of this code comes from
https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
"""
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
norm_layer=nn.BatchNorm2d):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=False)
self.bn = norm_layer(out_planes, eps=1e-5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
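# A minimal sketch of how the two helpers above can be combined when building a
# segmentation head; the 256-channel projection width is an arbitrary example value,
# not something defined in this file:
#
# backbone, s2_ch, s4_ch, high_level_ch = get_trunk('mobilenetv3_large')
# project = ConvBnRelu(high_level_ch, 256, kernel_size=1)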
| <filename>fastseg/model/utils.py<gh_stars>100-1000
import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
"""Retrieve the pretrained network trunk and channel counts"""
if trunk_name == 'efficientnet_b4':
backbone = EfficientNet_B4(pretrained=True)
s2_ch = 24
s4_ch = 32
high_level_ch = 1792
elif trunk_name == 'efficientnet_b0':
backbone = EfficientNet_B0(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 1280
elif trunk_name == 'mobilenetv3_large':
backbone = MobileNetV3_Large(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 960
elif trunk_name == 'mobilenetv3_small':
backbone = MobileNetV3_Small(pretrained=True)
s2_ch = 16
s4_ch = 16
high_level_ch = 576
else:
raise ValueError('unknown backbone {}'.format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
class ConvBnRelu(nn.Module):
"""Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation.
Original source of this code comes from
https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
"""
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
norm_layer=nn.BatchNorm2d):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=False)
self.bn = norm_layer(out_planes, eps=1e-5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
| en | 0.596223 | Retrieve the pretrained network trunk and channel counts Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation. Original source of this code comes from https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py | 2.182528 | 2 |
python/testData/inspections/PyTypeCheckerInspection/ModuleTypeParameter/a.py | 06needhamt/intellij-community | 2 | 9053 | import module
from types import ModuleType
def foo(m: ModuleType):
pass
def bar(m):
return m.__name__
foo(module)
bar(module) | import module
from types import ModuleType
def foo(m: ModuleType):
pass
def bar(m):
return m.__name__
foo(module)
bar(module) | none | 1 | 2.524533 | 3 |
|
tests/webapp/test_webapp_actions.py | proofdock/chaos-azure | 1 | 9054 | <reponame>proofdock/chaos-azure
from unittest.mock import patch, MagicMock
from pdchaosazure.webapp.actions import stop, restart, delete
from tests.data import config_provider, secrets_provider, webapp_provider
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_stop_webapp(init, fetch):
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
webapp = webapp_provider.default()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
stop(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.stop.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_restart_webapp(init, fetch):
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
webapp = webapp_provider.default()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
restart(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.restart.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_delete_webapp(init, fetch):
webapp = webapp_provider.default()
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
delete(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.delete.assert_called_with(webapp['resourceGroup'], webapp['name'])
| from unittest.mock import patch, MagicMock
from pdchaosazure.webapp.actions import stop, restart, delete
from tests.data import config_provider, secrets_provider, webapp_provider
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_stop_webapp(init, fetch):
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
webapp = webapp_provider.default()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
stop(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.stop.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_restart_webapp(init, fetch):
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
webapp = webapp_provider.default()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
restart(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.restart.assert_called_with(webapp['resourceGroup'], webapp['name'])
@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_delete_webapp(init, fetch):
webapp = webapp_provider.default()
config = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_public()
client = MagicMock()
init.return_value = client
resource_list = [webapp]
fetch.return_value = resource_list
f = "where resourceGroup=~'rg'"
delete(f, config, secrets)
fetch.assert_called_with(f, config, secrets)
client.web_apps.delete.assert_called_with(webapp['resourceGroup'], webapp['name']) | none | 1 | 2.204043 | 2 |
|
utils.py | lbesnard/subimporter | 0 | 9055 | <reponame>lbesnard/subimporter
def stringifySong(song):
return f"<'{song['title']}' by '{song['artist']}' in '{song['album']}'>" | def stringifySong(song):
return f"<'{song['title']}' by '{song['artist']}' in '{song['album']}'>" | none | 1 | 2.663007 | 3 |
|
echopype/model/modelbase.py | leewujung/echopype-lfs-test | 0 | 9056 | """
echopype data model that keeps track of echo data and
its connection to data files.
"""
import os
import warnings
import datetime as dt
from echopype.utils import uwa
import numpy as np
import xarray as xr
class ModelBase(object):
"""Class for manipulating echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
self.file_path = file_path # this passes the input through file name test
self.noise_est_range_bin_size = 5 # meters per tile for noise estimation
self.noise_est_ping_size = 30 # number of pings per tile for noise estimation
self.MVBS_range_bin_size = 5 # meters per tile for MVBS
self.MVBS_ping_size = 30 # number of pings per tile for MVBS
self.Sv = None # calibrated volume backscattering strength
self.Sv_path = None # path to save calibrated results
self.Sv_clean = None # denoised volume backscattering strength
self.TS = None # calibrated target strength
self.TS_path = None # path to save TS calculation results
self.MVBS = None # mean volume backscattering strength
self._salinity = None
self._temperature = None
self._pressure = None
self._sound_speed = None
self._sample_thickness = None
self._range = None
self._seawater_absorption = None
@property
def salinity(self):
return self._salinity
@salinity.setter
def salinity(self, sal):
self._salinity = sal
@property
def pressure(self):
return self._pressure
@pressure.setter
def pressure(self, pres):
self._pressure = pres
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, t):
self._temperature = t
@property
def sample_thickness(self):
return self._sample_thickness
@sample_thickness.setter
def sample_thickness(self, sth):
self._sample_thickness = sth
@property
def range(self):
return self._range
@range.setter
def range(self, rr):
self._range = rr
@property
def seawater_absorption(self):
return self._seawater_absorption
@seawater_absorption.setter
def seawater_absorption(self, absorption):
self._seawater_absorption.values = absorption
@property
def sound_speed(self):
return self._sound_speed
@sound_speed.setter
def sound_speed(self, ss):
if isinstance(self._sound_speed, xr.DataArray):
self._sound_speed.values = ss
else:
self._sound_speed = ss
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, p):
self._file_path = p
# Load netCDF groups if file format is correct
pp = os.path.basename(p)
_, ext = os.path.splitext(pp)
supported_ext_list = ['.raw', '.01A']
if ext in supported_ext_list:
print('Data file in manufacturer format, please convert to .nc first.')
elif ext == '.nc':
self.toplevel = xr.open_dataset(self.file_path)
# Get .nc filenames for storing processed data if computation is performed
self.Sv_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv.nc')
self.Sv_clean_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv_clean.nc')
self.TS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_TS.nc')
self.MVBS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_MVBS.nc')
# Raise error if the file format convention does not match
if self.toplevel.sonar_convention_name != 'SONAR-netCDF4':
raise ValueError('netCDF file convention not recognized.')
self.toplevel.close()
else:
raise ValueError('Data file format not recognized.')
def calc_sound_speed(self, src='file'):
"""Base method to be overridden for calculating sound_speed for different sonar models
"""
# issue warning when subclass methods not available
print("Sound speed calculation has not been implemented for this sonar model!")
def calc_seawater_absorption(self, src='file'):
"""Base method to be overridden for calculating seawater_absorption for different sonar models
"""
# issue warning when subclass methods not available
print("Seawater absorption calculation has not been implemented for this sonar model!")
def calc_sample_thickness(self):
"""Base method to be overridden for calculating sample_thickness for different sonar models.
"""
# issue warning when subclass methods not available
print('Sample thickness calculation has not been implemented for this sonar model!')
def calc_range(self):
"""Base method to be overridden for calculating range for different sonar models.
"""
# issue warning when subclass methods not available
print('Range calculation has not been implemented for this sonar model!')
def recalculate_environment(self, ss=True, sa=True, st=True, r=True):
""" Recalculates sound speed, seawater absorption, sample thickness, and range using
salinity, temperature, and pressure
Parameters
----------
ss : bool
Whether to calculate sound speed. Defaults to `True`
sa : bool
Whether to calculate seawater absorption. Defaults to `True`
st : bool
Whether to calculate sample thickness. Defaults to `True`
r : bool
Whether to calculate range. Defaults to `True`
"""
s, t, p = self.salinity, self.temperature, self.pressure
if s is not None and t is not None and p is not None:
if ss:
self.sound_speed = self.calc_sound_speed(src='user')
if sa:
self.seawater_absorption = self.calc_seawater_absorption(src='user')
if st:
self.sample_thickness = self.calc_sample_thickness()
if r:
self.range = self.calc_range()
elif s is None:
print("Salinity was not provided. Environment was not recalculated")
elif t is None:
print("Temperature was not provided. Environment was not recalculated")
else:
print("Pressure was not provided. Environment was not recalculated")
def calibrate(self):
"""Base method to be overridden for volume backscatter calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Calibration has not been implemented for this sonar model!')
def calibrate_TS(self):
"""Base method to be overridden for target strength calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Target strength calibration has not been implemented for this sonar model!')
def validate_path(self, save_path, save_postfix):
"""Creates a directory if it doesnt exist. Returns a valid save path.
"""
def _assemble_path():
file_in = os.path.basename(self.file_path)
file_name, file_ext = os.path.splitext(file_in)
return file_name + save_postfix + file_ext
if save_path is None:
save_dir = os.path.dirname(self.file_path)
file_out = _assemble_path()
else:
path_ext = os.path.splitext(save_path)[1]
# If given save_path is file, split into directory and file
if path_ext != '':
save_dir, file_out = os.path.split(save_path)
if save_dir == '': # save_path is only a filename without directory
save_dir = os.path.dirname(self.file_path) # use directory from input file
# If given save_path is a directory, get a filename from input .nc file
else:
save_dir = save_path
file_out = _assemble_path()
# Create folder if not already exists
if save_dir == '':
# TODO: should we use '.' instead of os.getcwd()?
save_dir = os.getcwd() # explicit about path to current directory
if not os.path.exists(save_dir):
os.mkdir(save_dir)
return os.path.join(save_dir, file_out)
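# Worked examples of the resolution logic above, assuming self.file_path is
# '/data/cruise/raw_file.nc' (an illustrative placeholder path):
# validate_path(save_path=None, save_postfix='_Sv') -> '/data/cruise/raw_file_Sv.nc'
# validate_path(save_path='/tmp/out', save_postfix='_Sv') -> '/tmp/out/raw_file_Sv.nc'
# validate_path(save_path='/tmp/out/custom.nc', save_postfix='_Sv') -> '/tmp/out/custom.nc'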
@staticmethod
def get_tile_params(r_data_sz, p_data_sz, r_tile_sz, p_tile_sz, sample_thickness):
"""Obtain ping_time and range_bin parameters associated with groupby and groupby_bins operations.
These parameters are used in methods remove_noise(), noise_estimates(), get_MVBS().
Parameters
----------
r_data_sz : int
number of range_bin entries in data
p_data_sz : int
number of ping_time entries in data
r_tile_sz : float
tile size along the range_bin dimension [m]
p_tile_sz : int
tile size along the ping_time dimension [number of pings]
sample_thickness : float
thickness of each data sample, determined by sound speed and pulse duration
Returns
-------
r_tile_sz : int
modified tile size along the range dimension [m], determined by sample_thickness
r_tile_bin_edge : list of int
bin edges along the range_bin dimension for :py:func:`xarray.DataArray.groupby_bins` operation
p_tile_bin_edge : list of int
bin edges along the ping_time dimension for :py:func:`xarray.DataArray.groupby_bins` operation
"""
# Adjust noise_est_range_bin_size because range_bin_size may be an inconvenient value
num_r_per_tile = np.round(r_tile_sz / sample_thickness).astype(int) # num of range_bin per tile
r_tile_sz = num_r_per_tile * sample_thickness
# Total number of range_bin and ping tiles
num_tile_range_bin = np.ceil(r_data_sz / num_r_per_tile).astype(int)
if np.mod(p_data_sz, p_tile_sz) == 0:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int) + 1
else:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int)
# Tile bin edges along range
# ... -1 to make sure each bin has the same size because of the right-inclusive and left-exclusive bins
r_tile_bin_edge = [np.arange(x.values + 1) * y.values - 1 for x, y in zip(num_tile_range_bin, num_r_per_tile)]
p_tile_bin_edge = np.arange(num_tile_ping + 1) * p_tile_sz - 1
return r_tile_sz, r_tile_bin_edge, p_tile_bin_edge
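# Worked example of the tiling arithmetic above: with sample_thickness ~= 0.2 m and
# r_tile_sz = 5 m, num_r_per_tile = round(5 / 0.2) = 25 samples, so the adjusted
# r_tile_sz is 25 * 0.2 = 5.0 m; with p_tile_sz = 30 pings the ping bin edges become
# [-1, 29, 59, ...], i.e. right-inclusive 30-ping tiles.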
def _get_proc_Sv(self, source_path=None, source_postfix='_Sv'):
"""Private method to return calibrated Sv either from memory or _Sv.nc file.
This method is called by remove_noise(), noise_estimates() and get_MVBS().
"""
if self.Sv is None: # calibration not yet performed
Sv_path = self.validate_path(save_path=source_path, # wrangle _Sv path
save_postfix=source_postfix)
if os.path.exists(Sv_path): # _Sv exists
self.Sv = xr.open_dataset(Sv_path) # load _Sv file
else:
# if path specification given but file do not exist:
if (source_path is not None) or (source_postfix != '_Sv'):
print('%s no calibrated data found in specified path: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), Sv_path))
else:
print('%s data has not been calibrated. ' % dt.datetime.now().strftime('%H:%M:%S'))
print(' performing calibration now and operate from Sv in memory.')
self.calibrate() # calibrate, have Sv in memory
return self.Sv
def remove_noise(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None,
SNR=0, Sv_threshold=None,
save=False, save_postfix='_Sv_clean', save_path=None):
"""Remove noise by using noise estimates obtained from the minimum mean calibrated power level
along each column of tiles.
See method noise_estimates() for details of noise estimation.
Reference: <NAME> & Higginbottom, 2017, ICES Journal of Marine Sciences
Parameters
----------
source_postfix : str
postfix of the Sv file used to remove noise from, default to '_Sv'
source_path : str
path of Sv file used to remove noise from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float, optional
Meters per tile for noise estimation [m]
noise_est_ping_size : int, optional
Number of pings per tile for noise estimation
SNR : int, optional
Minimum signal-to-noise ratio (remove values below this after general noise removal).
Sv_threshold : int, optional
Minimum Sv threshold [dB] (remove values below this after general noise removal)
save : bool, optional
Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
Default to ``False``.
save_postfix : str
Filename postfix, default to '_Sv_clean'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Get calibrated Sv
if self.Sv is not None:
print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
print('%s Remove noise from Sv stored in: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Get TVG and ABS for compensating for transmission loss
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Function for use with apply
def remove_n(x, rr):
p_c_lin = 10 ** ((x.Sv - x.ABS - x.TVG) / 10)
nn = 10 * np.log10(p_c_lin.mean(dim='ping_time').groupby_bins('range_bin', rr).mean().min(
dim='range_bin_bins')) + x.ABS + x.TVG
# Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB
if not Sv_threshold:
return x.Sv.where(x.Sv > (nn + SNR), other=np.nan)
else:
return x.Sv.where((x.Sv > (nn + SNR)) & (x.Sv > Sv_threshold), other=np.nan)
# Groupby noise removal operation
proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
ABS.name = 'ABS'
TVG.name = 'TVG'
pp = xr.merge([proc_data, ABS])
pp = xr.merge([pp, TVG])
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
map(remove_n, rr=range_bin_tile_bin_edge[0])
Sv_clean = Sv_clean.drop_vars(['ping_idx'])
else:
tmp_clean = []
cnt = 0
for key, val in pp.groupby('frequency'): # iterate over different frequency channel
tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
map(remove_n, rr=range_bin_tile_bin_edge[cnt])
cnt += 1
tmp_clean.append(tmp)
clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
Sv_clean = xr.DataArray(clean_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_clean[0]['ping_time'].values,
'range_bin': tmp_clean[0]['range_bin'].values},
dims=['frequency', 'ping_time', 'range_bin'])
# Set up DataSet
Sv_clean.name = 'Sv'
Sv_clean = Sv_clean.to_dataset()
Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Attach calculated range into data set
Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
# Save as object attributes as a netCDF file
self.Sv_clean = Sv_clean
# TODO: now adding the below so that MVBS can be calculated directly
# from the cleaned Sv without saving and loading Sv_clean from disk.
# However this is not explicit to the user. A better way to do this
# is to change get_MVBS() to first check existence of self.Sv_clean
# when `_Sv_clean` is specified as the source_postfix.
if not print_src: # remove noise from Sv stored in memory
self.Sv = Sv_clean.copy()
if save:
self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
Sv_clean.to_netcdf(self.Sv_clean_path)
# Close opened resources
proc_data.close()
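# A minimal call sketch, assuming `ed` is a concrete ModelBase subclass whose data have
# already been calibrated (parameter values are illustrative only):
#
# ed.remove_noise(noise_est_range_bin_size=5, noise_est_ping_size=30, SNR=3, save=True)
# # the denoised result is then available as ed.Sv_clean (and saved as *_Sv_clean.nc)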
def noise_estimates(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None):
"""Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.
The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
This method contains redundant pieces of code that also appear in method remove_noise(),
but this method can be used separately to determine the exact tile size for noise removal before
noise removal is actually performed.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
source_path : str
path of Sv file used to calculate noise estimates from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float
meters per tile for noise estimation [m]
noise_est_ping_size : int
number of pings per tile for noise estimation
Returns
-------
noise_est : xarray DataSet
noise estimates as a DataArray with dimension [ping_time x range_bin]
ping_time and range_bin are taken from the first element of each tile along each of the dimensions
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Use calibrated data to calculate noise removal
proc_data = self._get_proc_Sv()
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Values for noise estimates
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Noise estimates
proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
ping_time=self.noise_est_ping_size,
range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
boundary='pad').mean().min(dim='range_bin'))
else:
range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
tmp_noise = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
ping_time=self.noise_est_ping_size,
range_bin=r_bin.values,
boundary='pad').mean().min(dim='range_bin'))
tmp_da.name = 'noise_est'
tmp_noise.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
noise_est = xr.DataArray(noise_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_noise[0]['ping_time'].values},
dims=['frequency', 'ping_time'])
noise_est = noise_est.to_dataset(name='noise_est')
noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Close opened resources
proc_data.close()
return noise_est
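# A minimal call sketch (with `ed` as in the sketch above; parameter values are
# illustrative only):
#
# noise = ed.noise_estimates(noise_est_range_bin_size=10, noise_est_ping_size=50)
# # noise['noise_est'] has dimensions (frequency, ping_time), one value per tile column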
def get_MVBS(self, source_postfix='_Sv', source_path=None,
MVBS_range_bin_size=None, MVBS_ping_size=None,
save=False, save_postfix='_MVBS', save_path=None):
"""Calculate Mean Volume Backscattering Strength (MVBS).
The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
calculate and save MVBS as a new attribute to the calling EchoData instance.
MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
that are from the first elements of each tile along the corresponding dimensions
in the original Sv or Sv_clean DataArray.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate MVBS, default to '_Sv'
source_path : str
path of Sv file used to calculate MVBS, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
MVBS_range_bin_size : float, optional
meters per tile for calculating MVBS [m]
MVBS_ping_size : int, optional
number of pings per tile for calculating MVBS
save : bool, optional
whether to save the calculated MVBS into a new .nc file, default to ``False``
save_postfix : str
Filename postfix, default to '_MVBS'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
"""
# Check params
if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
self.MVBS_range_bin_size = MVBS_range_bin_size
if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
self.MVBS_ping_size = MVBS_ping_size
# Get Sv by validating path and calibrate if not already done
if self.Sv is not None:
print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
if self.Sv_path is not None:
print('%s Sv source used to calculate MVBS: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
else:
print('%s Sv source used to calculate MVBS: memory' %
dt.datetime.now().strftime('%H:%M:%S'))
# Get tile indexing parameters
self.MVBS_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.MVBS_range_bin_size,
p_tile_sz=self.MVBS_ping_size,
sample_thickness=self.sample_thickness)
# Calculate MVBS
Sv_linear = 10 ** (proc_data.Sv / 10) # convert to linear domain before averaging
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
MVBS = 10 * np.log10(Sv_linear.coarsen(
ping_time=self.MVBS_ping_size,
range_bin=int(np.unique(self.MVBS_range_bin_size / self.sample_thickness)),
boundary='pad').mean())
MVBS.coords['range_bin'] = ('range_bin', np.arange(MVBS['range_bin'].size))
else:
range_bin_coarsen_idx = (self.MVBS_range_bin_size / self.sample_thickness).astype(int)
tmp_MVBS = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(Sv_linear.sel(frequency=freq).coarsen(
ping_time=self.MVBS_ping_size,
range_bin=r_bin.values,
boundary='pad').mean())
tmp_da.coords['range_bin'] = ('range_bin', np.arange(tmp_da['range_bin'].size))
tmp_da.name = 'MVBS'
tmp_MVBS.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
MVBS_val = np.array([zz.values for zz in xr.align(*tmp_MVBS, join='outer')])
MVBS = xr.DataArray(MVBS_val,
coords={'frequency': Sv_linear['frequency'].values,
'ping_time': tmp_MVBS[0]['ping_time'].values,
'range_bin': np.arange(MVBS_val.shape[2])},
dims=['frequency', 'ping_time', 'range_bin']).dropna(dim='range_bin', how='all')
# Set MVBS attributes
MVBS.name = 'MVBS'
MVBS = MVBS.to_dataset()
MVBS['MVBS_range_bin_size'] = ('frequency', self.MVBS_range_bin_size)
MVBS.attrs['MVBS_ping_size'] = self.MVBS_ping_size
# Save results in object and as a netCDF file
self.MVBS = MVBS
if save:
self.MVBS_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving MVBS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.MVBS_path))
MVBS.to_netcdf(self.MVBS_path)
# Close opened resources
proc_data.close()
| """
echopype data model that keeps track of echo data and
its connection to data files.
"""
import os
import warnings
import datetime as dt
from echopype.utils import uwa
import numpy as np
import xarray as xr
class ModelBase(object):
"""Class for manipulating echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
self.file_path = file_path # this passes the input through file name test
self.noise_est_range_bin_size = 5 # meters per tile for noise estimation
self.noise_est_ping_size = 30 # number of pings per tile for noise estimation
self.MVBS_range_bin_size = 5 # meters per tile for MVBS
self.MVBS_ping_size = 30 # number of pings per tile for MVBS
self.Sv = None # calibrated volume backscattering strength
self.Sv_path = None # path to save calibrated results
self.Sv_clean = None # denoised volume backscattering strength
self.TS = None # calibrated target strength
self.TS_path = None # path to save TS calculation results
self.MVBS = None # mean volume backscattering strength
self._salinity = None
self._temperature = None
self._pressure = None
self._sound_speed = None
self._sample_thickness = None
self._range = None
self._seawater_absorption = None
@property
def salinity(self):
return self._salinity
@salinity.setter
def salinity(self, sal):
self._salinity = sal
@property
def pressure(self):
return self._pressure
@pressure.setter
def pressure(self, pres):
self._pressure = pres
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, t):
self._temperature = t
@property
def sample_thickness(self):
return self._sample_thickness
@sample_thickness.setter
def sample_thickness(self, sth):
self._sample_thickness = sth
@property
def range(self):
return self._range
@range.setter
def range(self, rr):
self._range = rr
@property
def seawater_absorption(self):
return self._seawater_absorption
@seawater_absorption.setter
def seawater_absorption(self, absorption):
self._seawater_absorption.values = absorption
@property
def sound_speed(self):
return self._sound_speed
@sound_speed.setter
def sound_speed(self, ss):
if isinstance(self._sound_speed, xr.DataArray):
self._sound_speed.values = ss
else:
self._sound_speed = ss
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, p):
self._file_path = p
# Load netCDF groups if file format is correct
pp = os.path.basename(p)
_, ext = os.path.splitext(pp)
supported_ext_list = ['.raw', '.01A']
if ext in supported_ext_list:
print('Data file in manufacturer format, please convert to .nc first.')
elif ext == '.nc':
self.toplevel = xr.open_dataset(self.file_path)
# Get .nc filenames for storing processed data if computation is performed
self.Sv_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv.nc')
self.Sv_clean_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv_clean.nc')
self.TS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_TS.nc')
self.MVBS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_MVBS.nc')
# Raise error if the file format convention does not match
if self.toplevel.sonar_convention_name != 'SONAR-netCDF4':
raise ValueError('netCDF file convention not recognized.')
self.toplevel.close()
else:
raise ValueError('Data file format not recognized.')
def calc_sound_speed(self, src='file'):
"""Base method to be overridden for calculating sound_speed for different sonar models
"""
# issue warning when subclass methods not available
print("Sound speed calculation has not been implemented for this sonar model!")
def calc_seawater_absorption(self, src='file'):
"""Base method to be overridden for calculating seawater_absorption for different sonar models
"""
# issue warning when subclass methods not available
print("Seawater absorption calculation has not been implemented for this sonar model!")
def calc_sample_thickness(self):
"""Base method to be overridden for calculating sample_thickness for different sonar models.
"""
# issue warning when subclass methods not available
print('Sample thickness calculation has not been implemented for this sonar model!')
def calc_range(self):
"""Base method to be overridden for calculating range for different sonar models.
"""
# issue warning when subclass methods not available
print('Range calculation has not been implemented for this sonar model!')
def recalculate_environment(self, ss=True, sa=True, st=True, r=True):
""" Recalculates sound speed, seawater absorption, sample thickness, and range using
salinity, temperature, and pressure
Parameters
----------
ss : bool
Whether to calculate sound speed. Defaults to `True`
sa : bool
Whether to calculate seawater absorption. Defaults to `True`
st : bool
Whether to calculate sample thickness. Defaults to `True`
r : bool
Whether to calculate range. Defaults to `True`
"""
s, t, p = self.salinity, self.temperature, self.pressure
if s is not None and t is not None and p is not None:
if ss:
self.sound_speed = self.calc_sound_speed(src='user')
if sa:
self.seawater_absorption = self.calc_seawater_absorption(src='user')
if st:
self.sample_thickness = self.calc_sample_thickness()
if r:
self.range = self.calc_range()
elif s is None:
print("Salinity was not provided. Environment was not recalculated")
elif t is None:
print("Temperature was not provided. Environment was not recalculated")
else:
print("Pressure was not provided. Environment was not recalculated")
def calibrate(self):
"""Base method to be overridden for volume backscatter calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Calibration has not been implemented for this sonar model!')
def calibrate_TS(self):
"""Base method to be overridden for target strength calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Target strength calibration has not been implemented for this sonar model!')
def validate_path(self, save_path, save_postfix):
"""Creates a directory if it doesnt exist. Returns a valid save path.
"""
def _assemble_path():
file_in = os.path.basename(self.file_path)
file_name, file_ext = os.path.splitext(file_in)
return file_name + save_postfix + file_ext
if save_path is None:
save_dir = os.path.dirname(self.file_path)
file_out = _assemble_path()
else:
path_ext = os.path.splitext(save_path)[1]
# If given save_path is file, split into directory and file
if path_ext != '':
save_dir, file_out = os.path.split(save_path)
if save_dir == '': # save_path is only a filename without directory
save_dir = os.path.dirname(self.file_path) # use directory from input file
# If given save_path is a directory, get a filename from input .nc file
else:
save_dir = save_path
file_out = _assemble_path()
# Create folder if not already exists
if save_dir == '':
# TODO: should we use '.' instead of os.getcwd()?
save_dir = os.getcwd() # explicit about path to current directory
if not os.path.exists(save_dir):
os.mkdir(save_dir)
return os.path.join(save_dir, file_out)
@staticmethod
def get_tile_params(r_data_sz, p_data_sz, r_tile_sz, p_tile_sz, sample_thickness):
"""Obtain ping_time and range_bin parameters associated with groupby and groupby_bins operations.
These parameters are used in methods remove_noise(), noise_estimates(), get_MVBS().
Parameters
----------
r_data_sz : int
number of range_bin entries in data
p_data_sz : int
number of ping_time entries in data
r_tile_sz : float
tile size along the range_bin dimension [m]
p_tile_sz : int
tile size along the ping_time dimension [number of pings]
sample_thickness : float
thickness of each data sample, determined by sound speed and pulse duration
Returns
-------
r_tile_sz : int
modified tile size along the range dimension [m], determined by sample_thickness
r_tile_bin_edge : list of int
bin edges along the range_bin dimension for :py:func:`xarray.DataArray.groupby_bins` operation
p_tile_bin_edge : list of int
bin edges along the ping_time dimension for :py:func:`xarray.DataArray.groupby_bins` operation
"""
# Adjust noise_est_range_bin_size because range_bin_size may be an inconvenient value
num_r_per_tile = np.round(r_tile_sz / sample_thickness).astype(int) # num of range_bin per tile
r_tile_sz = num_r_per_tile * sample_thickness
# Total number of range_bin and ping tiles
num_tile_range_bin = np.ceil(r_data_sz / num_r_per_tile).astype(int)
if np.mod(p_data_sz, p_tile_sz) == 0:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int) + 1
else:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int)
# Tile bin edges along range
# ... -1 to make sure each bin has the same size because of the right-inclusive and left-exclusive bins
r_tile_bin_edge = [np.arange(x.values + 1) * y.values - 1 for x, y in zip(num_tile_range_bin, num_r_per_tile)]
p_tile_bin_edge = np.arange(num_tile_ping + 1) * p_tile_sz - 1
return r_tile_sz, r_tile_bin_edge, p_tile_bin_edge
def _get_proc_Sv(self, source_path=None, source_postfix='_Sv'):
"""Private method to return calibrated Sv either from memory or _Sv.nc file.
This method is called by remove_noise(), noise_estimates() and get_MVBS().
"""
if self.Sv is None: # calibration not yet performed
Sv_path = self.validate_path(save_path=source_path, # wrangle _Sv path
save_postfix=source_postfix)
if os.path.exists(Sv_path): # _Sv exists
self.Sv = xr.open_dataset(Sv_path) # load _Sv file
else:
# if path specification given but file do not exist:
if (source_path is not None) or (source_postfix != '_Sv'):
print('%s no calibrated data found in specified path: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), Sv_path))
else:
print('%s data has not been calibrated. ' % dt.datetime.now().strftime('%H:%M:%S'))
print(' performing calibration now and operate from Sv in memory.')
self.calibrate() # calibrate, have Sv in memory
return self.Sv
def remove_noise(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None,
SNR=0, Sv_threshold=None,
save=False, save_postfix='_Sv_clean', save_path=None):
"""Remove noise by using noise estimates obtained from the minimum mean calibrated power level
along each column of tiles.
See method noise_estimates() for details of noise estimation.
Reference: <NAME> & Higginbottom, 2017, ICES Journal of Marine Sciences
Parameters
----------
source_postfix : str
postfix of the Sv file used to remove noise from, default to '_Sv'
source_path : str
path of Sv file used to remove noise from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float, optional
Meters per tile for noise estimation [m]
noise_est_ping_size : int, optional
Number of pings per tile for noise estimation
SNR : int, optional
Minimum signal-to-noise ratio (remove values below this after general noise removal).
Sv_threshold : int, optional
Minimum Sv threshold [dB] (remove values below this after general noise removal)
save : bool, optional
Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
Default to ``False``.
save_postfix : str
Filename postfix, default to '_Sv_clean'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Get calibrated Sv
if self.Sv is not None:
print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
print('%s Remove noise from Sv stored in: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Get TVG and ABS for compensating for transmission loss
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Function for use with apply
def remove_n(x, rr):
p_c_lin = 10 ** ((x.Sv - x.ABS - x.TVG) / 10)
nn = 10 * np.log10(p_c_lin.mean(dim='ping_time').groupby_bins('range_bin', rr).mean().min(
dim='range_bin_bins')) + x.ABS + x.TVG
# Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB
if not Sv_threshold:
return x.Sv.where(x.Sv > (nn + SNR), other=np.nan)
else:
return x.Sv.where((x.Sv > (nn + SNR)) & (x > Sv_threshold), other=np.nan)
# Groupby noise removal operation
proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
ABS.name = 'ABS'
TVG.name = 'TVG'
pp = xr.merge([proc_data, ABS])
pp = xr.merge([pp, TVG])
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
map(remove_n, rr=range_bin_tile_bin_edge[0])
Sv_clean = Sv_clean.drop_vars(['ping_idx'])
else:
tmp_clean = []
cnt = 0
for key, val in pp.groupby('frequency'): # iterate over different frequency channel
tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
map(remove_n, rr=range_bin_tile_bin_edge[cnt])
cnt += 1
tmp_clean.append(tmp)
clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
Sv_clean = xr.DataArray(clean_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_clean[0]['ping_time'].values,
'range_bin': tmp_clean[0]['range_bin'].values},
dims=['frequency', 'ping_time', 'range_bin'])
# Set up DataSet
Sv_clean.name = 'Sv'
Sv_clean = Sv_clean.to_dataset()
Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Attach calculated range into data set
Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
# Save as object attributes as a netCDF file
self.Sv_clean = Sv_clean
# TODO: now adding the below so that MVBS can be calculated directly
# from the cleaned Sv without saving and loading Sv_clean from disk.
# However this is not explicit to the user. A better way to do this
# is to change get_MVBS() to first check existence of self.Sv_clean
# when `_Sv_clean` is specified as the source_postfix.
if not print_src: # remove noise from Sv stored in memory
self.Sv = Sv_clean.copy()
if save:
self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
Sv_clean.to_netcdf(self.Sv_clean_path)
# Close opened resources
proc_data.close()
def noise_estimates(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None):
"""Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.
The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
This method contains redundant pieces of code that also appear in method remove_noise(),
but this method can be used separately to determine the exact tile size for noise removal before
noise removal is actually performed.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
source_path : str
path of Sv file used to calculate noise estimates from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                  or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float
meters per tile for noise estimation [m]
noise_est_ping_size : int
number of pings per tile for noise estimation
Returns
-------
noise_est : xarray DataSet
noise estimates as a DataArray with dimension [ping_time x range_bin]
ping_time and range_bin are taken from the first element of each tile along each of the dimensions
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Use calibrated data to calculate noise removal
proc_data = self._get_proc_Sv()
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Values for noise estimates
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Noise estimates
proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
ping_time=self.noise_est_ping_size,
range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
boundary='pad').mean().min(dim='range_bin'))
else:
range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
tmp_noise = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
ping_time=self.noise_est_ping_size,
range_bin=r_bin.values,
boundary='pad').mean().min(dim='range_bin'))
tmp_da.name = 'noise_est'
tmp_noise.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
noise_est = xr.DataArray(noise_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_noise[0]['ping_time'].values},
dims=['frequency', 'ping_time'])
noise_est = noise_est.to_dataset(name='noise_est')
noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Close opened resources
proc_data.close()
return noise_est
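    # Shape sketch (illustrative, values assumed): with noise_est_ping_size = 40
    # and 26 range_bin samples per tile, a (1000 ping_time x 1040 range_bin) Sv
    # array is coarsened to a (25 x 40) grid of tile means, and the minimum over
    # the range_bin tiles gives one noise estimate per 40-ping block.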
def get_MVBS(self, source_postfix='_Sv', source_path=None,
MVBS_range_bin_size=None, MVBS_ping_size=None,
save=False, save_postfix='_MVBS', save_path=None):
"""Calculate Mean Volume Backscattering Strength (MVBS).
The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
calculate and save MVBS as a new attribute to the calling EchoData instance.
MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
that are from the first elements of each tile along the corresponding dimensions
in the original Sv or Sv_clean DataArray.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate MVBS, default to '_Sv'
source_path : str
path of Sv file used to calculate MVBS, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                  or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
MVBS_range_bin_size : float, optional
meters per tile for calculating MVBS [m]
MVBS_ping_size : int, optional
number of pings per tile for calculating MVBS
save : bool, optional
whether to save the calculated MVBS into a new .nc file, default to ``False``
save_postfix : str
Filename postfix, default to '_MVBS'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
"""
# Check params
if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
self.MVBS_range_bin_size = MVBS_range_bin_size
if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
self.MVBS_ping_size = MVBS_ping_size
# Get Sv by validating path and calibrate if not already done
if self.Sv is not None:
print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
if self.Sv_path is not None:
print('%s Sv source used to calculate MVBS: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
else:
print('%s Sv source used to calculate MVBS: memory' %
dt.datetime.now().strftime('%H:%M:%S'))
# Get tile indexing parameters
self.MVBS_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.MVBS_range_bin_size,
p_tile_sz=self.MVBS_ping_size,
sample_thickness=self.sample_thickness)
# Calculate MVBS
Sv_linear = 10 ** (proc_data.Sv / 10) # convert to linear domain before averaging
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
MVBS = 10 * np.log10(Sv_linear.coarsen(
ping_time=self.MVBS_ping_size,
range_bin=int(np.unique(self.MVBS_range_bin_size / self.sample_thickness)),
boundary='pad').mean())
MVBS.coords['range_bin'] = ('range_bin', np.arange(MVBS['range_bin'].size))
else:
range_bin_coarsen_idx = (self.MVBS_range_bin_size / self.sample_thickness).astype(int)
tmp_MVBS = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(Sv_linear.sel(frequency=freq).coarsen(
ping_time=self.MVBS_ping_size,
range_bin=r_bin.values,
boundary='pad').mean())
tmp_da.coords['range_bin'] = ('range_bin', np.arange(tmp_da['range_bin'].size))
tmp_da.name = 'MVBS'
tmp_MVBS.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
MVBS_val = np.array([zz.values for zz in xr.align(*tmp_MVBS, join='outer')])
MVBS = xr.DataArray(MVBS_val,
coords={'frequency': Sv_linear['frequency'].values,
'ping_time': tmp_MVBS[0]['ping_time'].values,
'range_bin': np.arange(MVBS_val.shape[2])},
dims=['frequency', 'ping_time', 'range_bin']).dropna(dim='range_bin', how='all')
# Set MVBS attributes
MVBS.name = 'MVBS'
MVBS = MVBS.to_dataset()
MVBS['MVBS_range_bin_size'] = ('frequency', self.MVBS_range_bin_size)
MVBS.attrs['MVBS_ping_size'] = self.MVBS_ping_size
# Save results in object and as a netCDF file
self.MVBS = MVBS
if save:
self.MVBS_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving MVBS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.MVBS_path))
MVBS.to_netcdf(self.MVBS_path)
# Close opened resources
proc_data.close()
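# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the tile-based noise
# estimate described in remove_noise()/noise_estimates() above, reduced to
# plain numpy on a fake Sv matrix. All sizes and constants are assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as _np
    demo_Sv = _np.random.uniform(-90, -60, size=(40, 1000))   # ping_time x range_bin, dB
    demo_range = _np.linspace(1, 200, 1000)                    # range [m]
    demo_TVG = 20 * _np.log10(_np.maximum(demo_range, 1))      # time-varied gain
    demo_ABS = 2 * 0.04 * demo_range                           # absorption, ~0.04 dB/m assumed
    power_cal = 10 ** ((demo_Sv - demo_ABS - demo_TVG) / 10)   # undo transmission-loss compensation
    ping_tile, range_tile = 40, 50
    # mean within each (ping_tile x range_tile) tile, then min across range tiles
    tile_means = power_cal.reshape(ping_tile, -1, range_tile).mean(axis=(0, 2))
    noise_db = 10 * _np.log10(tile_means.min())
    print('demo noise estimate: %.1f dB' % noise_db)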
| en | 0.683863 | echopype data model that keeps tracks of echo data and its connection to data files. Class for manipulating echo data that is already converted to netCDF. # this passes the input through file name test # meters per tile for noise estimation # number of pings per tile for noise estimation # meters per tile for MVBS # number of pings per tile for MVBS # calibrated volume backscattering strength # path to save calibrated results # denoised volume backscattering strength # calibrated target strength # path to save TS calculation results # mean volume backscattering strength # Load netCDF groups if file format is correct # Get .nc filenames for storing processed data if computation is performed # Raise error if the file format convention does not match Base method to be overridden for calculating sound_speed for different sonar models # issue warning when subclass methods not available Base method to be overridden for calculating seawater_absorption for different sonar models # issue warning when subclass methods not available Base method to be overridden for calculating sample_thickness for different sonar models. # issue warning when subclass methods not available Base method to be overridden for calculating range for different sonar models. # issue warning when subclass methods not available Recalculates sound speed, seawater absorption, sample thickness, and range using salinity, temperature, and pressure Parameters ---------- ss : bool Whether to calcualte sound speed. Defaults to `True` sa : bool Whether to calcualte seawater absorption. Defaults to `True` st : bool Whether to calcualte sample thickness. Defaults to `True` r : bool Whether to calcualte range. Defaults to `True` Base method to be overridden for volume backscatter calibration and echo-integration for different sonar models. # issue warning when subclass methods not available Base method to be overridden for target strength calibration and echo-integration for different sonar models. # issue warning when subclass methods not available Creates a directory if it doesnt exist. Returns a valid save path. # If given save_path is file, split into directory and file # save_path is only a filename without directory # use directory from input file # If given save_path is a directory, get a filename from input .nc file # Create folder if not already exists # TODO: should we use '.' instead of os.getcwd()? # explicit about path to current directory Obtain ping_time and range_bin parameters associated with groupby and groupby_bins operations. These parameters are used in methods remove_noise(), noise_estimates(), get_MVBS(). 
Parameters ---------- r_data_sz : int number of range_bin entries in data p_data_sz : int number of ping_time entries in data r_tile_sz : float tile size along the range_bin dimension [m] p_tile_sz : int tile size along the ping_time dimension [number of pings] sample_thickness : float thickness of each data sample, determined by sound speed and pulse duration Returns ------- r_tile_sz : int modified tile size along the range dimension [m], determined by sample_thickness r_tile_bin_edge : list of int bin edges along the range_bin dimension for :py:func:`xarray.DataArray.groupby_bins` operation p_tile_bin_edge : list of int bin edges along the ping_time dimension for :py:func:`xarray.DataArray.groupby_bins` operation # Adjust noise_est_range_bin_size because range_bin_size may be an inconvenient value # num of range_bin per tile # Total number of range_bin and ping tiles # Tile bin edges along range # ... -1 to make sure each bin has the same size because of the right-inclusive and left-exclusive bins Private method to return calibrated Sv either from memory or _Sv.nc file. This method is called by remove_noise(), noise_estimates() and get_MVBS(). # calibration not yet performed # wrangle _Sv path # _Sv exists # load _Sv file # if path specification given but file do not exist: # calibrate, have Sv in memory Remove noise by using noise estimates obtained from the minimum mean calibrated power level along each column of tiles. See method noise_estimates() for details of noise estimation. Reference: <NAME> & Higginbottom, 2017, ICES Journal of Marine Sciences Parameters ---------- source_postfix : str postfix of the Sv file used to remove noise from, default to '_Sv' source_path : str path of Sv file used to remove noise from, can be one of the following: - None (default): use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file, or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv - path to a directory: RAWFILENAME_Sv.nc in the specified directory - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc noise_est_range_bin_size : float, optional Meters per tile for noise estimation [m] noise_est_ping_size : int, optional Number of pings per tile for noise estimation SNR : int, optional Minimum signal-to-noise ratio (remove values below this after general noise removal). Sv_threshold : int, optional Minimum Sv threshold [dB] (remove values below this after general noise removal) save : bool, optional Whether to save the denoised Sv (``Sv_clean``) into a new .nc file. Default to ``False``. save_postfix : str Filename postfix, default to '_Sv_clean' save_path : str Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default # Check params # Get calibrated Sv # Get tile indexing parameters # Get TVG and ABS for compensating for transmission loss # Function for use with apply # Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB # Groupby noise removal operation # check if number of range_bin per tile the same for all freq channels # iterate over different frequency channel # Set up DataSet # Attach calculated range into data set # Save as object attributes as a netCDF file # TODO: now adding the below so that MVBS can be calculated directly # from the cleaned Sv without saving and loading Sv_clean from disk. # However this is not explicit to the user. 
A better way to do this # is to change get_MVBS() to first check existence of self.Sv_clean # when `_Sv_clean` is specified as the source_postfix. # remove noise from Sv stored in memory # Close opened resources Obtain noise estimates from the minimum mean calibrated power level along each column of tiles. The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size. This method contains redundant pieces of code that also appear in method remove_noise(), but this method can be used separately to determine the exact tile size for noise removal before noise removal is actually performed. Parameters ---------- source_postfix : str postfix of the Sv file used to calculate noise estimates from, default to '_Sv' source_path : str path of Sv file used to calculate noise estimates from, can be one of the following: - None (default): use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file, or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv - path to a directory: RAWFILENAME_Sv.nc in the specified directory - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc noise_est_range_bin_size : float meters per tile for noise estimation [m] noise_est_ping_size : int number of pings per tile for noise estimation Returns ------- noise_est : xarray DataSet noise estimates as a DataArray with dimension [ping_time x range_bin] ping_time and range_bin are taken from the first element of each tile along each of the dimensions # Check params # Use calibrated data to calculate noise removal # Get tile indexing parameters # Values for noise estimates # Noise estimates # check if number of range_bin per tile the same for all freq channels # Construct a dataArray TODO: this can probably be done smarter using xarray native functions # Close opened resources Calculate Mean Volume Backscattering Strength (MVBS). The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to calculate and save MVBS as a new attribute to the calling EchoData instance. MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin`` that are from the first elements of each tile along the corresponding dimensions in the original Sv or Sv_clean DataArray. 
Parameters ---------- source_postfix : str postfix of the Sv file used to calculate MVBS, default to '_Sv' source_path : str path of Sv file used to calculate MVBS, can be one of the following: - None (default): use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file, or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv - path to a directory: RAWFILENAME_Sv.nc in the specified directory - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc MVBS_range_bin_size : float, optional meters per tile for calculating MVBS [m] MVBS_ping_size : int, optional number of pings per tile for calculating MVBS save : bool, optional whether to save the calculated MVBS into a new .nc file, default to ``False`` save_postfix : str Filename postfix, default to '_MVBS' save_path : str Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default # Check params # Get Sv by validating path and calibrate if not already done # Get tile indexing parameters # Calculate MVBS # convert to linear domain before averaging # check if number of range_bin per tile the same for all freq channels # Construct a dataArray TODO: this can probably be done smarter using xarray native functions # Set MVBS attributes # Save results in object and as a netCDF file # Close opened resources | 2.851058 | 3 |
Python/face_detect_camera/managers.py | abondar24/OpenCVBase | 0 | 9057 | import cv2
import numpy as np
import time
class CaptureManager(object):
def __init__(self, capture, preview_window_manager=None, should_mirror_preview = False):
self.preview_window_manager = preview_window_manager
self.should_mirror_preview = should_mirror_preview
self._capture = capture
self._channel = 0
self._entered_frame = False
self._frame = None
self._frames_elapsed = long(0)
self._fps_est = None
@property
def channel(self):
return self._channel
@channel.setter
    def channel(self, value):
        self._channel = value
@property
def frame(self):
if self._entered_frame and self._frame is None:
_, self._frame = self._capture.retrieve(channel=self.channel)
return self._frame
def enter_frame(self):
# capture the next frame
assert not self._entered_frame, 'previous enter_frame() had no matching exit_frame()'
if self._capture is not None:
self._entered_frame = self._capture.grab()
def exit_frame(self):
# draw to window, write to files, release the frame
# frame is retrievable or not
if self.frame is None:
self._entered_frame = False
return
if self._frames_elapsed == 0:
self._start_time = time.time()
else:
time_elapsed = time.time() - self._start_time
self._fps_est = self._frames_elapsed / time_elapsed
self._frames_elapsed += 1
# draw
if self.preview_window_manager is not None:
if self.should_mirror_preview:
mirrored_frame = np.fliplr(self._frame).copy()
self.preview_window_manager.show(mirrored_frame)
else:
self.preview_window_manager.show(self._frame)
# release the frame
self._frame = None
self._entered_frame = False
class WindowManager(object):
def __init__(self, window_name, keypress_callback = None):
self.keypress_callback = keypress_callback
self._window_name = window_name
self._is_window_created = False
@property
def is_window_created(self):
return self._is_window_created
def create_window(self):
cv2.namedWindow(self._window_name)
self._is_window_created = True
def show(self, frame):
cv2.imshow(self._window_name, frame)
def destroy_window(self):
cv2.destroyWindow(self._window_name)
self._is_window_created = False
def process_events(self):
keykode = cv2.waitKey(1)
if self.keypress_callback is not None and keykode != -1:
keykode &= 0xFF
self.keypress_callback(keykode)
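# Illustrative usage sketch (not part of the original file): wire a camera to a
# preview window and pump frames until Escape is pressed. Assumes a working
# default camera at device index 0.
if __name__ == '__main__':
    def on_keypress(keycode):
        if keycode == 27:  # Escape closes the window and ends the loop
            window.destroy_window()
    window = WindowManager('Preview', on_keypress)
    capture = CaptureManager(cv2.VideoCapture(0), window, True)
    window.create_window()
    while window.is_window_created:
        capture.enter_frame()
        _ = capture.frame          # retrieve the grabbed image
        capture.exit_frame()       # draws the (mirrored) preview, updates FPS
        window.process_events()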
| import cv2
import numpy as np
import time
class CaptureManager(object):
def __init__(self, capture, preview_window_manager=None, should_mirror_preview = False):
self.preview_window_manager = preview_window_manager
self.should_mirror_preview = should_mirror_preview
self._capture = capture
self._channel = 0
self._entered_frame = False
self._frame = None
self._frames_elapsed = long(0)
self._fps_est = None
@property
def channel(self):
return self._channel
@channel.setter
    def channel(self, value):
        self._channel = value
@property
def frame(self):
if self._entered_frame and self._frame is None:
_, self._frame = self._capture.retrieve(channel=self.channel)
return self._frame
def enter_frame(self):
# capture the next frame
assert not self._entered_frame, 'previous enter_frame() had no matching exit_frame()'
if self._capture is not None:
self._entered_frame = self._capture.grab()
def exit_frame(self):
# draw to window, write to files, release the frame
# frame is retrievable or not
if self.frame is None:
self._entered_frame = False
return
if self._frames_elapsed == 0:
self._start_time = time.time()
else:
time_elapsed = time.time() - self._start_time
self._fps_est = self._frames_elapsed / time_elapsed
self._frames_elapsed += 1
# draw
if self.preview_window_manager is not None:
if self.should_mirror_preview:
mirrored_frame = np.fliplr(self._frame).copy()
self.preview_window_manager.show(mirrored_frame)
else:
self.preview_window_manager.show(self._frame)
# release the frame
self._frame = None
self._entered_frame = False
class WindowManager(object):
def __init__(self, window_name, keypress_callback = None):
self.keypress_callback = keypress_callback
self._window_name = window_name
self._is_window_created = False
@property
def is_window_created(self):
return self._is_window_created
def create_window(self):
cv2.namedWindow(self._window_name)
self._is_window_created = True
def show(self, frame):
cv2.imshow(self._window_name, frame)
def destroy_window(self):
cv2.destroyWindow(self._window_name)
self._is_window_created = False
def process_events(self):
keykode = cv2.waitKey(1)
if self.keypress_callback is not None and keykode != -1:
keykode &= 0xFF
self.keypress_callback(keykode)
| en | 0.835866 | # capture the next frame # draw to window, write to files, release the frame # frame is retrievable or not # draw # release the frame | 2.654734 | 3 |
ELLA/ELLA.py | micaelverissimo/lifelong_ringer | 0 | 9058 | <gh_stars>0
""" Alpha version of a version of ELLA that plays nicely with sklearn
@author: <NAME>
"""
from math import log
import numpy as np
from scipy.special import logsumexp
from scipy.linalg import sqrtm, inv, norm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression, Lasso
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, explained_variance_score
class ELLA(object):
""" The ELLA model """
def __init__(self, d, k, base_learner, base_learner_kwargs = {}, mu = 1, lam = 1, k_init = False):
""" Initializes a new model for the given base_learner.
d: the number of parameters for the base learner
k: the number of latent model components
base_learner: the base learner to use (currently can only be
LinearRegression, Ridge, or LogisticRegression).
base_learner_kwargs: keyword arguments to base learner (for instance to
specify regularization strength)
            mu: the L_1 sparsity penalty on the task-specific coefficients
            lam: the L_2 penalty on the latent basis L
NOTE: currently only binary logistic regression is supported
"""
self.d = d
self.k = k
self.L = np.random.randn(d,k)
self.A = np.zeros((d * k, d * k))
self.b = np.zeros((d * k, 1))
self.S = np.zeros((k, 0))
self.T = 0
self.mu = mu
self.lam = lam
self.k_init = k_init
if base_learner in [LinearRegression, Ridge]:
self.perf_metric = explained_variance_score
elif base_learner in [LogisticRegression]:
self.perf_metric = accuracy_score
else:
raise Exception("Unsupported Base Learner")
self.base_learner = base_learner
self.base_learner_kwargs = base_learner_kwargs
def fit(self, X, y, task_id):
""" Fit the model to a new batch of training data. The task_id must
start at 0 and increase by one each time this function is called.
Currently you cannot add new data to old tasks.
X: the training data
            y: the training labels
task_id: the id of the task
"""
self.T += 1
single_task_model = self.base_learner(fit_intercept = False, **self.base_learner_kwargs).fit(X, y)
D_t = self.get_hessian(single_task_model, X, y)
D_t_sqrt = sqrtm(D_t)
theta_t = single_task_model.coef_
sparse_encode = Lasso(alpha = self.mu / (X.shape[0] * 2.0),
fit_intercept = False, tol=1e9, max_iter=50000).fit(D_t_sqrt.dot(self.L),
D_t_sqrt.dot(theta_t.T))
if self.k_init and task_id < self.k:
sparse_coeffs = np.zeros((self.k,))
sparse_coeffs[task_id] = 1.0
else:
sparse_coeffs = sparse_encode.coef_
self.S = np.hstack((self.S, np.matrix(sparse_coeffs).T))
self.A += np.kron(self.S[:,task_id].dot(self.S[:,task_id].T), D_t)
self.b += np.kron(self.S[:,task_id].T, np.mat(theta_t).dot(D_t)).T
L_vectorized = inv(self.A / self.T + self.lam * np.eye(self.d * self.k, self.d * self.k)).dot(self.b) / self.T
self.L = L_vectorized.reshape((self.k, self.d)).T
self.revive_dead_components()
def revive_dead_components(self):
""" re-initailizes any components that have decayed to 0 """
for i,val in enumerate(np.sum(self.L, axis = 0)):
if abs(val) < 10 ** -8:
self.L[:, i] = np.random.randn(self.d,)
def predict(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
            (LogisticRegression) the output is the predicted class (probability thresholded at 0.5).
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
return X.dot(self.L.dot(self.S[:, task_id]))
elif self.base_learner == LogisticRegression:
return 1. / (1.0 + np.exp(-X.dot(self.L.dot(self.S[:, task_id])))) > 0.5
def predict_probs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
(LogisticRgerssion) the output is currently a probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return np.exp(self.predict_logprobs(X, task_id))
def predict_logprobs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
(LogisticRgerssion) the output is currently a probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return -logsumexp(np.hstack((np.zeros((X.shape[0], 1)), -X.dot(self.L.dot(self.S[:, task_id])))), axis = 1)
def score(self, X, y, task_id):
""" Output the score for ELLA's model on the specified testing data.
If using a continuous model (Ridge and LinearRegression)
the score is explained variance. If using a classification model
(LogisticRegression) the score is accuracy.
"""
return self.perf_metric(self.predict(X, task_id), y)
def get_hessian(self, model, X, y):
""" ELLA requires that each single task learner provide the Hessian
of the loss function evaluated around the optimal single task
            parameters. This function implements this for the base learners
that are currently supported """
theta_t = model.coef_
if self.base_learner == LinearRegression:
return X.T.dot(X)/(2.0 * X.shape[0])
elif self.base_learner == Ridge:
return X.T.dot(X)/(2.0 * X.shape[0]) + model.alpha * np.eye(self.d, self.d)
elif self.base_learner == LogisticRegression:
preds = 1. / (1.0 + np.exp(-X.dot(theta_t.T)))
base = np.tile(preds * (1 - preds), (1, X.shape[1]))
hessian = (np.multiply(X, base)).T.dot(X) / (2.0 * X.shape[0])
return hessian + np.eye(self.d,self.d) / (2.0 * model.C) | """ Alpha version of a version of ELLA that plays nicely with sklearn
@author: <NAME>
"""
from math import log
import numpy as np
from scipy.special import logsumexp
from scipy.linalg import sqrtm, inv, norm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression, Lasso
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, explained_variance_score
class ELLA(object):
""" The ELLA model """
def __init__(self, d, k, base_learner, base_learner_kwargs = {}, mu = 1, lam = 1, k_init = False):
""" Initializes a new model for the given base_learner.
d: the number of parameters for the base learner
k: the number of latent model components
base_learner: the base learner to use (currently can only be
LinearRegression, Ridge, or LogisticRegression).
base_learner_kwargs: keyword arguments to base learner (for instance to
specify regularization strength)
            mu: the L_1 sparsity penalty on the task-specific coefficients
            lam: the L_2 penalty on the latent basis L
NOTE: currently only binary logistic regression is supported
"""
self.d = d
self.k = k
self.L = np.random.randn(d,k)
self.A = np.zeros((d * k, d * k))
self.b = np.zeros((d * k, 1))
self.S = np.zeros((k, 0))
self.T = 0
self.mu = mu
self.lam = lam
self.k_init = k_init
if base_learner in [LinearRegression, Ridge]:
self.perf_metric = explained_variance_score
elif base_learner in [LogisticRegression]:
self.perf_metric = accuracy_score
else:
raise Exception("Unsupported Base Learner")
self.base_learner = base_learner
self.base_learner_kwargs = base_learner_kwargs
def fit(self, X, y, task_id):
""" Fit the model to a new batch of training data. The task_id must
start at 0 and increase by one each time this function is called.
Currently you cannot add new data to old tasks.
X: the training data
            y: the training labels
task_id: the id of the task
"""
self.T += 1
single_task_model = self.base_learner(fit_intercept = False, **self.base_learner_kwargs).fit(X, y)
D_t = self.get_hessian(single_task_model, X, y)
D_t_sqrt = sqrtm(D_t)
theta_t = single_task_model.coef_
sparse_encode = Lasso(alpha = self.mu / (X.shape[0] * 2.0),
fit_intercept = False, tol=1e9, max_iter=50000).fit(D_t_sqrt.dot(self.L),
D_t_sqrt.dot(theta_t.T))
if self.k_init and task_id < self.k:
sparse_coeffs = np.zeros((self.k,))
sparse_coeffs[task_id] = 1.0
else:
sparse_coeffs = sparse_encode.coef_
self.S = np.hstack((self.S, np.matrix(sparse_coeffs).T))
self.A += np.kron(self.S[:,task_id].dot(self.S[:,task_id].T), D_t)
self.b += np.kron(self.S[:,task_id].T, np.mat(theta_t).dot(D_t)).T
L_vectorized = inv(self.A / self.T + self.lam * np.eye(self.d * self.k, self.d * self.k)).dot(self.b) / self.T
self.L = L_vectorized.reshape((self.k, self.d)).T
self.revive_dead_components()
def revive_dead_components(self):
""" re-initailizes any components that have decayed to 0 """
for i,val in enumerate(np.sum(self.L, axis = 0)):
if abs(val) < 10 ** -8:
self.L[:, i] = np.random.randn(self.d,)
def predict(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
            (LogisticRegression) the output is the predicted class (probability thresholded at 0.5).
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
return X.dot(self.L.dot(self.S[:, task_id]))
elif self.base_learner == LogisticRegression:
return 1. / (1.0 + np.exp(-X.dot(self.L.dot(self.S[:, task_id])))) > 0.5
def predict_probs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
(LogisticRgerssion) the output is currently a probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return np.exp(self.predict_logprobs(X, task_id))
def predict_logprobs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
(LogisticRgerssion) the output is currently a probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return -logsumexp(np.hstack((np.zeros((X.shape[0], 1)), -X.dot(self.L.dot(self.S[:, task_id])))), axis = 1)
def score(self, X, y, task_id):
""" Output the score for ELLA's model on the specified testing data.
If using a continuous model (Ridge and LinearRegression)
the score is explained variance. If using a classification model
(LogisticRegression) the score is accuracy.
"""
return self.perf_metric(self.predict(X, task_id), y)
def get_hessian(self, model, X, y):
""" ELLA requires that each single task learner provide the Hessian
of the loss function evaluated around the optimal single task
            parameters. This function implements this for the base learners
that are currently supported """
theta_t = model.coef_
if self.base_learner == LinearRegression:
return X.T.dot(X)/(2.0 * X.shape[0])
elif self.base_learner == Ridge:
return X.T.dot(X)/(2.0 * X.shape[0]) + model.alpha * np.eye(self.d, self.d)
elif self.base_learner == LogisticRegression:
preds = 1. / (1.0 + np.exp(-X.dot(theta_t.T)))
base = np.tile(preds * (1 - preds), (1, X.shape[1]))
hessian = (np.multiply(X, base)).T.dot(X) / (2.0 * X.shape[0])
return hessian + np.eye(self.d,self.d) / (2.0 * model.C) | en | 0.816177 | Alpha version of a version of ELLA that plays nicely with sklearn @author: <NAME> The ELLA model Initializes a new model for the given base_learner. d: the number of parameters for the base learner k: the number of latent model components base_learner: the base learner to use (currently can only be LinearRegression, Ridge, or LogisticRegression). base_learner_kwargs: keyword arguments to base learner (for instance to specify regularization strength) mu: hyperparameter for sparsity lam: L2 penalty on L mu: the L_1 penalty to use lam: the L_2 penalty to use NOTE: currently only binary logistic regression is supported Fit the model to a new batch of training data. The task_id must start at 0 and increase by one each time this function is called. Currently you cannot add new data to old tasks. X: the training data y: the trianing labels task_id: the id of the task re-initailizes any components that have decayed to 0 Output ELLA's predictions for the specified data on the specified task_id. If using a continuous model (Ridge and LinearRegression) the result is the prediction. If using a classification model (LogisticRgerssion) the output is currently a probability. Output ELLA's predictions for the specified data on the specified task_id. If using a continuous model (Ridge and LinearRegression) the result is the prediction. If using a classification model (LogisticRgerssion) the output is currently a probability. Output ELLA's predictions for the specified data on the specified task_id. If using a continuous model (Ridge and LinearRegression) the result is the prediction. If using a classification model (LogisticRgerssion) the output is currently a probability. Output the score for ELLA's model on the specified testing data. If using a continuous model (Ridge and LinearRegression) the score is explained variance. If using a classification model (LogisticRegression) the score is accuracy. ELLA requires that each single task learner provide the Hessian of the loss function evaluated around the optimal single task parameters. This funciton implements this for the base learners that are currently supported | 2.981724 | 3 |
webhook/utils.py | Myst1c-a/phen-cogs | 0 | 9059 | <filename>webhook/utils.py
"""
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
import discord
from redbot.core.commands import Context
USER_MENTIONS = discord.AllowedMentions.none()
USER_MENTIONS.users = True
WEBHOOK_RE = re.compile(
r"discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})"
)
async def _monkeypatch_send(ctx: Context, content: str = None, **kwargs) -> discord.Message:
self = ctx.bot.get_cog("Webhook")
original_kwargs = kwargs.copy()
try:
webhook = await self.get_webhook(ctx=ctx)
kwargs["username"] = ctx.author.display_name
kwargs["avatar_url"] = ctx.author.avatar_url
kwargs["wait"] = True
return await webhook.send(content, **kwargs)
except Exception:
return await super(Context, ctx).send(content, **original_kwargs)
class FakeResponse:
def __init__(self):
self.status = 403
self.reason = "Forbidden"
| <filename>webhook/utils.py
"""
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
import discord
from redbot.core.commands import Context
USER_MENTIONS = discord.AllowedMentions.none()
USER_MENTIONS.users = True
WEBHOOK_RE = re.compile(
r"discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})"
)
async def _monkeypatch_send(ctx: Context, content: str = None, **kwargs) -> discord.Message:
self = ctx.bot.get_cog("Webhook")
original_kwargs = kwargs.copy()
try:
webhook = await self.get_webhook(ctx=ctx)
kwargs["username"] = ctx.author.display_name
kwargs["avatar_url"] = ctx.author.avatar_url
kwargs["wait"] = True
return await webhook.send(content, **kwargs)
except Exception:
return await super(Context, ctx).send(content, **original_kwargs)
class FakeResponse:
def __init__(self):
self.status = 403
self.reason = "Forbidden"
| en | 0.764605 | MIT License Copyright (c) 2020-present phenom4n4n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1.928739 | 2 |
scripts/generate.py | jwise/pebble-caltrain | 1 | 9060 | <reponame>jwise/pebble-caltrain<gh_stars>1-10
__author__ = 'katharine'
import csv
import struct
import time
import datetime
def generate_files(source_dir, target_dir):
stops_txt = [x for x in csv.DictReader(open("%s/stops.txt" % source_dir, 'rb')) if x['location_type'] == '0']
print "%d stops" % len(stops_txt)
name_replacements = (
('Caltrain', ''),
('Station', ''),
('Mt View', 'Mountain View'),
('So. San Francisco', 'South SF'),
('South San Francisco', 'South SF'),
)
stop_parent_map = {}
stop_name_map = {}
stop_map = {}
stops = []
for s in stops_txt:
if s['parent_station'] != '' and s['parent_station'] in stop_parent_map:
stop_map[int(s['stop_code'])] = stop_parent_map[s['parent_station']]
continue
for replacement in name_replacements:
s['stop_name'] = s['stop_name'].replace(*replacement)
s['stop_name'] = s['stop_name'].rstrip()
if s['stop_name'] in stop_name_map:
stop_map[int(s['stop_code'])] = stop_name_map[s['stop_name']]
continue
stop_map[int(s['stop_code'])] = len(stops)
stop_parent_map[s['parent_station']] = len(stops)
stop_name_map[s['stop_name']] = len(stops)
stops.append({
'name': s['stop_name'],
'zone': int(s['zone_id']) if s['zone_id'] != '' else 0,
'lat': float(s['stop_lat']),
'lon': float(s['stop_lon'])
})
with open('%s/stops.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(stops)))
for stop in stops:
f.write(struct.pack('<B18sii', stop['zone'], stop['name'], int(stop['lat'] * 1000000), int(stop['lon'] * 1000000)))
calendar_txt = list(csv.DictReader(open("%s/calendar.txt" % source_dir, 'rb')))
cal = []
cal_map = {}
for i, x in enumerate(calendar_txt):
cal_map[x['service_id']] = len(cal)
end_time = datetime.datetime.strptime(x['end_date'], '%Y%m%d') + datetime.timedelta(1, hours=2)
cal.append({
'id': cal_map[x['service_id']],
'start': time.mktime(time.strptime(x['start_date'], '%Y%m%d')),
'end': time.mktime(end_time.timetuple()),
'days': (
(int(x['monday']) << 0) |
(int(x['tuesday']) << 1) |
(int(x['wednesday']) << 2) |
(int(x['thursday']) << 3) |
(int(x['friday']) << 4) |
(int(x['saturday']) << 5) |
(int(x['sunday']) << 6)
)
})
calendar_dates_txt = list(csv.DictReader(open("%s/calendar_dates.txt" % source_dir, 'rb')))
for i, x in enumerate(calendar_dates_txt):
if x['service_id'] in cal_map:
# XXX: Would be nice to find a way to mark special dates. But
# we can't, right now. Oh well.
continue
cal_map[x['service_id']] = len(cal)
start_time = datetime.datetime.strptime(x['date'], '%Y%m%d')
end_time = start_time + datetime.timedelta(1, hours=2)
cal.append({
'id': cal_map[x['service_id']],
'start': time.mktime(start_time.timetuple()),
'end': time.mktime(end_time.timetuple()),
'days': 0x7F,
})
with open('%s/calendar.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(cal)))
for c in cal:
f.write(struct.pack('<IIB', int(c['start']), int(c['end']), c['days']))
trips_txt = list(csv.DictReader(open("%s/trips.txt" % source_dir, "rb")))
tr = []
tr_map = {}
# These shouldn't be hardcoded, and should instead be inferred from routes.txt.
route_map = {
"BABY BULLET": 0,
"LIMITED": 1,
"LOCAL": 2,
"SHUTTLE": 3,
"Bu-130": 0,
"Li-130": 1,
"Lo-130": 2,
"TaSj-130": 3,
"Sp-130": 2, # XXX: Special Event Extra Service
}
short_name_replacements = (
('<NAME>', ''),
('S', ''),
('shuttle', ''),
)
for i, trip in enumerate(trips_txt):
for replacement in short_name_replacements:
trip['trip_short_name'] = trip['trip_short_name'].replace(*replacement)
tr.append({
'direction': int(not int(trip['direction_id'])), # We picked opposing values for north/south.
'route': route_map[trip['route_id']],
'service': cal_map[trip['service_id']],
'trip_name': int(trip['trip_short_name'])}),
tr_map[trip['trip_id']] = i
with open('%s/trips.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(tr)))
for t in tr:
f.write(struct.pack('<HBBB', t['trip_name'], t['direction'], t['route'], t['service']))
times_txt = list(csv.DictReader(open("%s/stop_times.txt" % source_dir)))
tm = sorted([{
'time': (int(x['arrival_time'].split(':')[0])*60 + int(x['arrival_time'].split(':')[1])),
'stop': stop_map[int(x['stop_id'])],
'sequence': int(x['stop_sequence']),
'trip': tr_map[x['trip_id']]
} for x in times_txt], key=lambda y: y['time'])
with open('%s/times.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(tm)))
for t in tm:
f.write(struct.pack('<HHBB', t['trip'], t['time'], t['stop'], t['sequence']))
stop_times = [sorted([i for i, x in enumerate(tm) if x['stop'] == stop], key=lambda t: tm[t]['time']) for stop, s in enumerate(stops)]
lengths = [len(x) for x in stop_times]
with open('%s/stop_index.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(lengths)))
counter = len(lengths)*4 + 1
for l in lengths:
f.write(struct.pack('<HH', counter, l))
counter += l*2
for s in stop_times:
for x in s:
f.write(struct.pack('<H', x))
trip_stops = [sorted([i for i, x in enumerate(tm) if x['trip'] == trip], key=lambda k: tm[k]['stop']) for trip, s in enumerate(tr)]
lengths = map(len, trip_stops)
with open('%s/trip_index.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(lengths)))
counter = len(lengths) * 3 + 2
data_start = counter
for l in lengths:
f.write(struct.pack('<HB', counter, l))
counter += l*2
if data_start != f.tell():
raise Exception("%d != %d" % (counter, f.tell()))
for s in trip_stops:
for x in s:
f.write(struct.pack('<H', x))
if f.tell() != counter:
raise Exception("Not the expected length!")
if __name__ == "__main__":
import sys
generate_files(sys.argv[1], sys.argv[2])
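# Illustrative sketch (not part of the original file): reading back the packed
# stops.dat written above. The record layout mirrors the '<B18sii' format used
# in generate_files(); the file path is an assumption.
def read_stops(path='stops.dat'):
    with open(path, 'rb') as f:
        count, = struct.unpack('<B', f.read(1))
        stops = []
        for _ in range(count):
            zone, name, lat, lon = struct.unpack('<B18sii', f.read(27))
            stops.append((zone, name.rstrip(b'\x00'), lat / 1e6, lon / 1e6))
    return stops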
| __author__ = 'katharine'
import csv
import struct
import time
import datetime
def generate_files(source_dir, target_dir):
stops_txt = [x for x in csv.DictReader(open("%s/stops.txt" % source_dir, 'rb')) if x['location_type'] == '0']
print "%d stops" % len(stops_txt)
name_replacements = (
('Caltrain', ''),
('Station', ''),
('Mt View', 'Mountain View'),
('So. San Francisco', 'South SF'),
('South San Francisco', 'South SF'),
)
stop_parent_map = {}
stop_name_map = {}
stop_map = {}
stops = []
for s in stops_txt:
if s['parent_station'] != '' and s['parent_station'] in stop_parent_map:
stop_map[int(s['stop_code'])] = stop_parent_map[s['parent_station']]
continue
for replacement in name_replacements:
s['stop_name'] = s['stop_name'].replace(*replacement)
s['stop_name'] = s['stop_name'].rstrip()
if s['stop_name'] in stop_name_map:
stop_map[int(s['stop_code'])] = stop_name_map[s['stop_name']]
continue
stop_map[int(s['stop_code'])] = len(stops)
stop_parent_map[s['parent_station']] = len(stops)
stop_name_map[s['stop_name']] = len(stops)
stops.append({
'name': s['stop_name'],
'zone': int(s['zone_id']) if s['zone_id'] != '' else 0,
'lat': float(s['stop_lat']),
'lon': float(s['stop_lon'])
})
with open('%s/stops.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(stops)))
for stop in stops:
f.write(struct.pack('<B18sii', stop['zone'], stop['name'], int(stop['lat'] * 1000000), int(stop['lon'] * 1000000)))
calendar_txt = list(csv.DictReader(open("%s/calendar.txt" % source_dir, 'rb')))
cal = []
cal_map = {}
for i, x in enumerate(calendar_txt):
cal_map[x['service_id']] = len(cal)
end_time = datetime.datetime.strptime(x['end_date'], '%Y%m%d') + datetime.timedelta(1, hours=2)
cal.append({
'id': cal_map[x['service_id']],
'start': time.mktime(time.strptime(x['start_date'], '%Y%m%d')),
'end': time.mktime(end_time.timetuple()),
'days': (
(int(x['monday']) << 0) |
(int(x['tuesday']) << 1) |
(int(x['wednesday']) << 2) |
(int(x['thursday']) << 3) |
(int(x['friday']) << 4) |
(int(x['saturday']) << 5) |
(int(x['sunday']) << 6)
)
})
calendar_dates_txt = list(csv.DictReader(open("%s/calendar_dates.txt" % source_dir, 'rb')))
for i, x in enumerate(calendar_dates_txt):
if x['service_id'] in cal_map:
# XXX: Would be nice to find a way to mark special dates. But
# we can't, right now. Oh well.
continue
cal_map[x['service_id']] = len(cal)
start_time = datetime.datetime.strptime(x['date'], '%Y%m%d')
end_time = start_time + datetime.timedelta(1, hours=2)
cal.append({
'id': cal_map[x['service_id']],
'start': time.mktime(start_time.timetuple()),
'end': time.mktime(end_time.timetuple()),
'days': 0x7F,
})
with open('%s/calendar.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(cal)))
for c in cal:
f.write(struct.pack('<IIB', int(c['start']), int(c['end']), c['days']))
trips_txt = list(csv.DictReader(open("%s/trips.txt" % source_dir, "rb")))
tr = []
tr_map = {}
# These shouldn't be hardcoded, and should instead be inferred from routes.txt.
route_map = {
"BABY BULLET": 0,
"LIMITED": 1,
"LOCAL": 2,
"SHUTTLE": 3,
"Bu-130": 0,
"Li-130": 1,
"Lo-130": 2,
"TaSj-130": 3,
"Sp-130": 2, # XXX: Special Event Extra Service
}
short_name_replacements = (
('<NAME>', ''),
('S', ''),
('shuttle', ''),
)
for i, trip in enumerate(trips_txt):
for replacement in short_name_replacements:
trip['trip_short_name'] = trip['trip_short_name'].replace(*replacement)
tr.append({
'direction': int(not int(trip['direction_id'])), # We picked opposing values for north/south.
'route': route_map[trip['route_id']],
'service': cal_map[trip['service_id']],
'trip_name': int(trip['trip_short_name'])}),
tr_map[trip['trip_id']] = i
with open('%s/trips.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(tr)))
for t in tr:
f.write(struct.pack('<HBBB', t['trip_name'], t['direction'], t['route'], t['service']))
times_txt = list(csv.DictReader(open("%s/stop_times.txt" % source_dir)))
tm = sorted([{
'time': (int(x['arrival_time'].split(':')[0])*60 + int(x['arrival_time'].split(':')[1])),
'stop': stop_map[int(x['stop_id'])],
'sequence': int(x['stop_sequence']),
'trip': tr_map[x['trip_id']]
} for x in times_txt], key=lambda y: y['time'])
with open('%s/times.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(tm)))
for t in tm:
f.write(struct.pack('<HHBB', t['trip'], t['time'], t['stop'], t['sequence']))
stop_times = [sorted([i for i, x in enumerate(tm) if x['stop'] == stop], key=lambda t: tm[t]['time']) for stop, s in enumerate(stops)]
lengths = [len(x) for x in stop_times]
with open('%s/stop_index.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<B', len(lengths)))
counter = len(lengths)*4 + 1
for l in lengths:
f.write(struct.pack('<HH', counter, l))
counter += l*2
for s in stop_times:
for x in s:
f.write(struct.pack('<H', x))
trip_stops = [sorted([i for i, x in enumerate(tm) if x['trip'] == trip], key=lambda k: tm[k]['stop']) for trip, s in enumerate(tr)]
lengths = map(len, trip_stops)
with open('%s/trip_index.dat' % target_dir, 'wb') as f:
f.write(struct.pack('<H', len(lengths)))
counter = len(lengths) * 3 + 2
data_start = counter
for l in lengths:
f.write(struct.pack('<HB', counter, l))
counter += l*2
if data_start != f.tell():
raise Exception("%d != %d" % (counter, f.tell()))
for s in trip_stops:
for x in s:
f.write(struct.pack('<H', x))
if f.tell() != counter:
raise Exception("Not the expected length!")
if __name__ == "__main__":
import sys
generate_files(sys.argv[1], sys.argv[2]) | en | 0.905104 | # XXX: Would be nice to find a way to mark special dates. But # we can't, right now. Oh well. # These shouldn't be hardcoded, and should instead be inferred from routes.txt. # XXX: Special Event Extra Service # We picked opposing values for north/south. | 2.713381 | 3 |
tests/test_is_valid_php_version_file_version.py | gerardroche/sublime-phpunit | 85 | 9061 | from PHPUnitKit.tests import unittest
from PHPUnitKit.plugin import is_valid_php_version_file_version
class TestIsValidPhpVersionFileVersion(unittest.TestCase):
def test_invalid_values(self):
self.assertFalse(is_valid_php_version_file_version(''))
self.assertFalse(is_valid_php_version_file_version(' '))
self.assertFalse(is_valid_php_version_file_version('foobar'))
self.assertFalse(is_valid_php_version_file_version('masterfoo'))
self.assertFalse(is_valid_php_version_file_version('.'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('x.x'))
self.assertFalse(is_valid_php_version_file_version('x.x.x'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('snapshot'))
def test_master_branch_version(self):
self.assertTrue(is_valid_php_version_file_version('master'))
def test_specific_semver_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.0.0'))
self.assertTrue(is_valid_php_version_file_version('5.0.1'))
self.assertTrue(is_valid_php_version_file_version('5.0.7'))
self.assertTrue(is_valid_php_version_file_version('5.0.30'))
self.assertTrue(is_valid_php_version_file_version('5.0.32'))
self.assertTrue(is_valid_php_version_file_version('5.1.0'))
self.assertTrue(is_valid_php_version_file_version('5.1.1'))
self.assertTrue(is_valid_php_version_file_version('5.1.3'))
self.assertTrue(is_valid_php_version_file_version('5.1.27'))
self.assertTrue(is_valid_php_version_file_version('7.0.0'))
self.assertTrue(is_valid_php_version_file_version('7.1.19'))
def test_minor_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.6'))
self.assertTrue(is_valid_php_version_file_version('7.1'))
self.assertTrue(is_valid_php_version_file_version('7.2'))
def test_major_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.x'))
self.assertTrue(is_valid_php_version_file_version('6.x'))
self.assertTrue(is_valid_php_version_file_version('7.x'))
self.assertTrue(is_valid_php_version_file_version('8.x'))
def test_major_dot_minor_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('7.0.x'))
self.assertTrue(is_valid_php_version_file_version('7.1.x'))
self.assertTrue(is_valid_php_version_file_version('7.2.x'))
def test_snapshot_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.4snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.5snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.6snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.1snapshot'))
| from PHPUnitKit.tests import unittest
from PHPUnitKit.plugin import is_valid_php_version_file_version
class TestIsValidPhpVersionFileVersion(unittest.TestCase):
def test_invalid_values(self):
self.assertFalse(is_valid_php_version_file_version(''))
self.assertFalse(is_valid_php_version_file_version(' '))
self.assertFalse(is_valid_php_version_file_version('foobar'))
self.assertFalse(is_valid_php_version_file_version('masterfoo'))
self.assertFalse(is_valid_php_version_file_version('.'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('x.x'))
self.assertFalse(is_valid_php_version_file_version('x.x.x'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('snapshot'))
def test_master_branch_version(self):
self.assertTrue(is_valid_php_version_file_version('master'))
def test_specific_semver_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.0.0'))
self.assertTrue(is_valid_php_version_file_version('5.0.1'))
self.assertTrue(is_valid_php_version_file_version('5.0.7'))
self.assertTrue(is_valid_php_version_file_version('5.0.30'))
self.assertTrue(is_valid_php_version_file_version('5.0.32'))
self.assertTrue(is_valid_php_version_file_version('5.1.0'))
self.assertTrue(is_valid_php_version_file_version('5.1.1'))
self.assertTrue(is_valid_php_version_file_version('5.1.3'))
self.assertTrue(is_valid_php_version_file_version('5.1.27'))
self.assertTrue(is_valid_php_version_file_version('7.0.0'))
self.assertTrue(is_valid_php_version_file_version('7.1.19'))
def test_minor_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.6'))
self.assertTrue(is_valid_php_version_file_version('7.1'))
self.assertTrue(is_valid_php_version_file_version('7.2'))
def test_major_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.x'))
self.assertTrue(is_valid_php_version_file_version('6.x'))
self.assertTrue(is_valid_php_version_file_version('7.x'))
self.assertTrue(is_valid_php_version_file_version('8.x'))
def test_major_dot_minor_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('7.0.x'))
self.assertTrue(is_valid_php_version_file_version('7.1.x'))
self.assertTrue(is_valid_php_version_file_version('7.2.x'))
def test_snapshot_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.4snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.5snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.6snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.1snapshot'))
| none | 1 | 2.763245 | 3 |
|
feed/tests/test_consts.py | cul-it/arxiv-rss | 4 | 9062 | <filename>feed/tests/test_consts.py
import pytest
from feed.consts import FeedVersion
from feed.utils import randomize_case
from feed.errors import FeedVersionError
# FeedVersion.supported
def test_feed_version_supported():
assert FeedVersion.supported() == {
FeedVersion.RSS_2_0,
FeedVersion.ATOM_1_0,
}
# FeedVersion.get
def test_feed_version_get_supported():
# RSS full version
assert (
FeedVersion.get(randomize_case(FeedVersion.RSS_2_0.lower()))
== FeedVersion.RSS_2_0
)
# RSS only number
assert FeedVersion.get("2.0") == FeedVersion.RSS_2_0
# Atom full version
assert (
FeedVersion.get(randomize_case(FeedVersion.ATOM_1_0.lower()))
== FeedVersion.ATOM_1_0
)
# Atom only number
assert FeedVersion.get("1.0", atom=True) == FeedVersion.ATOM_1_0
def test_feed_version_get_unsupported():
# RSS 0.91 full version
rss_0_91 = randomize_case(FeedVersion.RSS_0_91)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_0_91)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_0_91
assert ex.supported == FeedVersion.supported()
# RSS 0.91 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("0.91")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 0.91"
assert ex.supported == FeedVersion.supported()
# RSS 1.0 full version
rss_1_0 = randomize_case(FeedVersion.RSS_1_0)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_1_0)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_1_0
assert ex.supported == FeedVersion.supported()
# RSS 1.0 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("1.0")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 1.0"
assert ex.supported == FeedVersion.supported()
def test_feed_version_get_invalid():
# RSS
for version, test in [
("RSS 3.3", "3.3"),
("RSS 0.1", "0.1"),
("RSS 1.1", "RSS 1.1"),
("RSS 2.1", "RSS 2.1"),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Atom
for version, test, prefere in [
("Atom 0.1", "0.1", True),
("Atom 0.91", "0.91", True),
("Atom 2.0", "2.0", True),
("Atom 0.1", "Atom 0.1", False),
("Atom 0.91", "Atom 0.91", False),
("Atom 2.0", "Atom 2.0", False),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test, atom=prefere)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Nonsense
for version in ["foo", "bar", "baz"]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(version)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
def test_is_property():
# RSS
assert FeedVersion.RSS_0_91.is_rss
assert FeedVersion.RSS_1_0.is_rss
assert FeedVersion.RSS_2_0.is_rss
assert not FeedVersion.RSS_0_91.is_atom
assert not FeedVersion.RSS_1_0.is_atom
assert not FeedVersion.RSS_2_0.is_atom
# Atom
assert FeedVersion.ATOM_1_0.is_atom
assert not FeedVersion.ATOM_1_0.is_rss
| <filename>feed/tests/test_consts.py
import pytest
from feed.consts import FeedVersion
from feed.utils import randomize_case
from feed.errors import FeedVersionError
# FeedVersion.supported
def test_feed_version_supported():
assert FeedVersion.supported() == {
FeedVersion.RSS_2_0,
FeedVersion.ATOM_1_0,
}
# FeedVersion.get
def test_feed_version_get_supported():
# RSS full version
assert (
FeedVersion.get(randomize_case(FeedVersion.RSS_2_0.lower()))
== FeedVersion.RSS_2_0
)
# RSS only number
assert FeedVersion.get("2.0") == FeedVersion.RSS_2_0
# Atom full version
assert (
FeedVersion.get(randomize_case(FeedVersion.ATOM_1_0.lower()))
== FeedVersion.ATOM_1_0
)
# Atom only number
assert FeedVersion.get("1.0", atom=True) == FeedVersion.ATOM_1_0
def test_feed_version_get_unsupported():
# RSS 0.91 full version
rss_0_91 = randomize_case(FeedVersion.RSS_0_91)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_0_91)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_0_91
assert ex.supported == FeedVersion.supported()
# RSS 0.91 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("0.91")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 0.91"
assert ex.supported == FeedVersion.supported()
# RSS 1.0 full version
rss_1_0 = randomize_case(FeedVersion.RSS_1_0)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_1_0)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_1_0
assert ex.supported == FeedVersion.supported()
# RSS 1.0 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("1.0")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 1.0"
assert ex.supported == FeedVersion.supported()
def test_feed_version_get_invalid():
# RSS
for version, test in [
("RSS 3.3", "3.3"),
("RSS 0.1", "0.1"),
("RSS 1.1", "RSS 1.1"),
("RSS 2.1", "RSS 2.1"),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Atom
for version, test, prefere in [
("Atom 0.1", "0.1", True),
("Atom 0.91", "0.91", True),
("Atom 2.0", "2.0", True),
("Atom 0.1", "Atom 0.1", False),
("Atom 0.91", "Atom 0.91", False),
("Atom 2.0", "Atom 2.0", False),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test, atom=prefere)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Nonsense
for version in ["foo", "bar", "baz"]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(version)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
def test_is_property():
# RSS
assert FeedVersion.RSS_0_91.is_rss
assert FeedVersion.RSS_1_0.is_rss
assert FeedVersion.RSS_2_0.is_rss
assert not FeedVersion.RSS_0_91.is_atom
assert not FeedVersion.RSS_1_0.is_atom
assert not FeedVersion.RSS_2_0.is_atom
# Atom
assert FeedVersion.ATOM_1_0.is_atom
assert not FeedVersion.ATOM_1_0.is_rss
| en | 0.474448 | # FeedVersion.supported # FeedVersion.get # RSS full version # RSS only number # Atom full version # Atom only number # RSS 0.91 full version # RSS 0.91 only number # RSS 1.0 full version # RSS 1.0 only number # RSS # Atom # Nonsense # RSS # Atom | 2.115119 | 2 |
cmsplugin_cascade/migrations/0007_add_proxy_models.py | teklager/djangocms-cascade | 139 | 9063 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0006_bootstrapgallerypluginmodel'),
]
operations = [
]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0006_bootstrapgallerypluginmodel'),
]
operations = [
]
| none | 1 | 1.304525 | 1 |
|
theonionbox/tob/credits.py | ralphwetzel/theonionbox | 120 | 9064 | Credits = [
('Bootstrap', 'https://getbootstrap.com', 'The Bootstrap team', 'MIT'),
('Bottle', 'http://bottlepy.org', '<NAME>', 'MIT'),
('Cheroot', 'https://github.com/cherrypy/cheroot', 'CherryPy Team', 'BSD 3-Clause "New" or "Revised" License'),
('Click', 'https://github.com/pallets/click', 'Pallets', 'BSD 3-Clause "New" or "Revised" License'),
('ConfigUpdater', 'https://github.com/pyscaffold/configupdater', '<NAME>', 'MIT'),
('Glide', 'https://github.com/glidejs/glide', '@jedrzejchalubek', 'MIT'),
('JQuery', 'https://jquery.com', 'The jQuery Foundation', 'MIT'),
('jquery.pep.js', 'http://pep.briangonzalez.org', '@briangonzalez', 'MIT'),
('js-md5', 'https://github.com/emn178/js-md5', '@emn178', 'MIT'),
('PySocks', 'https://github.com/Anorov/PySocks', '@Anorov', 'Custom DAN HAIM'),
('RapydScript-NG', 'https://github.com/kovidgoyal/rapydscript-ng', '@kovidgoyal',
'BSD 2-Clause "Simplified" License'),
('Requests', 'https://requests.kennethreitz.org', '<NAME>', 'Apache License, Version 2.0'),
('scrollMonitor', 'https://github.com/stutrek/scrollmonitor', '@stutrek', 'MIT'),
('Smoothie Charts', 'https://github.com/joewalnes/smoothie', '@drewnoakes', 'MIT'),
('stem', 'https://stem.torproject.org', '<NAME> and The Tor Project', 'GNU LESSER GENERAL PUBLIC LICENSE')
]
| Credits = [
('Bootstrap', 'https://getbootstrap.com', 'The Bootstrap team', 'MIT'),
('Bottle', 'http://bottlepy.org', '<NAME>', 'MIT'),
('Cheroot', 'https://github.com/cherrypy/cheroot', 'CherryPy Team', 'BSD 3-Clause "New" or "Revised" License'),
('Click', 'https://github.com/pallets/click', 'Pallets', 'BSD 3-Clause "New" or "Revised" License'),
('ConfigUpdater', 'https://github.com/pyscaffold/configupdater', '<NAME>', 'MIT'),
('Glide', 'https://github.com/glidejs/glide', '@jedrzejchalubek', 'MIT'),
('JQuery', 'https://jquery.com', 'The jQuery Foundation', 'MIT'),
('jquery.pep.js', 'http://pep.briangonzalez.org', '@briangonzalez', 'MIT'),
('js-md5', 'https://github.com/emn178/js-md5', '@emn178', 'MIT'),
('PySocks', 'https://github.com/Anorov/PySocks', '@Anorov', 'Custom DAN HAIM'),
('RapydScript-NG', 'https://github.com/kovidgoyal/rapydscript-ng', '@kovidgoyal',
'BSD 2-Clause "Simplified" License'),
('Requests', 'https://requests.kennethreitz.org', '<NAME>', 'Apache License, Version 2.0'),
('scrollMonitor', 'https://github.com/stutrek/scrollmonitor', '@stutrek', 'MIT'),
('Smoothie Charts', 'https://github.com/joewalnes/smoothie', '@drewnoakes', 'MIT'),
('stem', 'https://stem.torproject.org', '<NAME> and The Tor Project', 'GNU LESSER GENERAL PUBLIC LICENSE')
]
| none | 1 | 1.159658 | 1 |
|
turorials/Google/projects/01_02_TextClassification/01_02_main.py | Ubpa/LearnTF | 0 | 9065 | #----------------
# 01_02 Text classification
#----------------
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# TensorFlow's version : 1.12.0
print('TensorFlow\'s version : ', tf.__version__)
#----------------
# 1 Download the IMDB dataset
#----------------
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
#----------------
# 2 Explore the data
#----------------
# Training entries: 25000, labels: 25000
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])
# (218, 189)
print(len(train_data[0]), len(train_data[1]))
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
decode_review(train_data[0])
#----------------
# 3 Prepare the data
#----------------
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
# (256, 256)
print((len(train_data[0]), len(train_data[1])))
print(train_data[0])
#----------------
# 4 Build the model
#----------------
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
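# The single sigmoid unit outputs the probability that a review is positive,
# which pairs with the binary cross-entropy loss used below.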
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='binary_crossentropy',
metrics=['accuracy'])
#----------------
# 5 Create a validation set
#----------------
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
#----------------
# 6 Train the model
#----------------
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
#----------------
# 7 Evaluate the model
#----------------
results = model.evaluate(test_data, test_labels)
print(results)
#----------------
# 8 Plot accuracy and loss over time
#----------------
history_dict = history.history
# dict_keys(['loss', 'val_loss', 'val_acc', 'acc'])
print(history_dict.keys())
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# loss
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# acc
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
| #----------------
# 01_02 Text classification
#----------------
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# TensorFlow's version : 1.12.0
print('TensorFlow\'s version : ', tf.__version__)
#----------------
# 1 Download the IMDB dataset
#----------------
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
#----------------
# 2 Explore the data
#----------------
# Training entries: 25000, labels: 25000
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])
# (218, 189)
print(len(train_data[0]), len(train_data[1]))
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
decode_review(train_data[0])
#----------------
# 3 Prepare the data
#----------------
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
# (256, 256)
print((len(train_data[0]), len(train_data[1])))
print(train_data[0])
#----------------
# 4 Build the model
#----------------
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='binary_crossentropy',
metrics=['accuracy'])
#----------------
# 5 Create a validation set
#----------------
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
#----------------
# 6 Train the model
#----------------
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
#----------------
# 7 Evaluate the model
#----------------
results = model.evaluate(test_data, test_labels)
print(results)
#----------------
# 8 Plot accuracy and loss over time
#----------------
history_dict = history.history
# dict_keys(['loss', 'val_loss', 'val_acc', 'acc'])
print(history_dict.keys())
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# loss
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# acc
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
| en | 0.117853 | #---------------- # 01_02 文本分类 #---------------- # TensorFlow and tf.keras # Helper libraries # TensorFlow's version : 1.12.0 #---------------- # 1 下载 IMDB 数据集 #---------------- #---------------- # 2 探索数据 #---------------- # Training entries: 25000, labels: 25000 # (218, 189) # A dictionary mapping words to an integer index # The first indices are reserved # unknown #---------------- # 3 准备数据 #---------------- # (256, 256) #---------------- # 4 构建模型 #---------------- # input shape is the vocabulary count used for the movie reviews (10,000 words) #---------------- # 5 创建验证集 #---------------- #---------------- # 6 训练模型 #---------------- #---------------- # 7 评估模型 #---------------- #---------------- # 8 创建准确率和损失随时间变化的图 #---------------- # dict_keys(['loss', 'val_loss', 'val_acc', 'acc']) # loss # "bo" is for "blue dot" # b is for "solid blue line" # acc # clear figure | 3.700153 | 4 |
backend/api/urls.py | 12xiaoni/text-label | 0 | 9066 | <reponame>12xiaoni/text-label<filename>backend/api/urls.py
from django.urls import include, path
from .views import (annotation, auto_labeling, comment, example, example_state,
health, label, project, tag, task)
from .views.tasks import category, relation, span, text
urlpatterns_project = [
path(
route='category-types',
view=label.CategoryTypeList.as_view(),
name='category_types'
),
path(
route='category-types/<int:label_id>',
view=label.CategoryTypeDetail.as_view(),
name='category_type'
),
path(
route='span-types',
view=label.SpanTypeList.as_view(),
name='span_types'
),
path(
route='span-types/<int:label_id>',
view=label.SpanTypeDetail.as_view(),
name='span_type'
),
path(
route='category-type-upload',
view=label.CategoryTypeUploadAPI.as_view(),
name='category_type_upload'
),
path(
route='span-type-upload',
view=label.SpanTypeUploadAPI.as_view(),
name='span_type_upload'
),
path(
route='examples',
view=example.ExampleList.as_view(),
name='example_list'
),
path(
route='examples/<int:example_id>',
view=example.ExampleDetail.as_view(),
name='example_detail'
),
path(
route='relation_types',
view=label.RelationTypeList.as_view(),
name='relation_types_list'
),
path(
route='relation_type-upload',
view=label.RelationTypeUploadAPI.as_view(),
name='relation_type-upload'
),
path(
route='relation_types/<int:relation_type_id>',
view=label.RelationTypeDetail.as_view(),
name='relation_type_detail'
),
path(
route='annotation_relations',
view=relation.RelationList.as_view(),
name='relation_types_list'
),
path(
route='annotation_relation-upload',
view=relation.RelationUploadAPI.as_view(),
name='annotation_relation-upload'
),
path(
route='annotation_relations/<int:annotation_relation_id>',
view=relation.RelationDetail.as_view(),
name='annotation_relation_detail'
),
path(
route='approval/<int:example_id>',
view=annotation.ApprovalAPI.as_view(),
name='approve_labels'
),
path(
route='examples/<int:example_id>/categories',
view=category.CategoryListAPI.as_view(),
name='category_list'
),
path(
route='examples/<int:example_id>/categories/<int:annotation_id>',
view=category.CategoryDetailAPI.as_view(),
name='category_detail'
),
path(
route='examples/<int:example_id>/spans',
view=span.SpanListAPI.as_view(),
name='span_list'
),
path(
route='examples/<int:example_id>/spans/<int:annotation_id>',
view=span.SpanDetailAPI.as_view(),
name='span_detail'
),
path(
route='examples/<int:example_id>/texts',
view=text.TextLabelListAPI.as_view(),
name='text_list'
),
path(
route='examples/<int:example_id>/texts/<int:annotation_id>',
view=text.TextLabelDetailAPI.as_view(),
name='text_detail'
),
path(
route='tags',
view=tag.TagList.as_view(),
name='tag_list'
),
path(
route='tags/<int:tag_id>',
view=tag.TagDetail.as_view(),
name='tag_detail'
),
path(
route='examples/<int:example_id>/comments',
view=comment.CommentListDoc.as_view(),
name='comment_list_doc'
),
path(
route='comments',
view=comment.CommentListProject.as_view(),
name='comment_list_project'
),
path(
route='examples/<int:example_id>/comments/<int:comment_id>',
view=comment.CommentDetail.as_view(),
name='comment_detail'
),
path(
route='examples/<int:example_id>/states',
view=example_state.ExampleStateList.as_view(),
name='example_state_list'
),
path(
route='auto-labeling-templates',
view=auto_labeling.AutoLabelingTemplateListAPI.as_view(),
name='auto_labeling_templates'
),
path(
route='auto-labeling-templates/<str:option_name>',
view=auto_labeling.AutoLabelingTemplateDetailAPI.as_view(),
name='auto_labeling_template'
),
path(
route='auto-labeling-configs',
view=auto_labeling.AutoLabelingConfigList.as_view(),
name='auto_labeling_configs'
),
path(
route='auto-labeling-configs/<int:config_id>',
view=auto_labeling.AutoLabelingConfigDetail.as_view(),
name='auto_labeling_config'
),
path(
route='auto-labeling-config-testing',
view=auto_labeling.AutoLabelingConfigTest.as_view(),
name='auto_labeling_config_test'
),
path(
route='examples/<int:example_id>/auto-labeling',
view=auto_labeling.AutoLabelingAnnotation.as_view(),
name='auto_labeling_annotation'
),
path(
route='auto-labeling-parameter-testing',
view=auto_labeling.AutoLabelingConfigParameterTest.as_view(),
name='auto_labeling_parameter_testing'
),
path(
route='auto-labeling-template-testing',
view=auto_labeling.AutoLabelingTemplateTest.as_view(),
name='auto_labeling_template_test'
),
path(
route='auto-labeling-mapping-testing',
view=auto_labeling.AutoLabelingMappingTest.as_view(),
name='auto_labeling_mapping_test'
)
]
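# Everything in urlpatterns_project is mounted under projects/<int:project_id>/
# by the include() at the bottom of this module.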
urlpatterns = [
path(
route='health',
view=health.Health.as_view(),
name='health'
),
path(
route='projects',
view=project.ProjectList.as_view(),
name='project_list'
),
path(
route='tasks/status/<task_id>',
view=task.TaskStatus.as_view(),
name='task_status'
),
path(
route='projects/<int:project_id>',
view=project.ProjectDetail.as_view(),
name='project_detail'
),
path('projects/<int:project_id>/', include(urlpatterns_project))
]
| from django.urls import include, path
from .views import (annotation, auto_labeling, comment, example, example_state,
health, label, project, tag, task)
from .views.tasks import category, relation, span, text
urlpatterns_project = [
path(
route='category-types',
view=label.CategoryTypeList.as_view(),
name='category_types'
),
path(
route='category-types/<int:label_id>',
view=label.CategoryTypeDetail.as_view(),
name='category_type'
),
path(
route='span-types',
view=label.SpanTypeList.as_view(),
name='span_types'
),
path(
route='span-types/<int:label_id>',
view=label.SpanTypeDetail.as_view(),
name='span_type'
),
path(
route='category-type-upload',
view=label.CategoryTypeUploadAPI.as_view(),
name='category_type_upload'
),
path(
route='span-type-upload',
view=label.SpanTypeUploadAPI.as_view(),
name='span_type_upload'
),
path(
route='examples',
view=example.ExampleList.as_view(),
name='example_list'
),
path(
route='examples/<int:example_id>',
view=example.ExampleDetail.as_view(),
name='example_detail'
),
path(
route='relation_types',
view=label.RelationTypeList.as_view(),
name='relation_types_list'
),
path(
route='relation_type-upload',
view=label.RelationTypeUploadAPI.as_view(),
name='relation_type-upload'
),
path(
route='relation_types/<int:relation_type_id>',
view=label.RelationTypeDetail.as_view(),
name='relation_type_detail'
),
path(
route='annotation_relations',
view=relation.RelationList.as_view(),
name='relation_types_list'
),
path(
route='annotation_relation-upload',
view=relation.RelationUploadAPI.as_view(),
name='annotation_relation-upload'
),
path(
route='annotation_relations/<int:annotation_relation_id>',
view=relation.RelationDetail.as_view(),
name='annotation_relation_detail'
),
path(
route='approval/<int:example_id>',
view=annotation.ApprovalAPI.as_view(),
name='approve_labels'
),
path(
route='examples/<int:example_id>/categories',
view=category.CategoryListAPI.as_view(),
name='category_list'
),
path(
route='examples/<int:example_id>/categories/<int:annotation_id>',
view=category.CategoryDetailAPI.as_view(),
name='category_detail'
),
path(
route='examples/<int:example_id>/spans',
view=span.SpanListAPI.as_view(),
name='span_list'
),
path(
route='examples/<int:example_id>/spans/<int:annotation_id>',
view=span.SpanDetailAPI.as_view(),
name='span_detail'
),
path(
route='examples/<int:example_id>/texts',
view=text.TextLabelListAPI.as_view(),
name='text_list'
),
path(
route='examples/<int:example_id>/texts/<int:annotation_id>',
view=text.TextLabelDetailAPI.as_view(),
name='text_detail'
),
path(
route='tags',
view=tag.TagList.as_view(),
name='tag_list'
),
path(
route='tags/<int:tag_id>',
view=tag.TagDetail.as_view(),
name='tag_detail'
),
path(
route='examples/<int:example_id>/comments',
view=comment.CommentListDoc.as_view(),
name='comment_list_doc'
),
path(
route='comments',
view=comment.CommentListProject.as_view(),
name='comment_list_project'
),
path(
route='examples/<int:example_id>/comments/<int:comment_id>',
view=comment.CommentDetail.as_view(),
name='comment_detail'
),
path(
route='examples/<int:example_id>/states',
view=example_state.ExampleStateList.as_view(),
name='example_state_list'
),
path(
route='auto-labeling-templates',
view=auto_labeling.AutoLabelingTemplateListAPI.as_view(),
name='auto_labeling_templates'
),
path(
route='auto-labeling-templates/<str:option_name>',
view=auto_labeling.AutoLabelingTemplateDetailAPI.as_view(),
name='auto_labeling_template'
),
path(
route='auto-labeling-configs',
view=auto_labeling.AutoLabelingConfigList.as_view(),
name='auto_labeling_configs'
),
path(
route='auto-labeling-configs/<int:config_id>',
view=auto_labeling.AutoLabelingConfigDetail.as_view(),
name='auto_labeling_config'
),
path(
route='auto-labeling-config-testing',
view=auto_labeling.AutoLabelingConfigTest.as_view(),
name='auto_labeling_config_test'
),
path(
route='examples/<int:example_id>/auto-labeling',
view=auto_labeling.AutoLabelingAnnotation.as_view(),
name='auto_labeling_annotation'
),
path(
route='auto-labeling-parameter-testing',
view=auto_labeling.AutoLabelingConfigParameterTest.as_view(),
name='auto_labeling_parameter_testing'
),
path(
route='auto-labeling-template-testing',
view=auto_labeling.AutoLabelingTemplateTest.as_view(),
name='auto_labeling_template_test'
),
path(
route='auto-labeling-mapping-testing',
view=auto_labeling.AutoLabelingMappingTest.as_view(),
name='auto_labeling_mapping_test'
)
]
urlpatterns = [
path(
route='health',
view=health.Health.as_view(),
name='health'
),
path(
route='projects',
view=project.ProjectList.as_view(),
name='project_list'
),
path(
route='tasks/status/<task_id>',
view=task.TaskStatus.as_view(),
name='task_status'
),
path(
route='projects/<int:project_id>',
view=project.ProjectDetail.as_view(),
name='project_detail'
),
path('projects/<int:project_id>/', include(urlpatterns_project))
] | none | 1 | 2.107987 | 2 |
|
nwbwidgets/test/test_base.py | d-sot/nwb-jupyter-widgets | 0 | 9067 | <filename>nwbwidgets/test/test_base.py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pynwb import TimeSeries
from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile
from ipywidgets import widgets
from pynwb.core import DynamicTable
from pynwb.file import Subject
from nwbwidgets.view import default_neurodata_vis_spec
from pynwb import ProcessingModule
from pynwb.behavior import Position, SpatialSeries
from nwbwidgets.base import show_neurodata_base,processing_module, nwb2widget, show_text_fields, \
fig2widget, vis2widget, show_fields, show_dynamic_table, df2accordion, lazy_show_over_data
import unittest
import pytest
def test_show_neurodata_base():
start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='demonstrate NWBFile basics',
identifier='NWB123',
session_start_time=start_time,
file_create_date=create_date,
related_publications='https://doi.org/10.1088/1741-2552/aaa904',
experimenter='Dr. Pack')
assert isinstance(show_neurodata_base(nwbfile,default_neurodata_vis_spec), widgets.Widget)
def test_show_text_fields():
data = np.random.rand(160,3)
ts = TimeSeries(name='test_timeseries', data=data, unit='m', starting_time=0.0, rate=1.0)
assert isinstance(show_text_fields(ts), widgets.Widget)
class ProcessingModuleTestCase(unittest.TestCase):
def setUp(self):
spatial_series = SpatialSeries(name='position',
data=np.linspace(0, 1, 20),
rate=50.,
reference_frame='starting gate')
self.position = Position(spatial_series=spatial_series)
def test_processing_module(self):
start_time = datetime(2020, 1, 29, 11, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='Test Session',
identifier='NWBPM',
session_start_time=start_time)
behavior_module = ProcessingModule(name='behavior',
description='preprocessed behavioral data')
nwbfile.add_processing_module(behavior_module)
nwbfile.processing['behavior'].add(self.position)
processing_module(nwbfile.processing['behavior'], default_neurodata_vis_spec)
def test_nwb2widget(self):
nwb2widget(self.position, default_neurodata_vis_spec)
def test_fig2widget():
data = np.random.rand(160, 3)
fig = plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(fig2widget(fig), widgets.Widget)
class Test_vis2widget:
def test_vis2widget_input_widget(self):
wg = widgets.IntSlider(
value=7,
min=0,
max=10,
step=1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
assert isinstance(vis2widget(wg), widgets.Widget)
def test_vis2widget_input_figure(self):
data = np.random.rand(160,3)
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(vis2widget(fig), widgets.Widget)
def test_vis2widget_input_other(self):
data = np.random.rand(160,3)
with pytest.raises(ValueError, match="unsupported vis type"):
vis2widget(data)
def test_show_subject():
node = Subject(age='8', sex='m', species='macaque')
show_fields(node)
def test_show_dynamic_table():
d = {'col1': [1, 2], 'col2': [3, 4]}
DT = DynamicTable.from_dataframe(df=pd.DataFrame(data=d),
name='Test Dtable',
table_description='no description')
show_dynamic_table(DT)
def test_df2accordion():
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
columns=['a', 'b', 'c'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_df2accordion_single():
df = pd.DataFrame(np.array([1]),
columns=['a'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_lazy_show_over_data():
list_ = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
assert isinstance(lazy_show_over_data(list_=list_,func_=func_fig),widgets.Widget)
| <filename>nwbwidgets/test/test_base.py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pynwb import TimeSeries
from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile
from ipywidgets import widgets
from pynwb.core import DynamicTable
from pynwb.file import Subject
from nwbwidgets.view import default_neurodata_vis_spec
from pynwb import ProcessingModule
from pynwb.behavior import Position, SpatialSeries
from nwbwidgets.base import show_neurodata_base,processing_module, nwb2widget, show_text_fields, \
fig2widget, vis2widget, show_fields, show_dynamic_table, df2accordion, lazy_show_over_data
import unittest
import pytest
def test_show_neurodata_base():
start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='demonstrate NWBFile basics',
identifier='NWB123',
session_start_time=start_time,
file_create_date=create_date,
related_publications='https://doi.org/10.1088/1741-2552/aaa904',
experimenter='Dr. Pack')
assert isinstance(show_neurodata_base(nwbfile,default_neurodata_vis_spec), widgets.Widget)
def test_show_text_fields():
data = np.random.rand(160,3)
ts = TimeSeries(name='test_timeseries', data=data, unit='m', starting_time=0.0, rate=1.0)
assert isinstance(show_text_fields(ts), widgets.Widget)
class ProcessingModuleTestCase(unittest.TestCase):
def setUp(self):
spatial_series = SpatialSeries(name='position',
data=np.linspace(0, 1, 20),
rate=50.,
reference_frame='starting gate')
self.position = Position(spatial_series=spatial_series)
def test_processing_module(self):
start_time = datetime(2020, 1, 29, 11, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='Test Session',
identifier='NWBPM',
session_start_time=start_time)
behavior_module = ProcessingModule(name='behavior',
description='preprocessed behavioral data')
nwbfile.add_processing_module(behavior_module)
nwbfile.processing['behavior'].add(self.position)
processing_module(nwbfile.processing['behavior'], default_neurodata_vis_spec)
def test_nwb2widget(self):
nwb2widget(self.position, default_neurodata_vis_spec)
def test_fig2widget():
data = np.random.rand(160, 3)
fig = plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(fig2widget(fig), widgets.Widget)
class Test_vis2widget:
def test_vis2widget_input_widget(self):
wg = widgets.IntSlider(
value=7,
min=0,
max=10,
step=1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
assert isinstance(vis2widget(wg), widgets.Widget)
def test_vis2widget_input_figure(self):
data = np.random.rand(160,3)
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(vis2widget(fig), widgets.Widget)
def test_vis2widget_input_other(self):
data = np.random.rand(160,3)
with pytest.raises(ValueError, match="unsupported vis type"):
vis2widget(data)
def test_show_subject():
node = Subject(age='8', sex='m', species='macaque')
show_fields(node)
def test_show_dynamic_table():
d = {'col1': [1, 2], 'col2': [3, 4]}
DT = DynamicTable.from_dataframe(df=pd.DataFrame(data=d),
name='Test Dtable',
table_description='no description')
show_dynamic_table(DT)
def test_df2accordion():
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
columns=['a', 'b', 'c'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_df2accordion_single():
df = pd.DataFrame(np.array([1]),
columns=['a'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_lazy_show_over_data():
list_ = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
assert isinstance(lazy_show_over_data(list_=list_,func_=func_fig),widgets.Widget)
| none | 1 | 2.165399 | 2 |
|
subliminal/video.py | orikad/subliminal | 0 | 9068 | # -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime, timedelta
import logging
import os
from guessit import guessit
logger = logging.getLogger(__name__)
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
                    '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
"""Base class for videos.
Represent a video, existing or not.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param str imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages.
"""
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists"""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name, options=None):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
if options is not None:
return cls.fromguess(name, guessit(name, options=options))
else:
return cls.fromguess(name, guessit(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode(Video):
"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of the series.
:param bool original_series: whether the series is the first with this name.
:param int tvdb_id: TVDB id of the episode.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs)
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Title of the episode
self.title = title
#: Year of series
self.year = year
#: The series is the first with this name
self.original_series = original_series
#: TVDB id of the episode
self.tvdb_id = tvdb_id
#: TVDB id of the series
self.series_tvdb_id = series_tvdb_id
#: IMDb id of the series
self.series_imdb_id = series_imdb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'title' not in guess or 'episode' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'),
year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess,
release_group=guess.get('release_group'), resolution=guess.get('screen_size'),
video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'episode'}))
def __repr__(self):
if self.year is None:
return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)
return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, title, year=None, **kwargs):
super(Movie, self).__init__(name, **kwargs)
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
audio_codec=guess.get('audio_codec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'movie'}))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
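# Usage sketch (illustrative release name): fromname() guesses the type and
# returns the matching subclass, e.g.
# video = Video.fromname('Man.of.Steel.2013.720p.BluRay.x264-Felony.mkv')
# assert isinstance(video, Movie) and video.title == 'Man of Steel'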
| # -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime, timedelta
import logging
import os
from guessit import guessit
logger = logging.getLogger(__name__)
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
                    '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
"""Base class for videos.
Represent a video, existing or not.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param str imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages.
"""
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists"""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name, options=None):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
if options is not None:
return cls.fromguess(name, guessit(name, options=options))
else:
return cls.fromguess(name, guessit(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode(Video):
"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of the series.
:param bool original_series: whether the series is the first with this name.
:param int tvdb_id: TVDB id of the episode.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs)
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Title of the episode
self.title = title
#: Year of series
self.year = year
#: The series is the first with this name
self.original_series = original_series
#: TVDB id of the episode
self.tvdb_id = tvdb_id
#: TVDB id of the series
self.series_tvdb_id = series_tvdb_id
#: IMDb id of the series
self.series_imdb_id = series_imdb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'title' not in guess or 'episode' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'),
year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess,
release_group=guess.get('release_group'), resolution=guess.get('screen_size'),
video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'episode'}))
def __repr__(self):
if self.year is None:
return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)
return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, title, year=None, **kwargs):
super(Movie, self).__init__(name, **kwargs)
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
audio_codec=guess.get('audio_codec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'movie'}))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
| en | 0.731312 | # -*- coding: utf-8 -*- #: Video extensions Base class for videos. Represent a video, existing or not. :param str name: name or path of the video. :param str format: format of the video (HDTV, WEB-DL, BluRay, ...). :param str release_group: release group of the video. :param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i). :param str video_codec: codec of the video stream. :param str audio_codec: codec of the main audio stream. :param str imdb_id: IMDb id of the video. :param dict hashes: hashes of the video file by provider names. :param int size: size of the video file in bytes. :param set subtitle_languages: existing subtitle languages. #: Name or path of the video #: Format of the video (HDTV, WEB-DL, BluRay, ...) #: Release group of the video #: Resolution of the video stream (480p, 720p, 1080p or 1080i) #: Codec of the video stream #: Codec of the main audio stream #: IMDb id of the video #: Hashes of the video file by provider names #: Size of the video file in bytes #: Existing subtitle languages Test whether the video exists Age of the video Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`. :param str name: name of the video. :param dict guess: guessed data. :raise: :class:`ValueError` if the `type` of the `guess` is invalid Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`. :param str name: name of the video. Episode :class:`Video`. :param str series: series of the episode. :param int season: season number of the episode. :param int episode: episode number of the episode. :param str title: title of the episode. :param int year: year of the series. :param bool original_series: whether the series is the first with this name. :param int tvdb_id: TVDB id of the episode. :param \*\*kwargs: additional parameters for the :class:`Video` constructor. #: Series of the episode #: Season number of the episode #: Episode number of the episode #: Title of the episode #: Year of series #: The series is the first with this name #: TVDB id of the episode #: TVDB id of the series #: IMDb id of the series Movie :class:`Video`. :param str title: title of the movie. :param int year: year of the movie. :param \*\*kwargs: additional parameters for the :class:`Video` constructor. #: Title of the movie #: Year of the movie | 1.662921 | 2 |
backend/app/migrations/0001_initial.py | juniorosorio47/client-order | 0 | 9069 | <gh_stars>0
# Generated by Django 3.2.7 on 2021-10-18 23:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=20)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.client')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=20)),
('inventory', models.IntegerField(default=0)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderProduct',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
],
options={
'unique_together': {('order', 'product')},
},
),
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(through='app.OrderProduct', to='app.Product'),
),
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| # Generated by Django 3.2.7 on 2021-10-18 23:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=20)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.client')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=20)),
('inventory', models.IntegerField(default=0)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderProduct',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
],
options={
'unique_together': {('order', 'product')},
},
),
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(through='app.OrderProduct', to='app.Product'),
),
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
] | en | 0.891659 | # Generated by Django 3.2.7 on 2021-10-18 23:21 | 1.74556 | 2 |
nemo/collections/nlp/models/machine_translation/mt_enc_dec_config.py | vadam5/NeMo | 1 | 9070 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional, Tuple
from omegaconf.omegaconf import MISSING
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataConfig
from nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModelConfig
from nemo.collections.nlp.modules.common.token_classifier import TokenClassifierConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import TokenizerConfig
from nemo.collections.nlp.modules.common.transformer.transformer import (
NeMoTransformerConfig,
NeMoTransformerEncoderConfig,
)
from nemo.core.config.modelPT import ModelConfig, OptimConfig, SchedConfig
@dataclass
class MTSchedConfig(SchedConfig):
name: str = 'InverseSquareRootAnnealing'
warmup_ratio: Optional[float] = None
last_epoch: int = -1
# TODO: Refactor this dataclass to support more optimizers (it pins the optimizer to Adam-like optimizers).
@dataclass
class MTOptimConfig(OptimConfig):
name: str = 'adam'
lr: float = 1e-3
betas: Tuple[float, float] = (0.9, 0.98)
weight_decay: float = 0.0
sched: Optional[MTSchedConfig] = MTSchedConfig()
@dataclass
class MTEncDecModelConfig(EncDecNLPModelConfig):
# machine translation configurations
num_val_examples: int = 3
num_test_examples: int = 3
max_generation_delta: int = 10
label_smoothing: Optional[float] = 0.0
beam_size: int = 4
len_pen: float = 0.0
src_language: str = 'en'
tgt_language: str = 'en'
find_unused_parameters: Optional[bool] = True
shared_tokenizer: Optional[bool] = True
preproc_out_dir: Optional[str] = None
# network architecture configuration
encoder_tokenizer: Any = MISSING
encoder: Any = MISSING
decoder_tokenizer: Any = MISSING
decoder: Any = MISSING
head: TokenClassifierConfig = TokenClassifierConfig(log_softmax=True)
# dataset configurations
train_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=True,
shuffle=True,
cache_ids=False,
use_cache=False,
)
validation_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
test_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
optim: Optional[OptimConfig] = MTOptimConfig()
@dataclass
class AAYNBaseConfig(MTEncDecModelConfig):
# Attention is All You Need Base Configuration
encoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
decoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
encoder: NeMoTransformerEncoderConfig = NeMoTransformerEncoderConfig(
library='nemo',
model_name=None,
pretrained=False,
hidden_size=512,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
decoder: NeMoTransformerConfig = NeMoTransformerConfig(
library='nemo',
model_name=None,
pretrained=False,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
| # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional, Tuple
from omegaconf.omegaconf import MISSING
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataConfig
from nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModelConfig
from nemo.collections.nlp.modules.common.token_classifier import TokenClassifierConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import TokenizerConfig
from nemo.collections.nlp.modules.common.transformer.transformer import (
NeMoTransformerConfig,
NeMoTransformerEncoderConfig,
)
from nemo.core.config.modelPT import ModelConfig, OptimConfig, SchedConfig
@dataclass
class MTSchedConfig(SchedConfig):
name: str = 'InverseSquareRootAnnealing'
warmup_ratio: Optional[float] = None
last_epoch: int = -1
# TODO: Refactor this dataclass to support more optimizers (it pins the optimizer to Adam-like optimizers).
@dataclass
class MTOptimConfig(OptimConfig):
name: str = 'adam'
lr: float = 1e-3
betas: Tuple[float, float] = (0.9, 0.98)
weight_decay: float = 0.0
sched: Optional[MTSchedConfig] = MTSchedConfig()
@dataclass
class MTEncDecModelConfig(EncDecNLPModelConfig):
# machine translation configurations
num_val_examples: int = 3
num_test_examples: int = 3
max_generation_delta: int = 10
label_smoothing: Optional[float] = 0.0
beam_size: int = 4
len_pen: float = 0.0
src_language: str = 'en'
tgt_language: str = 'en'
find_unused_parameters: Optional[bool] = True
shared_tokenizer: Optional[bool] = True
preproc_out_dir: Optional[str] = None
# network architecture configuration
encoder_tokenizer: Any = MISSING
encoder: Any = MISSING
decoder_tokenizer: Any = MISSING
decoder: Any = MISSING
head: TokenClassifierConfig = TokenClassifierConfig(log_softmax=True)
# dataset configurations
train_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=True,
shuffle=True,
cache_ids=False,
use_cache=False,
)
validation_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
test_ds: Optional[TranslationDataConfig] = TranslationDataConfig(
src_file_name=MISSING,
tgt_file_name=MISSING,
tokens_in_batch=512,
clean=False,
shuffle=False,
cache_ids=False,
use_cache=False,
)
optim: Optional[OptimConfig] = MTOptimConfig()
@dataclass
class AAYNBaseConfig(MTEncDecModelConfig):
# Attention is All You Need Base Configuration
encoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
decoder_tokenizer: TokenizerConfig = TokenizerConfig(library='yttm')
encoder: NeMoTransformerEncoderConfig = NeMoTransformerEncoderConfig(
library='nemo',
model_name=None,
pretrained=False,
hidden_size=512,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
decoder: NeMoTransformerConfig = NeMoTransformerConfig(
library='nemo',
model_name=None,
pretrained=False,
inner_size=2048,
num_layers=6,
num_attention_heads=8,
ffn_dropout=0.1,
attn_score_dropout=0.1,
attn_layer_dropout=0.1,
)
| en | 0.820391 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: Refactor this dataclass to to support more optimizers (it pins the optimizer to Adam-like optimizers). # machine translation configurations # network architecture configuration # dataset configurations # Attention is All You Need Base Configuration | 1.731961 | 2 |
shadowsocksr_cli/main.py | MaxSherry/ssr-command-client | 0 | 9071 | """
@author: tyrantlucifer
@contact: <EMAIL>
@blog: https://tyrantlucifer.com
@file: main.py
@time: 2021/2/18 21:36
@desc: shadowsocksr-cli entry point
"""
import argparse
import traceback
from shadowsocksr_cli.functions import *
def get_parser():
    parser = argparse.ArgumentParser(description=color.blue("The shadowsocksr command client based on Python."),
epilog=color.yellow('Powered by ') + color.green('tyrantlucifer') + color.yellow(
". If you have any questions,you can send e-mails to ") + color.green(
"<EMAIL>"))
parser.add_argument("-l", "--list", action="store_true", help="show ssr list")
parser.add_argument("-p", "--port", default=1080, metavar="local_port", type=int,
help="assign local proxy port,use with -s")
parser.add_argument("-s", "--start", metavar="ssr_id", type=int, help="start ssr proxy")
parser.add_argument("-S", "--stop", nargs='?', const=-1, metavar="ssr_id", type=int, help="stop ssr proxy")
parser.add_argument("-u", "--update", action="store_true", help="update ssr list")
parser.add_argument("-v", "--version", action="store_true", help="display version")
parser.add_argument("--generate-clash", action="store_true", help="generate clash config yaml")
parser.add_argument("--display-json", metavar="ssr_id", type=int, help="display ssr json info")
parser.add_argument("--test-speed", type=int, metavar="ssr_id", help="test ssr nodes download and upload speed")
parser.add_argument("--fast-node", action="store_true", help="find most fast by delay and start ssr proxy")
parser.add_argument("--setting-url", metavar="ssr_subscribe_url", help="setting ssr subscribe url")
parser.add_argument("--setting-address", metavar="ssr_local_address", help="setting ssr local address")
parser.add_argument("--list-url", action="store_true", help="list ssr subscribe url")
parser.add_argument("--add-url", metavar="ssr_subscribe_url", help="add ssr subscribe url")
parser.add_argument("--remove-url", metavar="ssr_subscribe_url", help="remove ssr subscribe url")
parser.add_argument("--list-address", action="store_true", help="list ssr local address")
parser.add_argument("--parse-url", metavar="ssr_url", help="pares ssr url")
parser.add_argument("--append-ssr", metavar="ssr_file_path", help="append ssr nodes from file")
parser.add_argument("-b", action="store_true", help="append_ssr file is base64")
parser.add_argument("--clear-ssr", metavar="ssr_id", nargs="?", const="fail",
help="if ssr_id is not empty, clear ssr node by ssr_id, else clear fail nodes")
parser.add_argument("-all", action="store_true", help="clear all ssr node")
parser.add_argument("--add-ssr", metavar="ssr_url", help="add ssr node")
parser.add_argument("--test-again", metavar="ssr_node_id", type=int, help="test ssr node again")
parser.add_argument("--print-qrcode", metavar="ssr_node_id", type=int, help="print ssr node qrcode")
parser.add_argument("--http", metavar="action[start stop status]", help="Manager local http server")
parser.add_argument("--http-port", metavar="http server port", default=80, type=int,
help="assign local http server port")
parser.add_argument("--setting-global-proxy", action="store_true",
help="setting system global proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--setting-pac-proxy", action="store_true",
help="setting system pac proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--close-system-proxy", action="store_true",
help="close system proxy,only support on " + color.red('Ubuntu Desktop'))
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.list:
DisplayShadowsocksr.display_shadowsocksr_list()
elif args.update:
UpdateConfigurations.update_subscribe()
elif args.fast_node:
HandleShadowsocksr.select_fast_node(args.port)
elif args.start is not None:
HandleShadowsocksr.start(ssr_id=args.start, local_port=args.port)
elif args.stop is not None:
HandleShadowsocksr.stop(ssr_id=args.stop, local_port=args.port)
elif args.version:
DisplayShadowsocksr.display_version()
elif args.setting_url:
UpdateConfigurations.reset_subscribe_url(args.setting_url)
elif args.append_ssr:
if not os.path.isfile(args.append_ssr):
            logger.error(f'append_ssr file {args.append_ssr} does not exist')
return
with open(args.append_ssr, 'r', encoding='UTF-8') as f:
txt = f.read()
if args.b:
txt = ParseShadowsocksr.base64_decode(txt)
ssr_set = set()
for line in txt.splitlines():
for ssr in re.findall(r'ssr://[0-9a-zA-Z=-_/+]+', line):
ssr_set.add(ssr)
for ssr in ssr_set:
try:
UpdateConfigurations.append_ssr_node(ssr)
except Exception as e:
logger.error(f'add ssr node error {ssr}')
logger.error(traceback.format_exc())
elif args.clear_ssr:
UpdateConfigurations.clear_ssr_nodes(args.clear_ssr, args.all)
elif args.setting_address:
UpdateConfigurations.update_local_address(args.setting_address)
elif args.list_url:
DisplayShadowsocksr.display_subscribe_url()
elif args.add_url:
UpdateConfigurations.add_subscribe_url(args.add_url)
elif args.remove_url:
UpdateConfigurations.remove_subscribe_url(args.remove_url)
elif args.list_address:
DisplayShadowsocksr.display_local_address()
elif args.parse_url:
DisplayShadowsocksr.display_shadowsocksr_json_by_url(args.parse_url)
elif args.add_ssr:
UpdateConfigurations.add_shadowsocksr_by_url(args.add_ssr)
elif args.test_again is not None:
UpdateConfigurations.update_shadowsocksr_connect_status(ssr_id=args.test_again)
elif args.print_qrcode is not None:
DisplayShadowsocksr.display_qrcode(ssr_id=args.print_qrcode)
elif args.setting_global_proxy:
UpdateSystemProxy.open_global_proxy(args.port, args.http_port)
elif args.setting_pac_proxy:
UpdateSystemProxy.open_pac_proxy(args.port, args.http_port)
elif args.close_system_proxy:
UpdateSystemProxy.close_proxy(args.port, args.http_port)
elif args.test_speed is not None:
DisplayShadowsocksr.display_shadowsocksr_speed(ssr_id=args.test_speed)
elif args.display_json is not None:
DisplayShadowsocksr.display_shadowsocksr_json(ssr_id=args.display_json)
elif args.generate_clash:
GenerateClashConfig.generate_clash_config()
elif args.http:
HandleHttpServer.handle_http_server(args.http, args.port, args.http_port)
else:
parser.print_help()
if __name__ == "__main__":
main()
| """
@author: tyrantlucifer
@contact: <EMAIL>
@blog: https://tyrantlucifer.com
@file: main.py
@time: 2021/2/18 21:36
@desc: shadowsocksr-cli entry point
"""
import argparse
import traceback
from shadowsocksr_cli.functions import *
def get_parser():
    parser = argparse.ArgumentParser(description=color.blue("The shadowsocksr command client based on Python."),
epilog=color.yellow('Powered by ') + color.green('tyrantlucifer') + color.yellow(
". If you have any questions,you can send e-mails to ") + color.green(
"<EMAIL>"))
parser.add_argument("-l", "--list", action="store_true", help="show ssr list")
parser.add_argument("-p", "--port", default=1080, metavar="local_port", type=int,
help="assign local proxy port,use with -s")
parser.add_argument("-s", "--start", metavar="ssr_id", type=int, help="start ssr proxy")
parser.add_argument("-S", "--stop", nargs='?', const=-1, metavar="ssr_id", type=int, help="stop ssr proxy")
parser.add_argument("-u", "--update", action="store_true", help="update ssr list")
parser.add_argument("-v", "--version", action="store_true", help="display version")
parser.add_argument("--generate-clash", action="store_true", help="generate clash config yaml")
parser.add_argument("--display-json", metavar="ssr_id", type=int, help="display ssr json info")
parser.add_argument("--test-speed", type=int, metavar="ssr_id", help="test ssr nodes download and upload speed")
parser.add_argument("--fast-node", action="store_true", help="find most fast by delay and start ssr proxy")
parser.add_argument("--setting-url", metavar="ssr_subscribe_url", help="setting ssr subscribe url")
parser.add_argument("--setting-address", metavar="ssr_local_address", help="setting ssr local address")
parser.add_argument("--list-url", action="store_true", help="list ssr subscribe url")
parser.add_argument("--add-url", metavar="ssr_subscribe_url", help="add ssr subscribe url")
parser.add_argument("--remove-url", metavar="ssr_subscribe_url", help="remove ssr subscribe url")
parser.add_argument("--list-address", action="store_true", help="list ssr local address")
parser.add_argument("--parse-url", metavar="ssr_url", help="pares ssr url")
parser.add_argument("--append-ssr", metavar="ssr_file_path", help="append ssr nodes from file")
parser.add_argument("-b", action="store_true", help="append_ssr file is base64")
parser.add_argument("--clear-ssr", metavar="ssr_id", nargs="?", const="fail",
help="if ssr_id is not empty, clear ssr node by ssr_id, else clear fail nodes")
parser.add_argument("-all", action="store_true", help="clear all ssr node")
parser.add_argument("--add-ssr", metavar="ssr_url", help="add ssr node")
parser.add_argument("--test-again", metavar="ssr_node_id", type=int, help="test ssr node again")
parser.add_argument("--print-qrcode", metavar="ssr_node_id", type=int, help="print ssr node qrcode")
parser.add_argument("--http", metavar="action[start stop status]", help="Manager local http server")
parser.add_argument("--http-port", metavar="http server port", default=80, type=int,
help="assign local http server port")
parser.add_argument("--setting-global-proxy", action="store_true",
help="setting system global proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--setting-pac-proxy", action="store_true",
help="setting system pac proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--close-system-proxy", action="store_true",
help="close system proxy,only support on " + color.red('Ubuntu Desktop'))
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.list:
DisplayShadowsocksr.display_shadowsocksr_list()
elif args.update:
UpdateConfigurations.update_subscribe()
elif args.fast_node:
HandleShadowsocksr.select_fast_node(args.port)
elif args.start is not None:
HandleShadowsocksr.start(ssr_id=args.start, local_port=args.port)
elif args.stop is not None:
HandleShadowsocksr.stop(ssr_id=args.stop, local_port=args.port)
elif args.version:
DisplayShadowsocksr.display_version()
elif args.setting_url:
UpdateConfigurations.reset_subscribe_url(args.setting_url)
elif args.append_ssr:
if not os.path.isfile(args.append_ssr):
            logger.error(f'append_ssr file {args.append_ssr} does not exist')
return
with open(args.append_ssr, 'r', encoding='UTF-8') as f:
txt = f.read()
if args.b:
txt = ParseShadowsocksr.base64_decode(txt)
ssr_set = set()
for line in txt.splitlines():
for ssr in re.findall(r'ssr://[0-9a-zA-Z=-_/+]+', line):
ssr_set.add(ssr)
for ssr in ssr_set:
try:
UpdateConfigurations.append_ssr_node(ssr)
except Exception as e:
logger.error(f'add ssr node error {ssr}')
logger.error(traceback.format_exc())
elif args.clear_ssr:
UpdateConfigurations.clear_ssr_nodes(args.clear_ssr, args.all)
elif args.setting_address:
UpdateConfigurations.update_local_address(args.setting_address)
elif args.list_url:
DisplayShadowsocksr.display_subscribe_url()
elif args.add_url:
UpdateConfigurations.add_subscribe_url(args.add_url)
elif args.remove_url:
UpdateConfigurations.remove_subscribe_url(args.remove_url)
elif args.list_address:
DisplayShadowsocksr.display_local_address()
elif args.parse_url:
DisplayShadowsocksr.display_shadowsocksr_json_by_url(args.parse_url)
elif args.add_ssr:
UpdateConfigurations.add_shadowsocksr_by_url(args.add_ssr)
elif args.test_again is not None:
UpdateConfigurations.update_shadowsocksr_connect_status(ssr_id=args.test_again)
elif args.print_qrcode is not None:
DisplayShadowsocksr.display_qrcode(ssr_id=args.print_qrcode)
elif args.setting_global_proxy:
UpdateSystemProxy.open_global_proxy(args.port, args.http_port)
elif args.setting_pac_proxy:
UpdateSystemProxy.open_pac_proxy(args.port, args.http_port)
elif args.close_system_proxy:
UpdateSystemProxy.close_proxy(args.port, args.http_port)
elif args.test_speed is not None:
DisplayShadowsocksr.display_shadowsocksr_speed(ssr_id=args.test_speed)
elif args.display_json is not None:
DisplayShadowsocksr.display_shadowsocksr_json(ssr_id=args.display_json)
elif args.generate_clash:
GenerateClashConfig.generate_clash_config()
elif args.http:
HandleHttpServer.handle_http_server(args.http, args.port, args.http_port)
else:
parser.print_help()
if __name__ == "__main__":
main()
| en | 0.230887 | @author: tyrantlucifer @contact: <EMAIL> @blog: https://tyrantlucifer.com @file: main.py @time: 2021/2/18 21:36 @desc: shadowsocksr-cli入口函数 | 2.475671 | 2 |
examples/Python 2.7/Client_Complete.py | jcjveraa/EDDN | 100 | 9072 | <gh_stars>10-100
import zlib
import zmq
import simplejson
import sys, os, datetime, time
"""
" Configuration
"""
__relayEDDN = 'tcp://eddn.edcd.io:9500'
#__timeoutEDDN = 600000 # 10 minutes
__timeoutEDDN = 60000 # 1 minute
# Set False to listen to production stream; True to listen to debug stream
__debugEDDN = False;
# Set to False if you do not want verbose logging
__logVerboseFile = os.path.dirname(__file__) + '/Logs_Verbose_EDDN_%DATE%.htm'
#__logVerboseFile = False
# Set to False if you do not want JSON logging
__logJSONFile = os.path.dirname(__file__) + '/Logs_JSON_EDDN_%DATE%.log'
#__logJSONFile = False
# A sample list of authorised softwares
__authorisedSoftwares = [
"EDCE",
"ED-TD.SPACE",
"EliteOCR",
"Maddavo's Market Share",
"RegulatedNoise",
"RegulatedNoise__DJ",
"E:D Market Connector [Windows]"
]
# Use this to exclude yourself, for example if you don't want to handle your own messages ^^
__excludedSoftwares = [
'My Awesome Market Uploader'
]
"""
" Start
"""
def date(__format):
d = datetime.datetime.utcnow()
return d.strftime(__format)
__oldTime = False
def echoLog(__str):
global __oldTime, __logVerboseFile
if __logVerboseFile != False:
__logVerboseFileParsed = __logVerboseFile.replace('%DATE%', str(date('%Y-%m-%d')))
if __logVerboseFile != False and not os.path.exists(__logVerboseFileParsed):
f = open(__logVerboseFileParsed, 'w')
f.write('<style type="text/css">html { white-space: pre; font-family: Courier New,Courier,Lucida Sans Typewriter,Lucida Typewriter,monospace; }</style>')
f.close()
if (__oldTime == False) or (__oldTime != date('%H:%M:%S')):
__oldTime = date('%H:%M:%S')
__str = str(__oldTime) + ' | ' + str(__str)
else:
__str = ' ' + ' | ' + str(__str)
print __str
sys.stdout.flush()
if __logVerboseFile != False:
f = open(__logVerboseFileParsed, 'a')
f.write(__str + '\n')
f.close()
def echoLogJSON(__json):
global __logJSONFile
if __logJSONFile != False:
__logJSONFileParsed = __logJSONFile.replace('%DATE%', str(date('%Y-%m-%d')))
f = open(__logJSONFileParsed, 'a')
f.write(str(__json) + '\n')
f.close()
def main():
echoLog('Starting EDDN Subscriber')
echoLog('')
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE, "")
subscriber.setsockopt(zmq.RCVTIMEO, __timeoutEDDN)
while True:
try:
subscriber.connect(__relayEDDN)
echoLog('Connect to ' + __relayEDDN)
echoLog('')
echoLog('')
poller = zmq.Poller()
poller.register(subscriber, zmq.POLLIN)
while True:
socks = dict(poller.poll(__timeoutEDDN))
if socks:
if socks.get(subscriber) == zmq.POLLIN:
__message = subscriber.recv(zmq.NOBLOCK)
__message = zlib.decompress(__message)
__json = simplejson.loads(__message)
__converted = False
# Handle commodity v1
if __json['$schemaRef'] == 'https://eddn.edcd.io/schemas/commodity/1' + ('/test' if (__debugEDDN == True) else ''):
echoLogJSON(__message)
echoLog('Receiving commodity-v1 message...')
echoLog(' - Converting to v3...')
__temp = {}
__temp['$schemaRef'] = 'https://eddn.edcd.io/schemas/commodity/3' + ('/test' if (__debugEDDN == True) else '')
__temp['header'] = __json['header']
__temp['message'] = {}
__temp['message']['systemName'] = __json['message']['systemName']
__temp['message']['stationName'] = __json['message']['stationName']
__temp['message']['timestamp'] = __json['message']['timestamp']
__temp['message']['commodities'] = []
__commodity = {}
if 'itemName' in __json['message']:
__commodity['name'] = __json['message']['itemName']
if 'buyPrice' in __json['message']:
__commodity['buyPrice'] = __json['message']['buyPrice']
if 'stationStock' in __json['message']:
__commodity['supply'] = __json['message']['stationStock']
if 'supplyLevel' in __json['message']:
__commodity['supplyLevel'] = __json['message']['supplyLevel']
if 'sellPrice' in __json['message']:
__commodity['sellPrice'] = __json['message']['sellPrice']
if 'demand' in __json['message']:
__commodity['demand'] = __json['message']['demand']
                            if 'demandLevel' in __json['message']:
__commodity['demandLevel'] = __json['message']['demandLevel']
__temp['message']['commodities'].append(__commodity)
__json = __temp
del __temp, __commodity
__converted = True
# Handle commodity v3
if __json['$schemaRef'] == 'https://eddn.edcd.io/schemas/commodity/3' + ('/test' if (__debugEDDN == True) else ''):
if __converted == False:
echoLogJSON(__message)
echoLog('Receiving commodity-v3 message...')
__authorised = False
__excluded = False
if __json['header']['softwareName'] in __authorisedSoftwares:
__authorised = True
if __json['header']['softwareName'] in __excludedSoftwares:
__excluded = True
echoLog(' - Software: ' + __json['header']['softwareName'] + ' / ' + __json['header']['softwareVersion'])
echoLog(' - ' + 'AUTHORISED' if (__authorised == True) else
('EXCLUDED' if (__excluded == True) else 'UNAUTHORISED')
)
if __authorised == True and __excluded == False:
# Do what you want with the data...
# Have fun !
# For example
echoLog(' - Timestamp: ' + __json['message']['timestamp'])
echoLog(' - Uploader ID: ' + __json['header']['uploaderID'])
echoLog(' - System Name: ' + __json['message']['systemName'])
echoLog(' - Station Name: ' + __json['message']['stationName'])
for __commodity in __json['message']['commodities']:
echoLog(' - Name: ' + __commodity['name'])
echoLog(' - Buy Price: ' + str(__commodity['buyPrice']))
echoLog(' - Supply: ' + str(__commodity['supply'])
+ ((' (' + __commodity['supplyLevel'] + ')') if 'supplyLevel' in __commodity else '')
)
echoLog(' - Sell Price: ' + str(__commodity['sellPrice']))
echoLog(' - Demand: ' + str(__commodity['demand'])
+ ((' (' + __commodity['demandLevel'] + ')') if 'demandLevel' in __commodity else '')
)
# End example
del __authorised, __excluded
echoLog('')
echoLog('')
del __converted
else:
print 'Disconnect from ' + __relayEDDN + ' (After timeout)'
echoLog('')
echoLog('')
sys.stdout.flush()
subscriber.disconnect(__relayEDDN)
break
except zmq.ZMQError, e:
subscriber.disconnect(__relayEDDN)
echoLog('')
echoLog('Disconnect from ' + __relayEDDN + ' (After receiving ZMQError)')
echoLog('ZMQSocketException: ' + str(e))
echoLog('')
time.sleep(10)
if __name__ == '__main__':
main()
| import zlib
import zmq
import simplejson
import sys, os, datetime, time
"""
" Configuration
"""
__relayEDDN = 'tcp://eddn.edcd.io:9500'
#__timeoutEDDN = 600000 # 10 minutes
__timeoutEDDN = 60000 # 1 minute
# Set False to listen to production stream; True to listen to debug stream
__debugEDDN = False;
# Set to False if you do not want verbose logging
__logVerboseFile = os.path.dirname(__file__) + '/Logs_Verbose_EDDN_%DATE%.htm'
#__logVerboseFile = False
# Set to False if you do not want JSON logging
__logJSONFile = os.path.dirname(__file__) + '/Logs_JSON_EDDN_%DATE%.log'
#__logJSONFile = False
# A sample list of authorised softwares
__authorisedSoftwares = [
"EDCE",
"ED-TD.SPACE",
"EliteOCR",
"Maddavo's Market Share",
"RegulatedNoise",
"RegulatedNoise__DJ",
"E:D Market Connector [Windows]"
]
# Use this to exclude yourself, for example if you don't want to handle your own messages ^^
__excludedSoftwares = [
'My Awesome Market Uploader'
]
"""
" Start
"""
def date(__format):
d = datetime.datetime.utcnow()
return d.strftime(__format)
__oldTime = False
def echoLog(__str):
global __oldTime, __logVerboseFile
if __logVerboseFile != False:
__logVerboseFileParsed = __logVerboseFile.replace('%DATE%', str(date('%Y-%m-%d')))
if __logVerboseFile != False and not os.path.exists(__logVerboseFileParsed):
f = open(__logVerboseFileParsed, 'w')
f.write('<style type="text/css">html { white-space: pre; font-family: Courier New,Courier,Lucida Sans Typewriter,Lucida Typewriter,monospace; }</style>')
f.close()
if (__oldTime == False) or (__oldTime != date('%H:%M:%S')):
__oldTime = date('%H:%M:%S')
__str = str(__oldTime) + ' | ' + str(__str)
else:
__str = ' ' + ' | ' + str(__str)
print __str
sys.stdout.flush()
if __logVerboseFile != False:
f = open(__logVerboseFileParsed, 'a')
f.write(__str + '\n')
f.close()
def echoLogJSON(__json):
global __logJSONFile
if __logJSONFile != False:
__logJSONFileParsed = __logJSONFile.replace('%DATE%', str(date('%Y-%m-%d')))
f = open(__logJSONFileParsed, 'a')
f.write(str(__json) + '\n')
f.close()
def main():
echoLog('Starting EDDN Subscriber')
echoLog('')
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE, "")
subscriber.setsockopt(zmq.RCVTIMEO, __timeoutEDDN)
while True:
try:
subscriber.connect(__relayEDDN)
echoLog('Connect to ' + __relayEDDN)
echoLog('')
echoLog('')
poller = zmq.Poller()
poller.register(subscriber, zmq.POLLIN)
while True:
socks = dict(poller.poll(__timeoutEDDN))
if socks:
if socks.get(subscriber) == zmq.POLLIN:
__message = subscriber.recv(zmq.NOBLOCK)
__message = zlib.decompress(__message)
__json = simplejson.loads(__message)
__converted = False
# Handle commodity v1
if __json['$schemaRef'] == 'https://eddn.edcd.io/schemas/commodity/1' + ('/test' if (__debugEDDN == True) else ''):
echoLogJSON(__message)
echoLog('Receiving commodity-v1 message...')
echoLog(' - Converting to v3...')
__temp = {}
__temp['$schemaRef'] = 'https://eddn.edcd.io/schemas/commodity/3' + ('/test' if (__debugEDDN == True) else '')
__temp['header'] = __json['header']
__temp['message'] = {}
__temp['message']['systemName'] = __json['message']['systemName']
__temp['message']['stationName'] = __json['message']['stationName']
__temp['message']['timestamp'] = __json['message']['timestamp']
__temp['message']['commodities'] = []
__commodity = {}
if 'itemName' in __json['message']:
__commodity['name'] = __json['message']['itemName']
if 'buyPrice' in __json['message']:
__commodity['buyPrice'] = __json['message']['buyPrice']
if 'stationStock' in __json['message']:
__commodity['supply'] = __json['message']['stationStock']
if 'supplyLevel' in __json['message']:
__commodity['supplyLevel'] = __json['message']['supplyLevel']
if 'sellPrice' in __json['message']:
__commodity['sellPrice'] = __json['message']['sellPrice']
if 'demand' in __json['message']:
__commodity['demand'] = __json['message']['demand']
                            if 'demandLevel' in __json['message']:
__commodity['demandLevel'] = __json['message']['demandLevel']
__temp['message']['commodities'].append(__commodity)
__json = __temp
del __temp, __commodity
__converted = True
# Handle commodity v3
if __json['$schemaRef'] == 'https://eddn.edcd.io/schemas/commodity/3' + ('/test' if (__debugEDDN == True) else ''):
if __converted == False:
echoLogJSON(__message)
echoLog('Receiving commodity-v3 message...')
__authorised = False
__excluded = False
if __json['header']['softwareName'] in __authorisedSoftwares:
__authorised = True
if __json['header']['softwareName'] in __excludedSoftwares:
__excluded = True
echoLog(' - Software: ' + __json['header']['softwareName'] + ' / ' + __json['header']['softwareVersion'])
echoLog(' - ' + 'AUTHORISED' if (__authorised == True) else
('EXCLUDED' if (__excluded == True) else 'UNAUTHORISED')
)
if __authorised == True and __excluded == False:
# Do what you want with the data...
# Have fun !
# For example
echoLog(' - Timestamp: ' + __json['message']['timestamp'])
echoLog(' - Uploader ID: ' + __json['header']['uploaderID'])
echoLog(' - System Name: ' + __json['message']['systemName'])
echoLog(' - Station Name: ' + __json['message']['stationName'])
for __commodity in __json['message']['commodities']:
echoLog(' - Name: ' + __commodity['name'])
echoLog(' - Buy Price: ' + str(__commodity['buyPrice']))
echoLog(' - Supply: ' + str(__commodity['supply'])
+ ((' (' + __commodity['supplyLevel'] + ')') if 'supplyLevel' in __commodity else '')
)
echoLog(' - Sell Price: ' + str(__commodity['sellPrice']))
echoLog(' - Demand: ' + str(__commodity['demand'])
+ ((' (' + __commodity['demandLevel'] + ')') if 'demandLevel' in __commodity else '')
)
# End example
del __authorised, __excluded
echoLog('')
echoLog('')
del __converted
else:
print 'Disconnect from ' + __relayEDDN + ' (After timeout)'
echoLog('')
echoLog('')
sys.stdout.flush()
subscriber.disconnect(__relayEDDN)
break
except zmq.ZMQError, e:
subscriber.disconnect(__relayEDDN)
echoLog('')
echoLog('Disconnect from ' + __relayEDDN + ' (After receiving ZMQError)')
echoLog('ZMQSocketException: ' + str(e))
echoLog('')
time.sleep(10)
if __name__ == '__main__':
main() | en | 0.782613 | " Configuration #__timeoutEDDN = 600000 # 10 minuts # 1 minut # Set False to listen to production stream; True to listen to debug stream # Set to False if you do not want verbose logging #__logVerboseFile = False # Set to False if you do not want JSON logging #__logJSONFile = False # A sample list of authorised softwares # Used this to excludes yourself for example has you don't want to handle your own messages ^^ " Start # Handle commodity v1 # Handle commodity v3 # Do what you want with the data... # Have fun ! # For example # End example | 2.078486 | 2 |
zad1.py | nadkkka/H8PW | 6 | 9073 |
def repleace_pattern(t,s,r):
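    # Return a copy of list t in which the first occurrence of the pattern s is replaced by r.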
assert len(t) > 0
assert len(s) > 0
assert len(r) > 0
assert len(t) >= len(s)
n = len(t)
m = len(s)
k = len(r)
    # index of the first occurrence of pattern s in t (-1 if not found)
    idx = -1
    # only check start positions where the whole pattern still fits inside t
    for i in range(0, n - m + 1):
if t[i] == s[0]:
pattern = True
for j in range(1,m):
if t[i+j] != s[j]:
pattern = False
break
if(pattern):
idx=i
break
result = t
print(idx)
if(idx!=-1):
result = [*t[0:idx],*r,*t[idx+m:n]]
return result
print (repleace_pattern([1,2,3,1,2,3,4],[1,2,3,4],[9,0]))
|
def repleace_pattern(t,s,r):
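    # Return a copy of list t in which the first occurrence of the pattern s is replaced by r.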
assert len(t) > 0
assert len(s) > 0
assert len(r) > 0
assert len(t) >= len(s)
n = len(t)
m = len(s)
k = len(r)
    # index of the first occurrence of pattern s in t (-1 if not found)
    idx = -1
    # only check start positions where the whole pattern still fits inside t
    for i in range(0, n - m + 1):
if t[i] == s[0]:
pattern = True
for j in range(1,m):
if t[i+j] != s[j]:
pattern = False
break
if(pattern):
idx=i
break
result = t
print(idx)
if(idx!=-1):
result = [*t[0:idx],*r,*t[idx+m:n]]
return result
print (repleace_pattern([1,2,3,1,2,3,4],[1,2,3,4],[9,0]))
| none | 1 | 2.955106 | 3 |
|
mycroft/client/enclosure/weather.py | Matjordan/mycroft-core | 0 | 9074 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EnclosureWeather:
"""
Listens for Enclosure API commands to display indicators of the weather.
Performs the associated command on Arduino by writing on the Serial port.
"""
def __init__(self, bus, writer):
self.bus = bus
self.writer = writer
self.__init_events()
def __init_events(self):
self.bus.on('enclosure.weather.display', self.display)
def display(self, event=None):
if event and event.data:
# Convert img_code to icon
img_code = event.data.get("img_code", None)
icon = None
if img_code == 0:
# sunny
icon = "IICEIBMDNLMDIBCEAA"
elif img_code == 1:
# partly cloudy
icon = "IIEEGBGDHLHDHBGEEA"
elif img_code == 2:
# cloudy
icon = "IIIBMDMDODODODMDIB"
elif img_code == 3:
# light rain
icon = "IIMAOJOFPBPJPFOBMA"
elif img_code == 4:
# raining
icon = "IIMIOFOBPFPDPJOFMA"
elif img_code == 5:
# storming
icon = "IIAAIIMEODLBJAAAAA"
elif img_code == 6:
# snowing
icon = "IIJEKCMBPHMBKCJEAA"
elif img_code == 7:
# wind/mist
icon = "IIABIBIBIJIJJGJAGA"
temp = event.data.get("temp", None)
if icon is not None and temp is not None:
icon = "x=2," + icon
msg = "weather.display=" + str(temp) + "," + str(icon)
self.writer.write(msg)
| # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EnclosureWeather:
"""
Listens for Enclosure API commands to display indicators of the weather.
Performs the associated command on Arduino by writing on the Serial port.
"""
def __init__(self, bus, writer):
self.bus = bus
self.writer = writer
self.__init_events()
def __init_events(self):
self.bus.on('enclosure.weather.display', self.display)
def display(self, event=None):
if event and event.data:
# Convert img_code to icon
img_code = event.data.get("img_code", None)
icon = None
if img_code == 0:
# sunny
icon = "IICEIBMDNLMDIBCEAA"
elif img_code == 1:
# partly cloudy
icon = "IIEEGBGDHLHDHBGEEA"
elif img_code == 2:
# cloudy
icon = "IIIBMDMDODODODMDIB"
elif img_code == 3:
# light rain
icon = "IIMAOJOFPBPJPFOBMA"
elif img_code == 4:
# raining
icon = "IIMIOFOBPFPDPJOFMA"
elif img_code == 5:
# storming
icon = "IIAAIIMEODLBJAAAAA"
elif img_code == 6:
# snowing
icon = "IIJEKCMBPHMBKCJEAA"
elif img_code == 7:
# wind/mist
icon = "IIABIBIBIJIJJGJAGA"
temp = event.data.get("temp", None)
if icon is not None and temp is not None:
icon = "x=2," + icon
msg = "weather.display=" + str(temp) + "," + str(icon)
self.writer.write(msg)
| en | 0.855848 | # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Listens for Enclosure API commands to display indicators of the weather. Performs the associated command on Arduino by writing on the Serial port. # Convert img_code to icon # sunny # partly cloudy # cloudy # light rain # raining # storming # snowing # wind/mist | 2.617499 | 3 |
tests/processing_components/test_image_iterators.py | cnwangfeng/algorithm-reference-library | 22 | 9075 | <reponame>cnwangfeng/algorithm-reference-library
"""Unit tests for image iteration
"""
import logging
import unittest
import numpy
from data_models.polarisation import PolarisationFrame
from processing_components.image.iterators import image_raster_iter, image_channel_iter, image_null_iter
from processing_components.image.operations import create_empty_image_like
from processing_components.simulation.testing_support import create_test_image
log = logging.getLogger(__name__)
class TestImageIterators(unittest.TestCase):
def test_raster(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
for nraster in [1, 2, 4, 8, 9]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch in image_raster_iter(m31model, facets=nraster):
assert patch.data.shape[3] == (m31model.data.shape[3] // nraster), \
"Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[3],
(m31model.data.shape[3] // nraster))
assert patch.data.shape[2] == (m31model.data.shape[2] // nraster), \
"Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[2],
(m31model.data.shape[2] // nraster))
patch.data *= 2.0
diff = m31model.data - 2.0 * m31original.data
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
assert numpy.max(numpy.abs(diff)) == 0.0, "Raster set failed for %d" % nraster
def test_raster_exception(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
for nraster, overlap in [(-1, -1), (-1, 0), (2, 128), (1e6, 127)]:
with self.assertRaises(AssertionError) as context:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch in image_raster_iter(m31model, facets=nraster, overlap=overlap):
patch.data *= 2.0
def test_raster_overlap(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_linear(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='linear'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_quadratic(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='quadratic'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_tukey(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='tukey'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_channelise(self):
m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
frequency=numpy.linspace(1e8,1.1e8, 128))
for subimages in [128, 16, 8, 2, 1]:
for slab in image_channel_iter(m31cube, subimages=subimages):
assert slab.data.shape[0] == 128 // subimages
def test_null(self):
m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
frequency=numpy.linspace(1e8, 1.1e8, 128))
for i, im in enumerate(image_null_iter(m31cube)):
assert i<1, "Null iterator returns more than one value"
if __name__ == '__main__':
unittest.main()
| """Unit tests for image iteration
"""
import logging
import unittest
import numpy
from data_models.polarisation import PolarisationFrame
from processing_components.image.iterators import image_raster_iter, image_channel_iter, image_null_iter
from processing_components.image.operations import create_empty_image_like
from processing_components.simulation.testing_support import create_test_image
log = logging.getLogger(__name__)
class TestImageIterators(unittest.TestCase):
def test_raster(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
for nraster in [1, 2, 4, 8, 9]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch in image_raster_iter(m31model, facets=nraster):
assert patch.data.shape[3] == (m31model.data.shape[3] // nraster), \
"Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[3],
(m31model.data.shape[3] // nraster))
assert patch.data.shape[2] == (m31model.data.shape[2] // nraster), \
"Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[2],
(m31model.data.shape[2] // nraster))
patch.data *= 2.0
diff = m31model.data - 2.0 * m31original.data
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
assert numpy.max(numpy.abs(diff)) == 0.0, "Raster set failed for %d" % nraster
def test_raster_exception(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
for nraster, overlap in [(-1, -1), (-1, 0), (2, 128), (1e6, 127)]:
with self.assertRaises(AssertionError) as context:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch in image_raster_iter(m31model, facets=nraster, overlap=overlap):
patch.data *= 2.0
def test_raster_overlap(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_linear(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='linear'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_quadratic(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='quadratic'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_raster_overlap_tukey(self):
m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
flat = create_empty_image_like(m31original)
for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
taper='tukey'),
image_raster_iter(flat, facets=nraster, overlap=overlap)):
patch.data *= 2.0
flat_patch.data[...] += 1.0
assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
def test_channelise(self):
m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
frequency=numpy.linspace(1e8,1.1e8, 128))
for subimages in [128, 16, 8, 2, 1]:
for slab in image_channel_iter(m31cube, subimages=subimages):
assert slab.data.shape[0] == 128 // subimages
def test_null(self):
m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
frequency=numpy.linspace(1e8, 1.1e8, 128))
for i, im in enumerate(image_null_iter(m31cube)):
assert i<1, "Null iterator returns more than one value"
if __name__ == '__main__':
unittest.main() | en | 0.788423 | Unit tests for image iteration | 2.709569 | 3 |
a_other_video/MCL-Motion-Focused-Contrastive-Learning/sts/motion_sts.py | alisure-fork/Video-Swin-Transformer | 0 | 9076 | <reponame>alisure-fork/Video-Swin-Transformer<filename>a_other_video/MCL-Motion-Focused-Contrastive-Learning/sts/motion_sts.py
import cv2
import numpy as np
from scipy import ndimage
def compute_motion_boudary(flow_clip):
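    # Convolve each optical-flow frame with simple x/y derivative kernels to get
    # per-frame motion boundaries (dx_all, dy_all) and their sums over the clip (mb_x, mb_y).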
mx = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
my = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
dx_all = []
dy_all = []
mb_x = 0
mb_y = 0
for flow_img in flow_clip:
d_x = ndimage.convolve(flow_img, mx)
d_y = ndimage.convolve(flow_img, my)
dx_all.append(d_x)
dy_all.append(d_y)
mb_x += d_x
mb_y += d_y
dx_all = np.array(dx_all)
dy_all = np.array(dy_all)
return dx_all, dy_all, mb_x, mb_y
def zero_boundary(frame_mag):
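    # Zero out an 8-pixel border of the magnitude map.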
frame_mag[:8, :] = 0
frame_mag[:, :8] = 0
frame_mag[-8:, :] = 0
frame_mag[:, -8:] = 0
return frame_mag
def motion_mag_downsample(mag, size, input_size):
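    # Average the per-pixel motion magnitude over non-overlapping blocks,
    # turning an (input_size x input_size) map into a (size x size) mask.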
block_size = input_size // size
mask = np.zeros((size,size))
for i in range(size):
for j in range(size):
x_start = i * block_size
x_end = x_start + block_size
y_start = j * block_size
y_end = y_start + block_size
tmp_block = mag[x_start:x_end, y_start:y_end]
block_mean = np.mean(tmp_block)
mask[i, j] = block_mean
return mask
def motion_sts(flow_clip, size, input_size):
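    # Accumulate motion boundaries over the flow clip, convert them to a magnitude map,
    # and downsample it to a (size x size) spatio-temporal sampling mask.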
dx_all, dy_all, dx_sum, dy_sum = compute_motion_boudary(flow_clip)
mag, ang = cv2.cartToPolar(dx_sum, dy_sum, angleInDegrees=True)
mag_down = motion_mag_downsample(mag, size, input_size)
return mag_down
| import cv2
import numpy as np
from scipy import ndimage
def compute_motion_boudary(flow_clip):
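    # Convolve each optical-flow frame with simple x/y derivative kernels to get
    # per-frame motion boundaries (dx_all, dy_all) and their sums over the clip (mb_x, mb_y).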
mx = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
my = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
dx_all = []
dy_all = []
mb_x = 0
mb_y = 0
for flow_img in flow_clip:
d_x = ndimage.convolve(flow_img, mx)
d_y = ndimage.convolve(flow_img, my)
dx_all.append(d_x)
dy_all.append(d_y)
mb_x += d_x
mb_y += d_y
dx_all = np.array(dx_all)
dy_all = np.array(dy_all)
return dx_all, dy_all, mb_x, mb_y
def zero_boundary(frame_mag):
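    # Zero out an 8-pixel border of the magnitude map.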
frame_mag[:8, :] = 0
frame_mag[:, :8] = 0
frame_mag[-8:, :] = 0
frame_mag[:, -8:] = 0
return frame_mag
def motion_mag_downsample(mag, size, input_size):
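    # Average the per-pixel motion magnitude over non-overlapping blocks,
    # turning an (input_size x input_size) map into a (size x size) mask.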
block_size = input_size // size
mask = np.zeros((size,size))
for i in range(size):
for j in range(size):
x_start = i * block_size
x_end = x_start + block_size
y_start = j * block_size
y_end = y_start + block_size
tmp_block = mag[x_start:x_end, y_start:y_end]
block_mean = np.mean(tmp_block)
mask[i, j] = block_mean
return mask
def motion_sts(flow_clip, size, input_size):
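    # Accumulate motion boundaries over the flow clip, convert them to a magnitude map,
    # and downsample it to a (size x size) spatio-temporal sampling mask.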
dx_all, dy_all, dx_sum, dy_sum = compute_motion_boudary(flow_clip)
mag, ang = cv2.cartToPolar(dx_sum, dy_sum, angleInDegrees=True)
mag_down = motion_mag_downsample(mag, size, input_size)
return mag_down | none | 1 | 2.351971 | 2 |
|
tests/test_button.py | MSLNZ/msl-qt | 1 | 9077 | import os
import sys
import pytest
from msl.qt import convert, Button, QtWidgets, QtCore, Qt
def test_text():
b = Button(text='hello')
assert b.text() == 'hello'
assert b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextOnly
def test_icon():
path = os.path.dirname(__file__) + '/gamma.png'
gamma_size = QtCore.QSize(191, 291)
int_val = QtWidgets.QStyle.SP_DriveNetIcon
icon = convert.to_qicon(int_val)
sizes = icon.availableSizes()
if sys.platform == 'win32':
assert len(sizes) > 1
b = Button(icon=int_val)
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == sizes[0]
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
b = Button(icon=path)
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == gamma_size
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
b = Button(icon=convert.icon_to_base64(convert.to_qicon(path)))
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == gamma_size
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
def test_icon_size():
int_val = QtWidgets.QStyle.SP_DriveNetIcon
icon = convert.to_qicon(int_val)
sizes = icon.availableSizes()
if sys.platform == 'win32':
assert len(sizes) > 1
#
# specify the size to the get_icon function
#
b = Button(icon=convert.to_qicon(int_val))
assert b.text() == ''
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
assert b.iconSize() == sizes[0]
b = Button(icon=convert.to_qicon(int_val, size=789))
assert b.iconSize() == QtCore.QSize(789, 789)
b = Button(icon=convert.to_qicon(int_val, size=3.0))
# specifying a scale factor will use the largest available size
assert b.iconSize() == QtCore.QSize(3*sizes[-1].width(), 3*sizes[-1].height())
b = Button(icon=convert.to_qicon(int_val, size=QtCore.QSize(50, 50)))
assert b.iconSize() == QtCore.QSize(50, 50)
for size in [(256,), (256, 256, 256)]:
with pytest.raises(ValueError, match='(width, height)'):
Button(icon=convert.to_qicon(int_val, size=size))
#
# use the icon_size kwarg
#
b = Button(icon=convert.to_qicon(int_val), icon_size=1234)
assert b.iconSize() == QtCore.QSize(1234, 1234)
b = Button(icon=convert.to_qicon(int_val), icon_size=3.0)
# specifying a scale factor will use the largest available size
assert b.iconSize() == QtCore.QSize(3*sizes[-1].width(), 3*sizes[-1].height())
b = Button(icon=convert.to_qicon(int_val), icon_size=(312, 312))
assert b.iconSize() == QtCore.QSize(312, 312)
b = Button(icon=convert.to_qicon(int_val), icon_size=QtCore.QSize(500, 500))
assert b.iconSize() == QtCore.QSize(500, 500)
for size in [(256,), (256, 256, 256)]:
with pytest.raises(ValueError, match='(width, height)'):
Button(icon=convert.to_qicon(int_val), icon_size=size)
def test_text_and_icon():
b = Button(text='hello', icon=QtWidgets.QStyle.SP_DriveNetIcon)
assert b.text() == 'hello'
assert not b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextUnderIcon
b = Button(text='world', icon=QtWidgets.QStyle.SP_DriveNetIcon, is_text_under_icon=False)
assert b.text() == 'world'
assert not b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextBesideIcon
def test_tooltip():
b = Button(tooltip='hello')
assert b.text() == ''
assert b.icon().isNull()
assert b.toolTip() == 'hello'
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
| import os
import sys
import pytest
from msl.qt import convert, Button, QtWidgets, QtCore, Qt
def test_text():
b = Button(text='hello')
assert b.text() == 'hello'
assert b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextOnly
def test_icon():
path = os.path.dirname(__file__) + '/gamma.png'
gamma_size = QtCore.QSize(191, 291)
int_val = QtWidgets.QStyle.SP_DriveNetIcon
icon = convert.to_qicon(int_val)
sizes = icon.availableSizes()
if sys.platform == 'win32':
assert len(sizes) > 1
b = Button(icon=int_val)
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == sizes[0]
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
b = Button(icon=path)
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == gamma_size
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
b = Button(icon=convert.icon_to_base64(convert.to_qicon(path)))
assert b.text() == ''
assert not b.icon().isNull()
assert b.iconSize() == gamma_size
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
def test_icon_size():
int_val = QtWidgets.QStyle.SP_DriveNetIcon
icon = convert.to_qicon(int_val)
sizes = icon.availableSizes()
if sys.platform == 'win32':
assert len(sizes) > 1
#
# specify the size to the get_icon function
#
b = Button(icon=convert.to_qicon(int_val))
assert b.text() == ''
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
assert b.iconSize() == sizes[0]
b = Button(icon=convert.to_qicon(int_val, size=789))
assert b.iconSize() == QtCore.QSize(789, 789)
b = Button(icon=convert.to_qicon(int_val, size=3.0))
# specifying a scale factor will use the largest available size
assert b.iconSize() == QtCore.QSize(3*sizes[-1].width(), 3*sizes[-1].height())
b = Button(icon=convert.to_qicon(int_val, size=QtCore.QSize(50, 50)))
assert b.iconSize() == QtCore.QSize(50, 50)
for size in [(256,), (256, 256, 256)]:
with pytest.raises(ValueError, match='(width, height)'):
Button(icon=convert.to_qicon(int_val, size=size))
#
# use the icon_size kwarg
#
b = Button(icon=convert.to_qicon(int_val), icon_size=1234)
assert b.iconSize() == QtCore.QSize(1234, 1234)
b = Button(icon=convert.to_qicon(int_val), icon_size=3.0)
# specifying a scale factor will use the largest available size
assert b.iconSize() == QtCore.QSize(3*sizes[-1].width(), 3*sizes[-1].height())
b = Button(icon=convert.to_qicon(int_val), icon_size=(312, 312))
assert b.iconSize() == QtCore.QSize(312, 312)
b = Button(icon=convert.to_qicon(int_val), icon_size=QtCore.QSize(500, 500))
assert b.iconSize() == QtCore.QSize(500, 500)
for size in [(256,), (256, 256, 256)]:
with pytest.raises(ValueError, match='(width, height)'):
Button(icon=convert.to_qicon(int_val), icon_size=size)
def test_text_and_icon():
b = Button(text='hello', icon=QtWidgets.QStyle.SP_DriveNetIcon)
assert b.text() == 'hello'
assert not b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextUnderIcon
b = Button(text='world', icon=QtWidgets.QStyle.SP_DriveNetIcon, is_text_under_icon=False)
assert b.text() == 'world'
assert not b.icon().isNull()
assert b.toolButtonStyle() == Qt.ToolButtonTextBesideIcon
def test_tooltip():
b = Button(tooltip='hello')
assert b.text() == ''
assert b.icon().isNull()
assert b.toolTip() == 'hello'
assert b.toolButtonStyle() == Qt.ToolButtonIconOnly
| en | 0.325497 | # # specify the size to the get_icon function # # specifying a scale factor will use the largest available size # # use the icon_size kwarg # # specifying a scale factor will use the largest available size | 2.176756 | 2 |
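A small sketch of how the Button class exercised by these tests might be used in an application; the application setup and the clicked signal (inherited from the underlying Qt tool-button base class) are assumptions, not part of anything shown above.

from msl.qt import Button, QtWidgets

app = QtWidgets.QApplication([])  # a Qt application must exist before widgets are created
button = Button(text='Connect', icon=QtWidgets.QStyle.SP_DriveNetIcon,
                tooltip='Connect to the network drive')
button.clicked.connect(lambda: print('clicked'))  # assumes the standard QAbstractButton.clicked signal
button.show()
# app.exec_()  # would start the event loop in a real application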
Exercicios/ex028.py | MateusBarboza99/Python-03- | 0 | 9078 | <filename>Exercicios/ex028.py
from random import randint
from time import sleep
computador = randint(0, 5) # Faz o computador "PENSAR"
print('-=-' * 20)
print('Vou Pensar em Um Número Entre 0 e 5. Tente Adivinhar Paçoca...')
print('-=-' * 20)
jogador = int(input('Em que Número eu Pensei? ')) # Jogador tenta Adivinhar
print('PROCESSANDO........')
sleep(3)
if jogador == computador:
print('PARABÊNS! Você conseguiu me Vencer Paçoca')
else:
print('GANHEI! Eu Pensei no Número {} e não no {}!'.format(computador, jogador))
| <filename>Exercicios/ex028.py
from random import randint
from time import sleep
computador = randint(0, 5) # Faz o computador "PENSAR"
print('-=-' * 20)
print('Vou Pensar em Um Número Entre 0 e 5. Tente Adivinhar Paçoca...')
print('-=-' * 20)
jogador = int(input('Em que Número eu Pensei? ')) # Jogador tenta Adivinhar
print('PROCESSANDO........')
sleep(3)
if jogador == computador:
print('PARABÊNS! Você conseguiu me Vencer Paçoca')
else:
print('GANHEI! Eu Pensei no Número {} e não no {}!'.format(computador, jogador))
| pt | 0.782573 | # Faz o computador "PENSAR" # Jogador tenta Adivinhar | 3.945409 | 4 |
Student Database/input_details.py | manas1410/Miscellaneous-Development | 0 | 9079 | from tkinter import*
import tkinter.font as font
import sqlite3
name2=''
regis2=''
branch2=''
def main():
inp=Tk()
inp.geometry("430x300")
inp.title("Enter The Details")
inp.iconbitmap("logo/spectrumlogo.ico")
f=font.Font(family='Bookman Old Style',size=15,weight='bold')
f1=font.Font(family='Bookman Old Style',size=20,weight='bold')
global n2
global reg2
global b2
det=Label(inp,text=" Enter The Details\n",font=f1,fg='magenta')
det.grid(row=0,column=0,columnspan=2)
n1=Label(inp,text=" Name:",font=f)
n1.grid(row=1,column=0)
n2=Entry(inp,width=40)
n2.grid(row=1,column=1)
reg1=Label(inp,text="Registration ID:",font=f)
reg1.grid(row=2,column=0)
reg2=Entry(inp,width=40)
reg2.grid(row=2,column=1)
b1=Label(inp,text=" Branch:",font=f)
b1.grid(row=3,column=0)
b2=Entry(inp,width=40)
b2.grid(row=3,column=1)
invalid=Label(inp,text=' ',fg='red')
invalid.grid(row=4,columnspan=2)
def submit():
name2=n2.get()
regis2=reg2.get()
branch2=b2.get()
l=[name2,regis2,branch2]
if (None in l or "" in l):
invalid['text']="Please fill all the fields"
else:
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#insert into tabels
c.execute("""UPDATE mark_list SET name=? WHERE name=?""",(name2,' '))
c.execute("""UPDATE mark_list SET registration_no=? WHERE registration_no=?""",(regis2,' '))
c.execute("""UPDATE mark_list SET branch=? WHERE branch=?""",(branch2,' '))
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import subject
subject.main()
def back():
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
c.execute("""DELETE from mark_list where name=' '""")
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import welcome
welcome.main()
#buttons
sub1=Button(inp,text="Submit",borderwidth=3,padx=40,font=f,bg='green',command=submit)
sub1.grid(row=5,column=0,columnspan=2)
back1=Button(inp,text="Back",borderwidth=3,padx=20,font=f,bg='red',command=back)
back1.grid(row=6,column=0,columnspan=2)
inp.mainloop()
if __name__=='__main__':
main()
| from tkinter import*
import tkinter.font as font
import sqlite3
name2=''
regis2=''
branch2=''
def main():
inp=Tk()
inp.geometry("430x300")
inp.title("Enter The Details")
inp.iconbitmap("logo/spectrumlogo.ico")
f=font.Font(family='Bookman Old Style',size=15,weight='bold')
f1=font.Font(family='Bookman Old Style',size=20,weight='bold')
global n2
global reg2
global b2
det=Label(inp,text=" Enter The Details\n",font=f1,fg='magenta')
det.grid(row=0,column=0,columnspan=2)
n1=Label(inp,text=" Name:",font=f)
n1.grid(row=1,column=0)
n2=Entry(inp,width=40)
n2.grid(row=1,column=1)
reg1=Label(inp,text="Registration ID:",font=f)
reg1.grid(row=2,column=0)
reg2=Entry(inp,width=40)
reg2.grid(row=2,column=1)
b1=Label(inp,text=" Branch:",font=f)
b1.grid(row=3,column=0)
b2=Entry(inp,width=40)
b2.grid(row=3,column=1)
invalid=Label(inp,text=' ',fg='red')
invalid.grid(row=4,columnspan=2)
def submit():
name2=n2.get()
regis2=reg2.get()
branch2=b2.get()
l=[name2,regis2,branch2]
if (None in l or "" in l):
invalid['text']="Please fill all the fields"
else:
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#insert into tabels
c.execute("""UPDATE mark_list SET name=? WHERE name=?""",(name2,' '))
c.execute("""UPDATE mark_list SET registration_no=? WHERE registration_no=?""",(regis2,' '))
c.execute("""UPDATE mark_list SET branch=? WHERE branch=?""",(branch2,' '))
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import subject
subject.main()
def back():
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
c.execute("""DELETE from mark_list where name=' '""")
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import welcome
welcome.main()
#buttons
sub1=Button(inp,text="Submit",borderwidth=3,padx=40,font=f,bg='green',command=submit)
sub1.grid(row=5,column=0,columnspan=2)
back1=Button(inp,text="Back",borderwidth=3,padx=20,font=f,bg='red',command=back)
back1.grid(row=6,column=0,columnspan=2)
inp.mainloop()
if __name__=='__main__':
main()
| en | 0.339318 | #cursor #insert into tabels UPDATE mark_list SET name=? WHERE name=? UPDATE mark_list SET registration_no=? WHERE registration_no=? UPDATE mark_list SET branch=? WHERE branch=? #commit_changes #close connection #cursor DELETE from mark_list where name=' ' #commit_changes #close connection #buttons | 3.26545 | 3 |
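The form above only updates rows that already hold single-space placeholders, so it presupposes an existing mark_list table; below is a minimal schema and seed row consistent with the columns it references (name, registration_no, branch) — the original project's schema may differ.

import sqlite3

db = sqlite3.connect("mark_list.db")
c = db.cursor()
# Hypothetical schema inferred from the column names used in the UPDATE statements above.
c.execute("""CREATE TABLE IF NOT EXISTS mark_list
             (name TEXT, registration_no TEXT, branch TEXT)""")
# The UPDATE statements target a placeholder row whose fields are single spaces.
c.execute("INSERT INTO mark_list (name, registration_no, branch) VALUES (' ', ' ', ' ')")
db.commit()
db.close()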
IQS5xx/IQS5xx.py | jakezimmerTHT/py_IQS5xx | 1 | 9080 | <reponame>jakezimmerTHT/py_IQS5xx<filename>IQS5xx/IQS5xx.py<gh_stars>1-10
import unittest
import time
import logging
logging.basicConfig()
from intelhex import IntelHex
import Adafruit_GPIO.I2C as i2c
from gpiozero import OutputDevice
from gpiozero import DigitalInputDevice
from ctypes import c_uint8, c_uint16, c_uint32, cast, pointer, POINTER
from ctypes import create_string_buffer, Structure
from fcntl import ioctl
import struct
import Adafruit_PureIO.smbus as smbus
from Adafruit_PureIO.smbus import make_i2c_rdwr_data
from IQS5xx_Defs import *
def bytesToHexString(bytes):
if isinstance(bytes, basestring):
return ''.join('{:02x} '.format(ord(c)) for c in bytes)
if isinstance(bytes, bytearray):
return ''.join('{:02x} '.format(b) for b in bytes)
raise ValueError("Must pass bytesToHexString() a string or bytearray")
IQS5xx_DEFAULT_ADDRESS = 0x74
IQS5xx_MAX_ADDRESS = 0x78
CHECKSUM_DESCRIPTOR_START = 0x83C0
CHECKSUM_DESCRIPTOR_END = 0x83FF
APP_START_ADDRESS = 0x8400
APP_END_ADDRESS = 0xBDFF #inclusive
NV_SETTINGS_START = 0xBE00
NV_SETTINGS_END = 0xBFFF #inclusive
FLASH_PADDING = 0x00
BLOCK_SIZE = 64
APP_SIZE_BLOCKS = (((APP_END_ADDRESS+1) - APP_START_ADDRESS) / BLOCK_SIZE)
NV_SETTINGS_SIZE_BLOCKS = (((NV_SETTINGS_END+1) - NV_SETTINGS_START) / BLOCK_SIZE)
BL_CMD_READ_VERSION = 0x00
BL_CMD_READ_64_BYTES = 0x01
BL_CMD_EXECUTE_APP = 0x02 # Write only, 0 bytes
BL_CMD_RUN_CRC = 0x03
BL_CRC_FAIL = 0x01
BL_CRC_PASS = 0x00
BL_VERSION = 0x0200
def swapEndianess(uint16):
return ((uint16 & 0xFF) << 8) | ((uint16 & 0xFF00) >> 8)
def writeBytes(self, data):
self._bus.write_bytes(self._address, bytes(data))
i2c.Device.writeBytes = writeBytes
def readBytes(self, data):
return self._bus.read_bytes(self._address, data)
i2c.Device.readBytes = readBytes
def writeRawListReadRawList(self, data, readLength):
self.writeBytes(data)
# This isn't using a repeat start
return self.readBytes(readLength)
i2c.Device.writeRawListReadRawList = writeRawListReadRawList
def writeBytes_16BitAddress(self, address, data):
addressBytes = struct.pack('>H', address)
dataBytes = bytearray(data)
bytes = addressBytes + dataBytes
self.writeBytes(bytes)
i2c.Device.writeBytes_16BitAddress = writeBytes_16BitAddress
def readBytes_16BitAddress(self, address, length):
assert self._bus._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint16(swapEndianess(address))
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(self._address, 0, 2, cast(pointer(reg), POINTER(c_uint8))), # Write cmd register.
(self._address, smbus.I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._bus._device.fileno(), smbus.I2C_RDWR, request)
return bytearray(result.raw) # Use .raw instead of .value which will stop at a null byte!
i2c.Device.readBytes_16BitAddress = readBytes_16BitAddress
def readByte_16BitAddress(self, address):
result = self.readBytes_16BitAddress(address, 1)
result = struct.unpack('>B', result)[0]
return result
i2c.Device.readByte_16BitAddress = readByte_16BitAddress
def writeByte_16BitAddress(self, address, value, mask=0xFF):
    if mask != 0xFF:
register = self.readByte_16BitAddress(address)
register &= ~mask
register |= (value & mask)
value = register
format = '>HB' if (value > 0) else '>Hb'
bytes = struct.pack(format, address, value)
self.writeBytes(bytes)
i2c.Device.writeByte_16BitAddress = writeByte_16BitAddress
class IQS5xx(object):
def __init__(self, resetPin, readyPin, address=IQS5xx_DEFAULT_ADDRESS):
self.address = address
self._resetPinNum = resetPin
self._readyPinNum = readyPin
self._resetPin = OutputDevice(pin=self._resetPinNum, active_high=False, initial_value=True)
self._readypin = DigitalInputDevice(pin=self._readyPinNum, active_state=True, pull_up=None)
def begin(self):
self.releaseReset()
time.sleep(0.01)
self.waitUntilReady()
self.acknowledgeReset()
time.sleep(0.01)
self.acknowledgeReset()
time.sleep(0.01)
self.endSession()
time.sleep(0.020)
@property
def address(self):
return self.__address
@address.setter
def address(self, value):
if (value < IQS5xx_DEFAULT_ADDRESS) or (value > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
self.__address = value
self._device = i2c.get_i2c_device(value)
self._logger = logging.getLogger('IQS5xx.Address.{0:#0X}'.format(value))
def readUniqueID(self):
return bytesToHexString(self._device.readBytes_16BitAddress(0xF000, 12))
def setupComplete(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, SETUP_COMPLETE, SETUP_COMPLETE)
def setManualControl(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, MANUAL_CONTROL, MANUAL_CONTROL)
self._device.writeByte_16BitAddress(SystemControl0_adr, 0x00, 0x07) # active mode
def setTXPinMappings(self, pinList):
assert isinstance(pinList, list), "TX pinList must be a list of integers"
assert 0 <= len(pinList) <= 15, "TX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(TxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalTx_adr, len(pinList))
def setRXPinMappings(self, pinList):
assert isinstance(pinList, list), "RX pinList must be a list of integers"
assert 0 <= len(pinList) <= 10, "RX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(RxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalRx_adr, len(pinList))
def enableChannel(self, txChannel, rxChannel, enabled):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ActiveChannels_adr + (txChannel * 2)
if rxChannel >= 8:
mask = 1 << (rxChannel - 8)
else:
registerAddy += 1
mask = 1 << rxChannel
value = mask if enabled else 0x00
self._device.writeByte_16BitAddress(registerAddy, value, mask)
def setTXRXChannelCount(self, tx_count, rx_count):
        assert 0 <= tx_count <= 15, "tx_count must be less than or equal to 15"
        assert 0 <= rx_count <= 10, "rx_count must be less than or equal to 10"
        self._device.writeByte_16BitAddress(TotalTx_adr, tx_count)
        self._device.writeByte_16BitAddress(TotalRx_adr, rx_count)
def swapXY(self, swapped):
value = SWITCH_XY_AXIS if swapped else 0x00
self._device.writeByte_16BitAddress(XYConfig0_adr, value, SWITCH_XY_AXIS)
def setAtiGlobalC(self, globalC):
self._device.writeByte_16BitAddress(GlobalATIC_adr, globalC)
def setChannel_ATI_C_Adjustment(self, txChannel, rxChannel, adjustment):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ATICAdjust_adr + (txChannel * 10) + rxChannel
self._device.writeByte_16BitAddress(registerAddy, adjustment)
def setTouchMultipliers(self, set, clear):
self._device.writeByte_16BitAddress(GlobalTouchSet_adr, set)
self._device.writeByte_16BitAddress(GlobalTouchClear_adr, clear)
def rxFloat(self, floatWhenInactive):
value = RX_FLOAT if floatWhenInactive else 0x00
self._device.writeByte_16BitAddress(HardwareSettingsA_adr, value, RX_FLOAT)
def runAtiAlgorithm(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, AUTO_ATI, AUTO_ATI)
def acknowledgeReset(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, ACK_RESET, ACK_RESET)
def atiErrorDetected(self):
reg = self._device.readByte_16BitAddress(SystemInfo0_adr)
return bool(reg & ATI_ERROR)
def reseed(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, RESEED, RESEED)
def endSession(self):
self._device.writeByte_16BitAddress(EndWindow_adr, 0x00)
time.sleep(0.001)
def readVersionNumbers(self):
bytes = self._device.readBytes_16BitAddress(ProductNumber_adr, 6)
fields = struct.unpack(">HHBB",bytes)
return {"product":fields[0], "project":fields[1], "major":fields[2], "minor":fields[3]}
def bootloaderAvailable(self):
BOOTLOADER_AVAILABLE = 0xA5
NO_BOOTLOADER = 0xEE
result = self._device.readByte_16BitAddress(BLStatus_adr)
# result = ord(result)
if result == BOOTLOADER_AVAILABLE:
return True
elif result == NO_BOOTLOADER:
return False
else:
raise ValueError("Unexpected value returned for bootloader status: {0:#0X}".format(result))
def holdReset(self, millis=None):
self._resetPin.on()
if millis != None:
time.sleep(millis/1000.0)
self.releaseReset()
def releaseReset(self):
self._resetPin.off()
def isReady(self):
return self._readypin.is_active
def waitUntilReady(self, timeout=None):
self._readypin.wait_for_active(timeout)
def updateFirmware(self, hexFilePath, newDeviceAddress=None):
hexFile = IntelHex(source = hexFilePath)
hexFile.padding = FLASH_PADDING
appBinary = hexFile.tobinarray(start=APP_START_ADDRESS, end=NV_SETTINGS_END)
crcBinary = hexFile.tobinarray(start=CHECKSUM_DESCRIPTOR_START, end=CHECKSUM_DESCRIPTOR_END)
if newDeviceAddress:
self._logger.debug("Modifying the last byte in NV settings to change Device I2C Addrress to {0:#0X}".format(newDeviceAddress))
if (newDeviceAddress < IQS5xx_DEFAULT_ADDRESS) or (newDeviceAddress > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
appBinary[-1] = newDeviceAddress
# Step 1 - Enter Bootloader
self._logger.debug("Entering Bootloader")
bootloaderAddress = 0x40 ^ self.address
bootloaderDevice = i2c.get_i2c_device(bootloaderAddress)
self.holdReset(100)
bootloaderEntered = False
for i in range(10):
try:
version = bootloaderDevice.readU16(BL_CMD_READ_VERSION, little_endian=False)
bootloaderEntered = True
except:
pass
if not bootloaderEntered:
raise IOError("Timeout while trying to enter bootlaoder")
self._logger.debug("Bootloader entered successfully")
# Step 2 - Read and verify the bootloader version number
self._logger.debug("Reading Bootloader version")
if version != BL_VERSION:
raise Exception("Incompatible bootloader version detected: {0:#0X}".format(version))
self._logger.debug("Bootloader version is compatible: 0x%02X",version)
# Step 3 - Write the new application firmware and settings
self._logger.debug("Starting to write Application and NV settings")
for blockNum in range(APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS):
blockAddress = APP_START_ADDRESS + (blockNum * BLOCK_SIZE)
self._logger.debug('Writing 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS ,blockAddress))
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = appBinary[blockNum*BLOCK_SIZE : (blockNum+1)*BLOCK_SIZE]
bootloaderDevice.writeBytes(data)
time.sleep(.010) # give the device time to write to flash
# Step 4 - Write the checksum descriptor section
self._logger.debug("Writing CRC section")
blockAddress = CHECKSUM_DESCRIPTOR_START
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = crcBinary[0:]
bootloaderDevice.writeBytes(data)
time.sleep(0.010) # give the device time to write to flash
# Step 5 - Perform CRC and read back settins section
time.sleep(0.1)
self._logger.debug("Performing CRC calculation")
bootloaderDevice.writeRaw8(BL_CMD_RUN_CRC)
time.sleep(0.2)
crcStatus = bootloaderDevice.readRaw8()
if crcStatus != BL_CRC_PASS:
raise Exception("CRC Failure")
self._logger.debug("CRC Success")
self._logger.debug("Reading back NV settings and comparing")
for blockNum in range(NV_SETTINGS_SIZE_BLOCKS):
blockAddress = NV_SETTINGS_START + (blockNum * BLOCK_SIZE)
self._logger.debug('Reading 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, NV_SETTINGS_SIZE_BLOCKS, blockAddress))
data = bytearray(3)
data[0] = BL_CMD_READ_64_BYTES
data[1] = (blockAddress >> 8) & 0xFF
data[2] = blockAddress & 0xFF
reply = bootloaderDevice.writeRawListReadRawList(data, BLOCK_SIZE)
expectedReply = appBinary[(APP_SIZE_BLOCKS+blockNum)*BLOCK_SIZE : (APP_SIZE_BLOCKS+blockNum+1)*BLOCK_SIZE].tostring()
if reply != expectedReply:
raise Exception("Unexpected values while reading back NV Setting: {0} \nExpected values: {1}".format(bytesToHexString(reply), bytesToHexString(expectedReply)))
self._logger.debug("NV Settings match expected values")
# Step 6 - Execute application
self._logger.debug("Execute Application")
bootloaderDevice.writeRaw8(BL_CMD_EXECUTE_APP)
if newDeviceAddress:
self.address = newDeviceAddress
class TestIQS5xx(unittest.TestCase):
hexFile = "IQS550_B000_Trackpad_40_15_2_2_BL.HEX"
possibleAddresses = [0x74, 0x75, 0x76, 0x77]
desiredAddress = 0x74
device = None
def setUp(self):
if not self.__class__.device:
self.__class__.device = IQS5xx(17, 27)
for address in self.__class__.possibleAddresses:
self.__class__.device.address = address
self.__class__.device._logger.setLevel(logging.DEBUG)
try:
self.__class__.device.waitUntilReady(1)
self.__class__.device.bootloaderAvailable()
break
except:
if address == self.__class__.possibleAddresses[-1]:
raise IOError("Couldn't communicate with the controller")
if self.__class__.device.address != self.__class__.desiredAddress:
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def tearDown(self):
if self.__class__.device.address != self.__class__.desiredAddress:
print("Cleaning up by reprogramming the controller to the default address")
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def test_bootloaderAvailable(self):
self.assertTrue(self.__class__.device.bootloaderAvailable())
# @unittest.skip
# def test_update(self):
# self.__class__.device.updateFirmware(self.__class__.hexFile)
#
# @unittest.skip
# def test_update_and_changeaddress(self):
# newAddy = 0x77
# self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=newAddy)
# self.assertEqual(self.__class__.device.address, newAddy)
# time.sleep(0.1)
# self.assertTrue(self.__class__.device.bootloaderAvailable())
if __name__ == '__main__':
unittest.main()
| import unittest
import time
import logging
logging.basicConfig()
from intelhex import IntelHex
import Adafruit_GPIO.I2C as i2c
from gpiozero import OutputDevice
from gpiozero import DigitalInputDevice
from ctypes import c_uint8, c_uint16, c_uint32, cast, pointer, POINTER
from ctypes import create_string_buffer, Structure
from fcntl import ioctl
import struct
import Adafruit_PureIO.smbus as smbus
from Adafruit_PureIO.smbus import make_i2c_rdwr_data
from IQS5xx_Defs import *
def bytesToHexString(bytes):
if isinstance(bytes, basestring):
return ''.join('{:02x} '.format(ord(c)) for c in bytes)
if isinstance(bytes, bytearray):
return ''.join('{:02x} '.format(b) for b in bytes)
raise ValueError("Must pass bytesToHexString() a string or bytearray")
IQS5xx_DEFAULT_ADDRESS = 0x74
IQS5xx_MAX_ADDRESS = 0x78
CHECKSUM_DESCRIPTOR_START = 0x83C0
CHECKSUM_DESCRIPTOR_END = 0x83FF
APP_START_ADDRESS = 0x8400
APP_END_ADDRESS = 0xBDFF #inclusive
NV_SETTINGS_START = 0xBE00
NV_SETTINGS_END = 0xBFFF #inclusive
FLASH_PADDING = 0x00
BLOCK_SIZE = 64
APP_SIZE_BLOCKS = (((APP_END_ADDRESS+1) - APP_START_ADDRESS) / BLOCK_SIZE)
NV_SETTINGS_SIZE_BLOCKS = (((NV_SETTINGS_END+1) - NV_SETTINGS_START) / BLOCK_SIZE)
BL_CMD_READ_VERSION = 0x00
BL_CMD_READ_64_BYTES = 0x01
BL_CMD_EXECUTE_APP = 0x02 # Write only, 0 bytes
BL_CMD_RUN_CRC = 0x03
BL_CRC_FAIL = 0x01
BL_CRC_PASS = 0x00
BL_VERSION = 0x0200
def swapEndianess(uint16):
return ((uint16 & 0xFF) << 8) | ((uint16 & 0xFF00) >> 8)
def writeBytes(self, data):
self._bus.write_bytes(self._address, bytes(data))
i2c.Device.writeBytes = writeBytes
def readBytes(self, data):
return self._bus.read_bytes(self._address, data)
i2c.Device.readBytes = readBytes
def writeRawListReadRawList(self, data, readLength):
self.writeBytes(data)
# This isn't using a repeat start
return self.readBytes(readLength)
i2c.Device.writeRawListReadRawList = writeRawListReadRawList
def writeBytes_16BitAddress(self, address, data):
addressBytes = struct.pack('>H', address)
dataBytes = bytearray(data)
bytes = addressBytes + dataBytes
self.writeBytes(bytes)
i2c.Device.writeBytes_16BitAddress = writeBytes_16BitAddress
def readBytes_16BitAddress(self, address, length):
assert self._bus._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint16(swapEndianess(address))
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(self._address, 0, 2, cast(pointer(reg), POINTER(c_uint8))), # Write cmd register.
(self._address, smbus.I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._bus._device.fileno(), smbus.I2C_RDWR, request)
return bytearray(result.raw) # Use .raw instead of .value which will stop at a null byte!
i2c.Device.readBytes_16BitAddress = readBytes_16BitAddress
def readByte_16BitAddress(self, address):
result = self.readBytes_16BitAddress(address, 1)
result = struct.unpack('>B', result)[0]
return result
i2c.Device.readByte_16BitAddress = readByte_16BitAddress
def writeByte_16BitAddress(self, address, value, mask=0xFF):
    if mask != 0xFF:
register = self.readByte_16BitAddress(address)
register &= ~mask
register |= (value & mask)
value = register
format = '>HB' if (value > 0) else '>Hb'
bytes = struct.pack(format, address, value)
self.writeBytes(bytes)
i2c.Device.writeByte_16BitAddress = writeByte_16BitAddress
class IQS5xx(object):
def __init__(self, resetPin, readyPin, address=IQS5xx_DEFAULT_ADDRESS):
self.address = address
self._resetPinNum = resetPin
self._readyPinNum = readyPin
self._resetPin = OutputDevice(pin=self._resetPinNum, active_high=False, initial_value=True)
self._readypin = DigitalInputDevice(pin=self._readyPinNum, active_state=True, pull_up=None)
def begin(self):
self.releaseReset()
time.sleep(0.01)
self.waitUntilReady()
self.acknowledgeReset()
time.sleep(0.01)
self.acknowledgeReset()
time.sleep(0.01)
self.endSession()
time.sleep(0.020)
@property
def address(self):
return self.__address
@address.setter
def address(self, value):
if (value < IQS5xx_DEFAULT_ADDRESS) or (value > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
self.__address = value
self._device = i2c.get_i2c_device(value)
self._logger = logging.getLogger('IQS5xx.Address.{0:#0X}'.format(value))
def readUniqueID(self):
return bytesToHexString(self._device.readBytes_16BitAddress(0xF000, 12))
def setupComplete(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, SETUP_COMPLETE, SETUP_COMPLETE)
def setManualControl(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, MANUAL_CONTROL, MANUAL_CONTROL)
self._device.writeByte_16BitAddress(SystemControl0_adr, 0x00, 0x07) # active mode
def setTXPinMappings(self, pinList):
assert isinstance(pinList, list), "TX pinList must be a list of integers"
assert 0 <= len(pinList) <= 15, "TX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(TxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalTx_adr, len(pinList))
def setRXPinMappings(self, pinList):
assert isinstance(pinList, list), "RX pinList must be a list of integers"
assert 0 <= len(pinList) <= 10, "RX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(RxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalRx_adr, len(pinList))
def enableChannel(self, txChannel, rxChannel, enabled):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ActiveChannels_adr + (txChannel * 2)
if rxChannel >= 8:
mask = 1 << (rxChannel - 8)
else:
registerAddy += 1
mask = 1 << rxChannel
value = mask if enabled else 0x00
self._device.writeByte_16BitAddress(registerAddy, value, mask)
def setTXRXChannelCount(self, tx_count, rx_count):
        assert 0 <= tx_count <= 15, "tx_count must be less than or equal to 15"
        assert 0 <= rx_count <= 10, "rx_count must be less than or equal to 10"
        self._device.writeByte_16BitAddress(TotalTx_adr, tx_count)
        self._device.writeByte_16BitAddress(TotalRx_adr, rx_count)
def swapXY(self, swapped):
value = SWITCH_XY_AXIS if swapped else 0x00
self._device.writeByte_16BitAddress(XYConfig0_adr, value, SWITCH_XY_AXIS)
def setAtiGlobalC(self, globalC):
self._device.writeByte_16BitAddress(GlobalATIC_adr, globalC)
def setChannel_ATI_C_Adjustment(self, txChannel, rxChannel, adjustment):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ATICAdjust_adr + (txChannel * 10) + rxChannel
self._device.writeByte_16BitAddress(registerAddy, adjustment)
def setTouchMultipliers(self, set, clear):
self._device.writeByte_16BitAddress(GlobalTouchSet_adr, set)
self._device.writeByte_16BitAddress(GlobalTouchClear_adr, clear)
def rxFloat(self, floatWhenInactive):
value = RX_FLOAT if floatWhenInactive else 0x00
self._device.writeByte_16BitAddress(HardwareSettingsA_adr, value, RX_FLOAT)
def runAtiAlgorithm(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, AUTO_ATI, AUTO_ATI)
def acknowledgeReset(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, ACK_RESET, ACK_RESET)
def atiErrorDetected(self):
reg = self._device.readByte_16BitAddress(SystemInfo0_adr)
return bool(reg & ATI_ERROR)
def reseed(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, RESEED, RESEED)
def endSession(self):
self._device.writeByte_16BitAddress(EndWindow_adr, 0x00)
time.sleep(0.001)
def readVersionNumbers(self):
bytes = self._device.readBytes_16BitAddress(ProductNumber_adr, 6)
fields = struct.unpack(">HHBB",bytes)
return {"product":fields[0], "project":fields[1], "major":fields[2], "minor":fields[3]}
def bootloaderAvailable(self):
BOOTLOADER_AVAILABLE = 0xA5
NO_BOOTLOADER = 0xEE
result = self._device.readByte_16BitAddress(BLStatus_adr)
# result = ord(result)
if result == BOOTLOADER_AVAILABLE:
return True
elif result == NO_BOOTLOADER:
return False
else:
raise ValueError("Unexpected value returned for bootloader status: {0:#0X}".format(result))
def holdReset(self, millis=None):
self._resetPin.on()
if millis != None:
time.sleep(millis/1000.0)
self.releaseReset()
def releaseReset(self):
self._resetPin.off()
def isReady(self):
return self._readypin.is_active
def waitUntilReady(self, timeout=None):
self._readypin.wait_for_active(timeout)
def updateFirmware(self, hexFilePath, newDeviceAddress=None):
hexFile = IntelHex(source = hexFilePath)
hexFile.padding = FLASH_PADDING
appBinary = hexFile.tobinarray(start=APP_START_ADDRESS, end=NV_SETTINGS_END)
crcBinary = hexFile.tobinarray(start=CHECKSUM_DESCRIPTOR_START, end=CHECKSUM_DESCRIPTOR_END)
if newDeviceAddress:
self._logger.debug("Modifying the last byte in NV settings to change Device I2C Addrress to {0:#0X}".format(newDeviceAddress))
if (newDeviceAddress < IQS5xx_DEFAULT_ADDRESS) or (newDeviceAddress > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
appBinary[-1] = newDeviceAddress
# Step 1 - Enter Bootloader
self._logger.debug("Entering Bootloader")
bootloaderAddress = 0x40 ^ self.address
bootloaderDevice = i2c.get_i2c_device(bootloaderAddress)
self.holdReset(100)
bootloaderEntered = False
for i in range(10):
try:
version = bootloaderDevice.readU16(BL_CMD_READ_VERSION, little_endian=False)
bootloaderEntered = True
except:
pass
if not bootloaderEntered:
raise IOError("Timeout while trying to enter bootlaoder")
self._logger.debug("Bootloader entered successfully")
# Step 2 - Read and verify the bootloader version number
self._logger.debug("Reading Bootloader version")
if version != BL_VERSION:
raise Exception("Incompatible bootloader version detected: {0:#0X}".format(version))
self._logger.debug("Bootloader version is compatible: 0x%02X",version)
# Step 3 - Write the new application firmware and settings
self._logger.debug("Starting to write Application and NV settings")
for blockNum in range(APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS):
blockAddress = APP_START_ADDRESS + (blockNum * BLOCK_SIZE)
self._logger.debug('Writing 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS ,blockAddress))
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = appBinary[blockNum*BLOCK_SIZE : (blockNum+1)*BLOCK_SIZE]
bootloaderDevice.writeBytes(data)
time.sleep(.010) # give the device time to write to flash
# Step 4 - Write the checksum descriptor section
self._logger.debug("Writing CRC section")
blockAddress = CHECKSUM_DESCRIPTOR_START
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = crcBinary[0:]
bootloaderDevice.writeBytes(data)
time.sleep(0.010) # give the device time to write to flash
# Step 5 - Perform CRC and read back settins section
time.sleep(0.1)
self._logger.debug("Performing CRC calculation")
bootloaderDevice.writeRaw8(BL_CMD_RUN_CRC)
time.sleep(0.2)
crcStatus = bootloaderDevice.readRaw8()
if crcStatus != BL_CRC_PASS:
raise Exception("CRC Failure")
self._logger.debug("CRC Success")
self._logger.debug("Reading back NV settings and comparing")
for blockNum in range(NV_SETTINGS_SIZE_BLOCKS):
blockAddress = NV_SETTINGS_START + (blockNum * BLOCK_SIZE)
self._logger.debug('Reading 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, NV_SETTINGS_SIZE_BLOCKS, blockAddress))
data = bytearray(3)
data[0] = BL_CMD_READ_64_BYTES
data[1] = (blockAddress >> 8) & 0xFF
data[2] = blockAddress & 0xFF
reply = bootloaderDevice.writeRawListReadRawList(data, BLOCK_SIZE)
expectedReply = appBinary[(APP_SIZE_BLOCKS+blockNum)*BLOCK_SIZE : (APP_SIZE_BLOCKS+blockNum+1)*BLOCK_SIZE].tostring()
if reply != expectedReply:
raise Exception("Unexpected values while reading back NV Setting: {0} \nExpected values: {1}".format(bytesToHexString(reply), bytesToHexString(expectedReply)))
self._logger.debug("NV Settings match expected values")
# Step 6 - Execute application
self._logger.debug("Execute Application")
bootloaderDevice.writeRaw8(BL_CMD_EXECUTE_APP)
if newDeviceAddress:
self.address = newDeviceAddress
class TestIQS5xx(unittest.TestCase):
hexFile = "IQS550_B000_Trackpad_40_15_2_2_BL.HEX"
possibleAddresses = [0x74, 0x75, 0x76, 0x77]
desiredAddress = 0x74
device = None
def setUp(self):
if not self.__class__.device:
self.__class__.device = IQS5xx(17, 27)
for address in self.__class__.possibleAddresses:
self.__class__.device.address = address
self.__class__.device._logger.setLevel(logging.DEBUG)
try:
self.__class__.device.waitUntilReady(1)
self.__class__.device.bootloaderAvailable()
break
except:
if address == self.__class__.possibleAddresses[-1]:
raise IOError("Couldn't communicate with the controller")
if self.__class__.device.address != self.__class__.desiredAddress:
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def tearDown(self):
if self.__class__.device.address != self.__class__.desiredAddress:
print("Cleaning up by reprogramming the controller to the default address")
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def test_bootloaderAvailable(self):
self.assertTrue(self.__class__.device.bootloaderAvailable())
# @unittest.skip
# def test_update(self):
# self.__class__.device.updateFirmware(self.__class__.hexFile)
#
# @unittest.skip
# def test_update_and_changeaddress(self):
# newAddy = 0x77
# self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=newAddy)
# self.assertEqual(self.__class__.device.address, newAddy)
# time.sleep(0.1)
# self.assertTrue(self.__class__.device.bootloaderAvailable())
if __name__ == '__main__':
unittest.main() | en | 0.461483 | #inclusive #inclusive # Write only, 0 bytes # This isn't using a repeat start # Build ctypes values to marshall between ioctl and Python. # Build ioctl request. # Write cmd register. # Read data. # Make ioctl call and return result data. # Use .raw instead of .value which will stop at a null byte! #0X}'.format(value)) # active mode # result = ord(result) #0X}".format(result)) #0X}".format(newDeviceAddress)) # Step 1 - Enter Bootloader # Step 2 - Read and verify the bootloader version number #0X}".format(version)) # Step 3 - Write the new application firmware and settings #0X}'.format(blockNum+1, APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS ,blockAddress)) # give the device time to write to flash # Step 4 - Write the checksum descriptor section # give the device time to write to flash # Step 5 - Perform CRC and read back settins section #0X}'.format(blockNum+1, NV_SETTINGS_SIZE_BLOCKS, blockAddress)) # Step 6 - Execute application # @unittest.skip # def test_update(self): # self.__class__.device.updateFirmware(self.__class__.hexFile) # # @unittest.skip # def test_update_and_changeaddress(self): # newAddy = 0x77 # self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=newAddy) # self.assertEqual(self.__class__.device.address, newAddy) # time.sleep(0.1) # self.assertTrue(self.__class__.device.bootloaderAvailable()) | 1.958284 | 2 |
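A bring-up sketch for the trackpad driver above, mirroring the pin numbers used by its test class; the import path and the wiring are assumptions and should be adapted to the actual board.

import logging
from IQS5xx import IQS5xx  # assumed import path for the module listed above

logging.basicConfig(level=logging.DEBUG)

tp = IQS5xx(resetPin=17, readyPin=27)  # BCM pin numbers taken from TestIQS5xx above
tp.begin()                             # release reset, acknowledge it, close the comms window
print(tp.readVersionNumbers())         # {'product': ..., 'project': ..., 'major': ..., 'minor': ...}
print("bootloader available:", tp.bootloaderAvailable())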
code/loader/lock.py | IBCNServices/StardogStreamReasoning | 5 | 9081 | <reponame>IBCNServices/StardogStreamReasoning
import threading
class RWLock:
"""Synchronization object used in a solution of so-called second
readers-writers problem. In this problem, many readers can simultaneously
access a share, and a writer has an exclusive access to this share.
Additionally, the following constraints should be met:
1) no reader should be kept waiting if the share is currently opened for
reading unless a writer is also waiting for the share,
2) no writer should be kept waiting for the share longer than absolutely
necessary.
The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7]
with a modification -- adding an additional lock (C{self.__readers_queue})
-- in accordance with [2].
Sources:
[1] <NAME>: "The little book of semaphores", Version 2.1.5, 2008
[2] <NAME>, <NAME>, <NAME>:
"Concurrent Control with 'Readers' and 'Writers'",
Communications of the ACM, 1971 (via [3])
[3] http://en.wikipedia.org/wiki/Readers-writers_problem
"""
def __init__(self):
self.__read_switch = _LightSwitch()
self.__write_switch = _LightSwitch()
self.__no_readers = threading.Lock()
self.__no_writers = threading.Lock()
self.__readers_queue = threading.Lock()
"""A lock giving an even higher priority to the writer in certain
cases (see [2] for a discussion)"""
def reader_acquire(self):
self.__readers_queue.acquire()
self.__no_readers.acquire()
self.__read_switch.acquire(self.__no_writers)
self.__no_readers.release()
self.__readers_queue.release()
def reader_release(self):
self.__read_switch.release(self.__no_writers)
def writer_acquire(self):
self.__write_switch.acquire(self.__no_readers)
self.__no_writers.acquire()
def writer_release(self):
self.__no_writers.release()
self.__write_switch.release(self.__no_readers)
class _LightSwitch:
"""An auxiliary "light switch"-like object. The first thread turns on the
"switch", the last one turns it off (see [1, sec. 4.2.2] for details)."""
def __init__(self):
self.__counter = 0
self.__mutex = threading.Lock()
def acquire(self, lock):
self.__mutex.acquire()
self.__counter += 1
if self.__counter == 1:
lock.acquire()
self.__mutex.release()
def release(self, lock):
self.__mutex.acquire()
self.__counter -= 1
if self.__counter == 0:
lock.release()
self.__mutex.release()
| import threading
class RWLock:
"""Synchronization object used in a solution of so-called second
readers-writers problem. In this problem, many readers can simultaneously
access a share, and a writer has an exclusive access to this share.
Additionally, the following constraints should be met:
1) no reader should be kept waiting if the share is currently opened for
reading unless a writer is also waiting for the share,
2) no writer should be kept waiting for the share longer than absolutely
necessary.
The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7]
with a modification -- adding an additional lock (C{self.__readers_queue})
-- in accordance with [2].
Sources:
[1] <NAME>: "The little book of semaphores", Version 2.1.5, 2008
[2] <NAME>, <NAME>, <NAME>:
"Concurrent Control with 'Readers' and 'Writers'",
Communications of the ACM, 1971 (via [3])
[3] http://en.wikipedia.org/wiki/Readers-writers_problem
"""
def __init__(self):
self.__read_switch = _LightSwitch()
self.__write_switch = _LightSwitch()
self.__no_readers = threading.Lock()
self.__no_writers = threading.Lock()
self.__readers_queue = threading.Lock()
"""A lock giving an even higher priority to the writer in certain
cases (see [2] for a discussion)"""
def reader_acquire(self):
self.__readers_queue.acquire()
self.__no_readers.acquire()
self.__read_switch.acquire(self.__no_writers)
self.__no_readers.release()
self.__readers_queue.release()
def reader_release(self):
self.__read_switch.release(self.__no_writers)
def writer_acquire(self):
self.__write_switch.acquire(self.__no_readers)
self.__no_writers.acquire()
def writer_release(self):
self.__no_writers.release()
self.__write_switch.release(self.__no_readers)
class _LightSwitch:
"""An auxiliary "light switch"-like object. The first thread turns on the
"switch", the last one turns it off (see [1, sec. 4.2.2] for details)."""
def __init__(self):
self.__counter = 0
self.__mutex = threading.Lock()
def acquire(self, lock):
self.__mutex.acquire()
self.__counter += 1
if self.__counter == 1:
lock.acquire()
self.__mutex.release()
def release(self, lock):
self.__mutex.acquire()
self.__counter -= 1
if self.__counter == 0:
lock.release()
self.__mutex.release() | en | 0.912033 | Synchronization object used in a solution of so-called second readers-writers problem. In this problem, many readers can simultaneously access a share, and a writer has an exclusive access to this share. Additionally, the following constraints should be met: 1) no reader should be kept waiting if the share is currently opened for reading unless a writer is also waiting for the share, 2) no writer should be kept waiting for the share longer than absolutely necessary. The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7] with a modification -- adding an additional lock (C{self.__readers_queue}) -- in accordance with [2]. Sources: [1] <NAME>: "The little book of semaphores", Version 2.1.5, 2008 [2] <NAME>, <NAME>, <NAME>: "Concurrent Control with 'Readers' and 'Writers'", Communications of the ACM, 1971 (via [3]) [3] http://en.wikipedia.org/wiki/Readers-writers_problem A lock giving an even higher priority to the writer in certain cases (see [2] for a discussion) An auxiliary "light switch"-like object. The first thread turns on the "switch", the last one turns it off (see [1, sec. 4.2.2] for details). | 3.485101 | 3 |
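A short sketch of the acquire/release pairing the docstring above describes; the shared dictionary and the module import path are illustrative.

import threading
from lock import RWLock  # assumed import path for the module listed above

shared = {}
rw_lock = RWLock()

def reader():
    rw_lock.reader_acquire()
    try:
        snapshot = dict(shared)      # many readers may hold the lock concurrently
    finally:
        rw_lock.reader_release()

def writer():
    rw_lock.writer_acquire()
    try:
        shared["updated"] = True     # a writer has exclusive access
    finally:
        rw_lock.writer_release()

threads = [threading.Thread(target=reader) for _ in range(3)] + [threading.Thread(target=writer)]
for t in threads:
    t.start()
for t in threads:
    t.join()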
src/pyfmodex/channel_group.py | Loodoor/UnamedPy | 1 | 9082 | <gh_stars>1-10
from .fmodobject import *
from .globalvars import dll as _dll
from .globalvars import get_class
class ChannelGroup(FmodObject):
def add_dsp(self, dsp):
check_type(dsp, get_class("DSP"))
c_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_AddDSP", d._ptr, byref(c_ptr))
return get_class("DSPConnection")(c_ptr)
def add_group(self, group):
check_type(group, ChannelGroup)
self._call_fmod("FMOD_ChannelGroup_AddGroup", group._ptr)
@property
def _occlusion(self):
direct = c_float()
reverb = c_float()
self._call_fmod("FMOD_ChannelGroup_Get3DOcclusion", byref(direct), byref(reverb))
return direct.value, reverb.value
@_occlusion.setter
def _occlusion(self, occs):
self._call_fmod("FMOD_ChannelGroup_Set3DOcclusion", c_float(occs[0]), c_float(occs[1]))
@property
def direct_occlusion(self):
return self._occlusion[0]
@direct_occlusion.setter
def direct_occlusion(self, occ):
self._occlusion = (occ, self._occlusion[1])
@property
def reverb_occlusion(self):
return self._occlusion[1]
@reverb_occlusion.setter
def reverb_occlusion(self, occ):
self._occlusion = (self._occlusion[0], occ)
def get_channel(self, idx):
c_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetChannel", idx, byref(c_ptr))
        return get_class("Channel")(c_ptr)
@property
def dsp_head(self):
dsp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetDSPHead", byref(dsp_ptr))
return get_class("DSP")(dsp_ptr)
def get_group(self, idx):
grp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetGroup", idx)
return ChannelGroup(grp_ptr)
@property
def mute(self):
mute = c_bool()
self._call_fmod("FMOD_ChannelGroup_GetMute", byref(mute))
return mute.value
@mute.setter
def mute(self, m):
self._call_fmod("FMOD_Channel_SetMute", m)
@property
def name(self):
buf = create_string_buffer(512)
self._call_fmod("FMOD_ChannelGroup_GetName", buf, 512)
return buf.value
@property
def num_channels(self):
num = c_int()
self._call_fmod("FMOD_ChannelGroup_GetNumChannels", byref(num))
return num.value
@property
def num_groups(self):
num = c_int()
self._call_fmod("FMOD_ChannelGroup_GetNumGroups", byref(num))
return num.value
@property
def parent_group(self):
grp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetParentGroup", byref(grp_ptr))
return ChannelGroup(grp_ptr)
@property
def paused(self):
paused = c_bool()
self._call_fmod("FMOD_ChannelGroup_GetPaused", byref(paused))
return paused.value
@paused.setter
def paused(self, p):
self._call_fmod("FMOD_ChannelGroup_SetPaused", p)
@property
def pitch(self):
pitch = c_float()
self._call_fmod("FMOD_ChannelGroup_GetPitch", byref(pitch))
return pitch.value
    @pitch.setter
    def pitch(self, p):
        self._call_fmod("FMOD_ChannelGroup_SetPitch", c_float(p))
def get_spectrum(self, numvalues, channeloffset, window):
arr = c_float * numvalues
arri = arr()
self._call_fmod("FMOD_ChannelGroup_GetSpectrum", byref(arri), numvalues, channeloffset, window)
return list(arri)
@property
def system_object(self):
sptr = c_void_p()
self._call_fmod("FMOD_channelGroup_GetSystemObject", byref(sptr))
return get_class("System")(sptr, False)
@property
def volume(self):
vol = c_float()
self._call_fmod("FMOD_ChannelGroup_GetVolume", byref(vol))
return vol.value
@volume.setter
def volume(self, vol):
self._call_fmod("FMOD_ChannelGroup_SetVolume", c_float(vol))
def get_wave_data(self, numvalues, channeloffset):
arr = c_float * numvalues
arri = arr()
self._call_fmod("FMOD_ChannelGroup_GetWaveData", byref(arri), numvalues, channeloffset)
return list(arri)
def override_3d_attributes(self, pos=0, vel=0):
self._call_fmod("FMOD_ChannelGroup_Override3DAttributes", pos, vel)
def override_frequency(self, freq):
self._call_fmod("FMOD_ChannelGroup_OverrideFrequency", c_float(freq))
def override_pan(self, pan):
self._call_fmod("FMOD_ChannelGroup_OverridePan", c_float(pan))
def override_reverb_properties(self, props):
check_type(props, REVERB_CHANNELPROPERTIES)
self._call_fmod("FMOD_ChannelGroup_OverrideReverbProperties", props)
def override_speaker_mix(self, frontleft, frontright, center, lfe, backleft, backright, sideleft, sideright):
self._call_fmod("FMOD_ChannelGroup_OverrideSpeakerMix", frontleft, frontright, center, lfe, backleft, backright,
sideleft, sideright)
def override_volume(self, vol):
self._call_fmod("FMOD_ChannelGroup_OverrideVolume", c_float(vol))
def release(self):
self._call_fmod("FMOD_ChannelGroup_Release")
def stop(self):
self._call_fmod("FMOD_ChannelGroup_Stop")
@property
def reverb_properties(self):
props = REVERB_CHANNELPROPERTIES()
ckresult(_dll.FMOD_ChannelGroup_GetReverbProperties(self._ptr, byref(props)))
return props
@reverb_properties.setter
def reverb_properties(self, props):
check_type(props, REVERB_CHANNELPROPERTIES)
ckresult(_dll.FMOD_ChannelGroup_SetReverbProperties(self._ptr, byref(props)))
| from .fmodobject import *
from .globalvars import dll as _dll
from .globalvars import get_class
class ChannelGroup(FmodObject):
def add_dsp(self, dsp):
check_type(dsp, get_class("DSP"))
c_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_AddDSP", d._ptr, byref(c_ptr))
return get_class("DSPConnection")(c_ptr)
def add_group(self, group):
check_type(group, ChannelGroup)
self._call_fmod("FMOD_ChannelGroup_AddGroup", group._ptr)
@property
def _occlusion(self):
direct = c_float()
reverb = c_float()
self._call_fmod("FMOD_ChannelGroup_Get3DOcclusion", byref(direct), byref(reverb))
return direct.value, reverb.value
@_occlusion.setter
def _occlusion(self, occs):
self._call_fmod("FMOD_ChannelGroup_Set3DOcclusion", c_float(occs[0]), c_float(occs[1]))
@property
def direct_occlusion(self):
return self._occlusion[0]
@direct_occlusion.setter
def direct_occlusion(self, occ):
self._occlusion = (occ, self._occlusion[1])
@property
def reverb_occlusion(self):
return self._occlusion[1]
@reverb_occlusion.setter
def reverb_occlusion(self, occ):
self._occlusion = (self._occlusion[0], occ)
def get_channel(self, idx):
c_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetChannel", idx, byref(c_ptr))
        return get_class("Channel")(c_ptr)
@property
def dsp_head(self):
dsp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetDSPHead", byref(dsp_ptr))
return get_class("DSP")(dsp_ptr)
def get_group(self, idx):
grp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetGroup", idx)
return ChannelGroup(grp_ptr)
@property
def mute(self):
mute = c_bool()
self._call_fmod("FMOD_ChannelGroup_GetMute", byref(mute))
return mute.value
@mute.setter
def mute(self, m):
self._call_fmod("FMOD_Channel_SetMute", m)
@property
def name(self):
buf = create_string_buffer(512)
self._call_fmod("FMOD_ChannelGroup_GetName", buf, 512)
return buf.value
@property
def num_channels(self):
num = c_int()
self._call_fmod("FMOD_ChannelGroup_GetNumChannels", byref(num))
return num.value
@property
def num_groups(self):
num = c_int()
self._call_fmod("FMOD_ChannelGroup_GetNumGroups", byref(num))
return num.value
@property
def parent_group(self):
grp_ptr = c_void_p()
self._call_fmod("FMOD_ChannelGroup_GetParentGroup", byref(grp_ptr))
return ChannelGroup(grp_ptr)
@property
def paused(self):
paused = c_bool()
self._call_fmod("FMOD_ChannelGroup_GetPaused", byref(paused))
return paused.value
@paused.setter
def paused(self, p):
self._call_fmod("FMOD_ChannelGroup_SetPaused", p)
@property
def pitch(self):
pitch = c_float()
self._call_fmod("FMOD_ChannelGroup_GetPitch", byref(pitch))
return pitch.value
    @pitch.setter
    def pitch(self, p):
        self._call_fmod("FMOD_ChannelGroup_SetPitch", c_float(p))
def get_spectrum(self, numvalues, channeloffset, window):
arr = c_float * numvalues
arri = arr()
self._call_fmod("FMOD_ChannelGroup_GetSpectrum", byref(arri), numvalues, channeloffset, window)
return list(arri)
@property
def system_object(self):
sptr = c_void_p()
self._call_fmod("FMOD_channelGroup_GetSystemObject", byref(sptr))
return get_class("System")(sptr, False)
@property
def volume(self):
vol = c_float()
self._call_fmod("FMOD_ChannelGroup_GetVolume", byref(vol))
return vol.value
@volume.setter
def volume(self, vol):
self._call_fmod("FMOD_ChannelGroup_SetVolume", c_float(vol))
def get_wave_data(self, numvalues, channeloffset):
arr = c_float * numvalues
arri = arr()
self._call_fmod("FMOD_ChannelGroup_GetWaveData", byref(arri), numvalues, channeloffset)
return list(arri)
def override_3d_attributes(self, pos=0, vel=0):
self._call_fmod("FMOD_ChannelGroup_Override3DAttributes", pos, vel)
def override_frequency(self, freq):
self._call_fmod("FMOD_ChannelGroup_OverrideFrequency", c_float(freq))
def override_pan(self, pan):
self._call_fmod("FMOD_ChannelGroup_OverridePan", c_float(pan))
def override_reverb_properties(self, props):
check_type(props, REVERB_CHANNELPROPERTIES)
self._call_fmod("FMOD_ChannelGroup_OverrideReverbProperties", props)
def override_speaker_mix(self, frontleft, frontright, center, lfe, backleft, backright, sideleft, sideright):
self._call_fmod("FMOD_ChannelGroup_OverrideSpeakerMix", frontleft, frontright, center, lfe, backleft, backright,
sideleft, sideright)
def override_volume(self, vol):
self._call_fmod("FMOD_ChannelGroup_OverrideVolume", c_float(vol))
def release(self):
self._call_fmod("FMOD_ChannelGroup_Release")
def stop(self):
self._call_fmod("FMOD_ChannelGroup_Stop")
@property
def reverb_properties(self):
props = REVERB_CHANNELPROPERTIES()
ckresult(_dll.FMOD_ChannelGroup_GetReverbProperties(self._ptr, byref(props)))
return props
@reverb_properties.setter
def reverb_properties(self, props):
check_type(props, REVERB_CHANNELPROPERTIES)
ckresult(_dll.FMOD_ChannelGroup_SetReverbProperties(self._ptr, byref(props))) | none | 1 | 2.128802 | 2 |
|
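A heavily hedged sketch of how a channel group might be obtained and adjusted through the surrounding pyfmodex package; the System factory and its method names are assumptions about code not shown here.

import pyfmodex

system = pyfmodex.System()                    # assumed top-level wrapper around FMOD system creation
system.init()
group = system.create_channel_group("music")  # assumed factory returning the ChannelGroup class above
group.volume = 0.5                            # wraps FMOD_ChannelGroup_SetVolume via the property above
group.mute = False
group.release()
system.release()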
program.py | siddhi117/ADB_Homework | 0 | 9083 | import sqlite3
from bottle import route, run,debug,template,request,redirect
@route('/todo')
def todo_list():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
result = c.fetchall()
c.close()
output = template('make_table', rows=result)
return output
@route('/new', method='GET')
def new_item():
if request.GET.save:
new = request.GET.task.strip()
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("INSERT INTO todo (task,status) VALUES (?,?)", (new,1))
new_id = c.lastrowid
conn.commit()
c.close()
redirect('/todo')
#return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id
else:
return template('new_task.tpl')
@route('/do_insert' , method='GET')
def get_id():
redirect('/new')
@route('/edit/<no:int>', method='GET')
def edit_item(no):
if request.GET.save:
edit = request.GET.task.strip()
status = request.GET.status.strip()
if status == 'open':
status = 1
else:
status = 0
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
conn.commit()
return '<p>The item number %s was successfully updated</p>' % no
else:
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no)))
cur_data = c.fetchone()
return template('edit_task', old=cur_data, no=no)
@route('/find_edit' , method='GET')
def get_id():
id_edit = request.GET.editdata.strip()
redirect('/edit/' + id_edit)
@route('/delete/<no:int>', method='GET')
def delete_item(no):
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("DELETE FROM todo WHERE id LIKE ?", (str(no)))
conn.commit()
redirect('/todo')
@route('/find_delete' , method='GET')
def get_id():
id_delete = request.GET.deletedata.strip()
redirect('/delete/' + id_delete)
debug(True)
run(reloader=True)
| import sqlite3
from bottle import route, run,debug,template,request,redirect
@route('/todo')
def todo_list():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
result = c.fetchall()
c.close()
output = template('make_table', rows=result)
return output
@route('/new', method='GET')
def new_item():
if request.GET.save:
new = request.GET.task.strip()
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("INSERT INTO todo (task,status) VALUES (?,?)", (new,1))
new_id = c.lastrowid
conn.commit()
c.close()
redirect('/todo')
#return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id
else:
return template('new_task.tpl')
@route('/do_insert' , method='GET')
def get_id():
redirect('/new')
@route('/edit/<no:int>', method='GET')
def edit_item(no):
if request.GET.save:
edit = request.GET.task.strip()
status = request.GET.status.strip()
if status == 'open':
status = 1
else:
status = 0
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
conn.commit()
return '<p>The item number %s was successfully updated</p>' % no
else:
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no)))
cur_data = c.fetchone()
return template('edit_task', old=cur_data, no=no)
@route('/find_edit' , method='GET')
def get_id():
id_edit = request.GET.editdata.strip()
redirect('/edit/' + id_edit)
@route('/delete/<no:int>', method='GET')
def delete_item(no):
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("DELETE FROM todo WHERE id LIKE ?", (str(no)))
conn.commit()
redirect('/todo')
@route('/find_delete' , method='GET')
def get_id():
id_delete = request.GET.deletedata.strip()
redirect('/delete/' + id_delete)
debug(True)
run(reloader=True)
| en | 0.936858 | #return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id | 2.714779 | 3 |
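# Minimal setup sketch for the Bottle to-do app above (program.py). The app
# assumes an existing SQLite file 'todo.db' with a 'todo' table; the schema
# below is inferred from the queries in the script and is an assumption, not
# part of the original repository.
import sqlite3

def init_todo_db(path='todo.db'):
    conn = sqlite3.connect(path)
    conn.execute("CREATE TABLE IF NOT EXISTS todo "
                 "(id INTEGER PRIMARY KEY AUTOINCREMENT, task TEXT NOT NULL, status INTEGER DEFAULT 1)")
    conn.execute("INSERT INTO todo (task, status) VALUES (?, ?)", ('buy milk', 1))
    conn.commit()
    conn.close()

if __name__ == '__main__':
    init_todo_db()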
pipeline/metadata/maxmind.py | censoredplanet/censoredplanet-analysis | 6 | 9084 | <gh_stars>1-10
"""Module to initialize Maxmind databases and lookup IP metadata."""
import logging
import os
from typing import Optional, Tuple, NamedTuple
import geoip2.database
from pipeline.metadata.mmdb_reader import mmdb_reader
MAXMIND_CITY = 'GeoLite2-City.mmdb'
MAXMIND_ASN = 'GeoLite2-ASN.mmdb'
# Tuple(netblock, asn, as_name, country)
# ex: ("1.0.0.1/24", 13335, "CLOUDFLARENET", "AU")
MaxmindReturnValues = NamedTuple('MaxmindReturnValues',
[('netblock', Optional[str]), ('asn', int),
('as_name', Optional[str]),
('country', Optional[str])])
class MaxmindIpMetadata():
"""Lookup database for Maxmind ASN and country metadata."""
def __init__(self, maxmind_folder: str) -> None:
"""Create a Maxmind Database.
Args:
maxmind_folder: a folder containing maxmind files.
Either a gcs filepath or a local system folder.
"""
maxmind_city_path = os.path.join(maxmind_folder, MAXMIND_CITY)
maxmind_asn_path = os.path.join(maxmind_folder, MAXMIND_ASN)
self.maxmind_city = mmdb_reader(maxmind_city_path)
self.maxmind_asn = mmdb_reader(maxmind_asn_path)
def lookup(self, ip: str) -> MaxmindReturnValues:
"""Lookup metadata infomation about an IP.
Args:
ip: string of the format 1.1.1.1 (ipv4 only)
Returns: MaxmindReturnValues
Raises:
KeyError: when the IP's ASN can't be found
"""
(asn, as_name, netblock) = self._get_maxmind_asn(ip)
country = self._get_country_code(ip)
if not asn:
raise KeyError(f"No Maxmind entry for {ip}")
return MaxmindReturnValues(netblock, asn, as_name, country)
def _get_country_code(self, vp_ip: str) -> Optional[str]:
"""Get country code for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
2-letter ISO country code
"""
try:
vp_info = self.maxmind_city.city(vp_ip)
return vp_info.country.iso_code
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None
def _get_maxmind_asn(
self, vp_ip: str) -> Tuple[Optional[int], Optional[str], Optional[str]]:
"""Get ASN information for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
Tuple containing AS num, AS org, and netblock
"""
try:
vp_info = self.maxmind_asn.asn(vp_ip)
asn = vp_info.autonomous_system_number
as_name = vp_info.autonomous_system_organization
if vp_info.network:
netblock: Optional[str] = vp_info.network.with_prefixlen
else:
netblock = None
return asn, as_name, netblock
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None, None, None
class FakeMaxmindIpMetadata(MaxmindIpMetadata):
"""A fake lookup table for testing MaxmindIpMetadata."""
# pylint: disable=super-init-not-called
def __init__(self) -> None:
pass
# pylint: disable=no-self-use
def lookup(self, _: str) -> MaxmindReturnValues:
return MaxmindReturnValues('192.168.127.12/16', 1221, 'ASN-TELSTRA', 'AU')
| """Module to initialize Maxmind databases and lookup IP metadata."""
import logging
import os
from typing import Optional, Tuple, NamedTuple
import geoip2.database
from pipeline.metadata.mmdb_reader import mmdb_reader
MAXMIND_CITY = 'GeoLite2-City.mmdb'
MAXMIND_ASN = 'GeoLite2-ASN.mmdb'
# Tuple(netblock, asn, as_name, country)
# ex: ("1.0.0.1/24", 13335, "CLOUDFLARENET", "AU")
MaxmindReturnValues = NamedTuple('MaxmindReturnValues',
[('netblock', Optional[str]), ('asn', int),
('as_name', Optional[str]),
('country', Optional[str])])
class MaxmindIpMetadata():
"""Lookup database for Maxmind ASN and country metadata."""
def __init__(self, maxmind_folder: str) -> None:
"""Create a Maxmind Database.
Args:
maxmind_folder: a folder containing maxmind files.
Either a gcs filepath or a local system folder.
"""
maxmind_city_path = os.path.join(maxmind_folder, MAXMIND_CITY)
maxmind_asn_path = os.path.join(maxmind_folder, MAXMIND_ASN)
self.maxmind_city = mmdb_reader(maxmind_city_path)
self.maxmind_asn = mmdb_reader(maxmind_asn_path)
def lookup(self, ip: str) -> MaxmindReturnValues:
"""Lookup metadata infomation about an IP.
Args:
ip: string of the format 1.1.1.1 (ipv4 only)
Returns: MaxmindReturnValues
Raises:
KeyError: when the IP's ASN can't be found
"""
(asn, as_name, netblock) = self._get_maxmind_asn(ip)
country = self._get_country_code(ip)
if not asn:
raise KeyError(f"No Maxmind entry for {ip}")
return MaxmindReturnValues(netblock, asn, as_name, country)
def _get_country_code(self, vp_ip: str) -> Optional[str]:
"""Get country code for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
2-letter ISO country code
"""
try:
vp_info = self.maxmind_city.city(vp_ip)
return vp_info.country.iso_code
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None
def _get_maxmind_asn(
self, vp_ip: str) -> Tuple[Optional[int], Optional[str], Optional[str]]:
"""Get ASN information for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
Tuple containing AS num, AS org, and netblock
"""
try:
vp_info = self.maxmind_asn.asn(vp_ip)
asn = vp_info.autonomous_system_number
as_name = vp_info.autonomous_system_organization
if vp_info.network:
netblock: Optional[str] = vp_info.network.with_prefixlen
else:
netblock = None
return asn, as_name, netblock
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None, None, None
class FakeMaxmindIpMetadata(MaxmindIpMetadata):
"""A fake lookup table for testing MaxmindIpMetadata."""
# pylint: disable=super-init-not-called
def __init__(self) -> None:
pass
# pylint: disable=no-self-use
def lookup(self, _: str) -> MaxmindReturnValues:
return MaxmindReturnValues('192.168.127.12/16', 1221, 'ASN-TELSTRA', 'AU') | en | 0.615686 | Module to initialize Maxmind databases and lookup IP metadata. # Tuple(netblock, asn, as_name, country) # ex: ("1.0.0.1/24", 13335, "CLOUDFLARENET", "AU") Lookup database for Maxmind ASN and country metadata. Create a Maxmind Database. Args: maxmind_folder: a folder containing maxmind files. Either a gcs filepath or a local system folder. Lookup metadata infomation about an IP. Args: ip: string of the format 1.1.1.1 (ipv4 only) Returns: MaxmindReturnValues Raises: KeyError: when the IP's ASN can't be found Get country code for IP address. Args: vp_ip: IP address of vantage point (as string) Returns: 2-letter ISO country code Get ASN information for IP address. Args: vp_ip: IP address of vantage point (as string) Returns: Tuple containing AS num, AS org, and netblock A fake lookup table for testing MaxmindIpMetadata. # pylint: disable=super-init-not-called # pylint: disable=no-self-use | 2.630947 | 3 |
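# Minimal usage sketch for MaxmindIpMetadata above. The import path follows the
# repo layout (pipeline/metadata/maxmind.py); the folder and IP below are
# placeholder assumptions. lookup() raises KeyError when the IP has no ASN entry.
from pipeline.metadata.maxmind import MaxmindIpMetadata

def describe_ip(ip, maxmind_folder='gs://my-bucket/maxmind'):
    db = MaxmindIpMetadata(maxmind_folder)
    try:
        values = db.lookup(ip)
    except KeyError:
        return '%s: no ASN information' % ip
    return '%s: AS%s (%s) %s %s' % (
        ip, values.asn, values.as_name, values.netblock, values.country)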
examples/plot_graph.py | huyvo/gevent-websocket-py3.5 | 0 | 9085 | from __future__ import print_function
"""
This example generates random data and plots a graph in the browser.
Run it using Gevent directly using:
$ python plot_graph.py
Or with a Gunicorn wrapper:
$ gunicorn -k "geventwebsocket.gunicorn.workers.GeventWebSocketWorker" \
plot_graph:resource
"""
import gevent
import random
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
from geventwebsocket._compat import range_type
class PlotApplication(WebSocketApplication):
def on_open(self):
for i in range_type(10000):
self.ws.send("0 %s %s\n" % (i, random.random()))
gevent.sleep(0.1)
def on_close(self, reason):
print("Connection Closed!!!", reason)
def static_wsgi_app(environ, start_response):
start_response("200 OK", [("Content-Type", "text/html")])
return open("plot_graph.html").readlines()
resource = Resource([
('/', static_wsgi_app),
('/data', PlotApplication)
])
if __name__ == "__main__":
server = WebSocketServer(('', 8000), resource, debug=True)
server.serve_forever()
| from __future__ import print_function
"""
This example generates random data and plots a graph in the browser.
Run it using Gevent directly using:
$ python plot_graph.py
Or with a Gunicorn wrapper:
$ gunicorn -k "geventwebsocket.gunicorn.workers.GeventWebSocketWorker" \
plot_graph:resource
"""
import gevent
import random
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
from geventwebsocket._compat import range_type
class PlotApplication(WebSocketApplication):
def on_open(self):
for i in range_type(10000):
self.ws.send("0 %s %s\n" % (i, random.random()))
gevent.sleep(0.1)
def on_close(self, reason):
print("Connection Closed!!!", reason)
def static_wsgi_app(environ, start_response):
start_response("200 OK", [("Content-Type", "text/html")])
return open("plot_graph.html").readlines()
resource = Resource([
('/', static_wsgi_app),
('/data', PlotApplication)
])
if __name__ == "__main__":
server = WebSocketServer(('', 8000), resource, debug=True)
server.serve_forever()
| en | 0.378002 | This example generates random data and plots a graph in the browser. Run it using Gevent directly using: $ python plot_graph.py Or with an Gunicorn wrapper: $ gunicorn -k "geventwebsocket.gunicorn.workers.GeventWebSocketWorker" \ plot_graph:resource | 3.253254 | 3 |
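# Minimal client sketch for the /data stream served by plot_graph.py above.
# It assumes the third-party 'websocket-client' package and the example's
# default bind address WebSocketServer(('', 8000), ...); both are assumptions.
from websocket import create_connection

def read_points(count=5, url='ws://localhost:8000/data'):
    ws = create_connection(url)
    try:
        # each message looks like "0 <index> <random value>\n"
        return [ws.recv() for _ in range(count)]
    finally:
        ws.close()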
nas_big_data/combo/best/combo_4gpu_8_agebo/predict.py | deephyper/NASBigData | 3 | 9086 | <gh_stars>1-10
import os
import numpy as np
import tensorflow as tf
from nas_big_data.combo.load_data import load_data_npz_gz
from deephyper.nas.run.util import create_dir
from deephyper.nas.train_utils import selectMetric
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in range(4)])
HERE = os.path.dirname(os.path.abspath(__file__))
fname = HERE.split("/")[-1]
output_dir = "logs"
create_dir(output_dir)
X_test, y_test = load_data_npz_gz(test=True)
dependencies = {
"r2":selectMetric("r2")
}
model = tf.keras.models.load_model(f"best_model_{fname}.h5", custom_objects=dependencies)
model.compile(
metrics=["mse", "mae", selectMetric("r2")]
)
score = model.evaluate(X_test, y_test)
score_names = ["loss", "mse", "mae", "r2"]
print("score:")
output = " ".join([f"{sn}:{sv:.3f}" for sn,sv in zip(score_names, score)])
print(output) | import os
import numpy as np
import tensorflow as tf
from nas_big_data.combo.load_data import load_data_npz_gz
from deephyper.nas.run.util import create_dir
from deephyper.nas.train_utils import selectMetric
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in range(4)])
HERE = os.path.dirname(os.path.abspath(__file__))
fname = HERE.split("/")[-1]
output_dir = "logs"
create_dir(output_dir)
X_test, y_test = load_data_npz_gz(test=True)
dependencies = {
"r2":selectMetric("r2")
}
model = tf.keras.models.load_model(f"best_model_{fname}.h5", custom_objects=dependencies)
model.compile(
metrics=["mse", "mae", selectMetric("r2")]
)
score = model.evaluate(X_test, y_test)
score_names = ["loss", "mse", "mae", "r2"]
print("score:")
output = " ".join([f"{sn}:{sv:.3f}" for sn,sv in zip(score_names, score)])
print(output) | none | 1 | 2.229583 | 2 |
|
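# Minimal sketch of the custom-object loading pattern used in predict.py above:
# a model saved with a custom metric must be re-loaded with that metric passed
# through custom_objects. The model path below is a placeholder assumption.
import tensorflow as tf
from deephyper.nas.train_utils import selectMetric

def load_model_with_r2(model_path='best_model.h5'):
    return tf.keras.models.load_model(
        model_path, custom_objects={'r2': selectMetric('r2')})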
ship/utils/utilfunctions.py | duncan-r/SHIP | 6 | 9087 | """
Summary:
Utility Functions that could be helpful in any part of the API.
All functions that are likely to be called across a number of classes
and Functions in the API should be grouped here for convenience.
Author:
<NAME>
Created:
01 Apr 2016
Copyright:
<NAME> 2016
TODO: This module, like a lot of others probably, needs reviewing for how
'Pythonic' it is. There are a lot of places where generators,
comprehensions, maps, etc should be used to speed things up and make
them a bit clearer.
More importantly there are a lot of places using '==' compare that
should be using 'in' etc. This could cause bugs and must be fixed
soon.
Updates:
"""
from __future__ import unicode_literals
import re
import os
import operator
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
# def resolveSeDecorator(se_vals, path):
# """Decorator function for replacing Scen/Evt placholders.
# """Decorator function for replacing Scen/Evt placeholders.
#
# Checks for scenario and event placeholders in the return value of a
#
# Args:
# se_vals(dict): standard scenario/event dictionary in the format:
# {'scenario': {
# """
# def seDecorator(func):
# def seWrapper(*args, **kwargs):
# result = func(*args, **kwargs)
#
# if '~' in result:
# # Check for scenario stuff
# for key, val in self.se_vals['scenario'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# # Check for event stuff
# for key, val in self.se_vals['event'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# return result
# return seWrapper
# return seDecorator
def formatFloat(value, no_of_dps, ignore_empty_str=True):
"""Format a float as a string to given number of decimal places.
Args:
value(float): the value to format.
no_of_dps(int): number of decimal places to format to.
ignore_empty_str(True): return a stripped blank string if set to True.
Return:
str - the formatted float.
Raises:
ValueError - if value param is not type float.
"""
if ignore_empty_str and not isNumeric(value) and str(value).strip() == '':
return str(value).strip()
if not isNumeric(value):
raise ValueError
decimal_format = '%0.' + str(no_of_dps) + 'f'
value = decimal_format % float(value)
return value
def checkFileType(file_path, ext):
"""Checks a file to see that it has the right extension.
Args:
file_path (str): The file path to check.
ext (List): list containing the extension types to match the file
against.
Returns:
True if the extension matches the ext variable given or False if not.
"""
file_ext = os.path.splitext(file_path)[1]
logger.info('File ext = ' + file_ext)
for e in ext:
if e == file_ext:
return True
else:
return False
def isNumeric(s):
"""Tests if string is a number or not.
Simply tries to convert it and catches the error if launched.
Args:
s (str): string to test number compatibility.
Returns:
Bool - True if number. False if not.
"""
try:
float(s)
return True
except (ValueError, TypeError):
return False
def encodeStr(value):
try:
value = unicode(value, "utf-8")
return value
except (UnicodeDecodeError, NameError, TypeError):
return value
def isString(value):
"""Tests a given value to see if it is an instance of basestring or not.
Note:
This function should be used whenever testing this as it accounts for
both Python 2.7+ and 3.2+ variations of string.
Args:
value: the variable to test.
Returns:
Bool - True if value is a unicode str (basestring type)
"""
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
# if not isinstance(value, basestring):
# return False
#
# return True
def isList(value):
"""Test a given value to see if it is a list or not.
Args:
value: the variable to test for list type.
Returns:
True if value is of type list; False otherwise.
"""
if not isinstance(value, list):
return False
return True
def arrayToString(self, str_array):
"""Convert a list to a String
Creates one string by adding each part of the array to one string using
', '.join()
Args:
str_array (List): to convert into single string.
Returns:
str - representation of the array joined together.
Raises:
ValueError: if not contents of list are instances of basestring.
"""
if not isinstance(str_array[0], basestring):
raise ValueError('Array values are not strings')
out_string = ''
out_string = ', '.join(str_array)
return out_string
def findSubstringInList(substr, the_list):
"""Returns a list containing the indices that a substring was found at.
Uses a generator to quickly find all indices that str appears in.
Args:
substr (str): the sub string to search for.
the_list (List): a list containing the strings to search.
Returns:
tuple - containing:
* a list with the indices that the substring was found in
(this list can be empty if no matches were found).
* an integer containing the number of elements it was found in.
"""
indices = [i for i, s in enumerate(the_list) if substr in s]
return indices, len(indices)
def findMax(val1, val2):
"""Returns tuple containing min, max of two values
Args:
val1: first integer or float.
val2: second integer or float.
Returns:
tuple - containing:
* lower value
* higher value
* False if not same or True if the same.
"""
if val1 == val2:
return val1, val2, True
elif val1 > val2:
return val2, val1, False
else:
return val1, val2, False
def fileExtensionWithoutPeriod(filepath, name_only=False):
"""Extracts the extension without '.' from filepath.
The extension will always be converted to lower case before returning.
Args:
filepath (str): A full filepath if name_only=False. Otherwise a file
name with extension if name_only=True.
name_only (bool): True if filepath is only filename.extension.
"""
if name_only:
file, ext = os.path.splitext(filepath)
else:
path, filename = os.path.split(filepath)
file, ext = os.path.splitext(filename)
ext = ext[1:]
return ext.lower()
def findWholeWord(w):
"""Find a whole word amoungst a string."""
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
def convertRunOptionsToSEDict(options):
"""Converts tuflow command line options to scenario/event dict.
Tuflow uses command line option (e.g. -s1 blah -e1 blah) to set scenario
values which can either be provided on the command line or through the
FMP run form. The TuflowLoader can use these arguments but requires a
slightly different setup.
This function converts the command line string into the scenario and
event dictionary expected by the TuflowLoader.
Args:
options(str): command line options.
Return:
dict - {'scenario': {'s1': blah}, 'event': {'e1': blah}}
Raises:
AttributeError: if both -s and -s1 or -e and -e1 occur in the options
string. -x and -x1 are treated as the same variable by tuflow and
one of the values would be ignored.
"""
if ' -s ' in options and ' -s1 ' in options:
raise AttributeError
if ' -e ' in options and ' -e1 ' in options:
raise AttributeError
outvals = {'scenario': {}, 'event': {}}
vals = options.split(" ")
for i in range(len(vals)):
if vals[i].startswith('-s'):
outvals['scenario'][vals[i][1:]] = vals[i + 1]
elif vals[i].startswith('-e'):
outvals['event'][vals[i][1:]] = vals[i + 1]
return outvals
def getSEResolvedFilename(filename, se_vals):
"""Replace a tuflow placeholder filename with the scenario/event values.
Replaces all of the placeholder values (e.g. ~s1~_~e1~) in a tuflow
filename with the corresponding values provided in the run options string.
If the run options flags are not found in the filename their values will
be appended to the end of the string.
The setup of the returned filename is always the same:
- First replace all placeholders with corresponding flag values.
- s1 == s and e1 == e.
- Append additional e values to end with '_' before first and '+' before others.
- Append additional s values to end with '_' before first and '+' before others.
Args:
filename(str): the filename to update.
se_vals(str): the run options string containing the 's' and
'e' flags and their corresponding values.
Return:
str - the updated filename.
"""
if not 'scenario' in se_vals.keys():
se_vals['scenario'] = {}
if not 'event' in se_vals.keys():
se_vals['event'] = {}
# Format the key value pairs into a list and combine the scenario and
# event list together and sort them into e, e1, e2, s, s1, s2 order.
scen_keys = ['-' + a for a in se_vals['scenario'].keys()]
scen_vals = se_vals['scenario'].values()
event_keys = ['-' + a for a in se_vals['event'].keys()]
event_vals = se_vals['event'].values()
scen = [list(a) for a in zip(scen_keys, scen_vals)]
event = [list(a) for a in zip(event_keys, event_vals)]
se_vals = scen + event
vals = sorted(se_vals, key=operator.itemgetter(0))
# Build a new filename by replacing or adding the flag values
outname = filename
in_e = False
for v in vals:
placeholder = ''.join(['~', v[0][1:], '~'])
if placeholder in filename:
outname = outname.replace(placeholder, v[1])
elif v[0] == '-e1' and '~e~' in filename and not '-e' in se_vals:
outname = outname.replace('~e~', v[1])
elif v[0] == '-s1' and '~s~' in filename and not '-s' in se_vals:
outname = outname.replace('~s~', v[1])
# DEBUG - CHECK THIS IS TRUE!
elif v[0] == '-e' and '~e1~' in filename:
outname = outname.replace('~e1~', v[1])
elif v[0] == '-s' and '~s1~' in filename:
outname = outname.replace('~s1~', v[1])
else:
if v[0].startswith('-e'):
if not in_e:
prefix = '_'
else:
prefix = '+'
in_e = True
elif v[0].startswith('-s'):
if in_e:
prefix = '_'
else:
prefix = '+'
in_e = False
outname += prefix + v[1]
return outname
def enum(*sequential, **named):
"""Creates a new enum using the values handed to it.
Taken from <NAME> on StackOverflow:
http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
Examples:
Can be created and accessed using:
>>> Numbers = enum('ZERO', 'ONE', 'TWO')
>>> Numbers.ZERO
0
>>> Numbers.ONE
1
Or reverse the process to get the name from the value:
>>> Numbers.reverse_mapping[2]
'TWO'
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type(str('Enum'), (), enums)
class FileQueue(object):
"""Queueing class for storing data to go into the database
"""
def __init__(self):
self.items = []
def isEmpty(self):
"""Returns True if list is empty
"""
return self.items == []
def enqueue(self, item):
"""Add an item to the queue
"""
self.items.insert(0, item)
def dequeue(self):
"""Pop an item from the front of the queue.
"""
return self.items.pop()
def size(self):
"""Get the size of the queue
"""
return len(self.items)
class LoadStack(object):
"""Stack class for loading logic."""
def __init__(self, max_size=-1):
self.items = []
self.max_size = max_size
def isEmpty(self):
"""Return True if stack is empty."""
return self.items == []
def add(self, item):
"""Add an item to the stack.
Args:
item: the item to add to the stack.
Raises:
IndexError: if max_size has been set and adding another item would
make the stack bigger than max size.
"""
if not self.max_size == -1:
if len(self.items) + 1 > self.max_size:
raise IndexError
self.items.append(item)
def pop(self):
"""Get an item From the stack.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items.pop()
def peek(self):
"""See what the next item on the stack is, but don't remove it.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items[-1]
def size(self):
"""Return the number of items in the stack."""
return len(self.items)
| """
Summary:
Utility Functions that could be helpful in any part of the API.
All functions that are likely to be called across a number of classes
and Functions in the API should be grouped here for convenience.
Author:
<NAME>
Created:
01 Apr 2016
Copyright:
<NAME> 2016
TODO: This module, like a lot of others probably, needs reviewing for how
'Pythonic' it is. There are a lot of places where generators,
comprehensions, maps, etc should be used to speed things up and make
them a bit clearer.
More importantly there are a lot of places using '==' compare that
should be using 'in' etc. This could cause bugs and must be fixed
soon.
Updates:
"""
from __future__ import unicode_literals
import re
import os
import operator
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
# def resolveSeDecorator(se_vals, path):
# """Decorator function for replacing Scen/Evt placeholders.
#
# Checks for scenario and event placeholders in the return value of a
# function and replaces them with corresponding values if found.
#
# Args:
# se_vals(dict): standard scenario/event dictionary in the format:
# {'scenario': {
# """
# def seDecorator(func):
# def seWrapper(*args, **kwargs):
# result = func(*args, **kwargs)
#
# if '~' in result:
# # Check for scenario stuff
# for key, val in self.se_vals['scenario'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# # Check for event stuff
# for key, val in self.se_vals['event'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# return result
# return seWrapper
# return seDecorator
def formatFloat(value, no_of_dps, ignore_empty_str=True):
"""Format a float as a string to given number of decimal places.
Args:
value(float): the value to format.
no_of_dps(int): number of decimal places to format to.
ignore_empty_str(True): return a stripped blank string if set to True.
Return:
str - the formatted float.
Raises:
ValueError - if value param is not type float.
"""
if ignore_empty_str and not isNumeric(value) and str(value).strip() == '':
return str(value).strip()
if not isNumeric(value):
raise ValueError
decimal_format = '%0.' + str(no_of_dps) + 'f'
value = decimal_format % float(value)
return value
def checkFileType(file_path, ext):
"""Checks a file to see that it has the right extension.
Args:
file_path (str): The file path to check.
ext (List): list containing the extension types to match the file
against.
Returns:
True if the extension matches the ext variable given or False if not.
"""
file_ext = os.path.splitext(file_path)[1]
logger.info('File ext = ' + file_ext)
for e in ext:
if e == file_ext:
return True
else:
return False
def isNumeric(s):
"""Tests if string is a number or not.
Simply tries to convert it and catches the error if launched.
Args:
s (str): string to test number compatibility.
Returns:
Bool - True if number. False if not.
"""
try:
float(s)
return True
except (ValueError, TypeError):
return False
def encodeStr(value):
try:
value = unicode(value, "utf-8")
return value
except (UnicodeDecodeError, NameError, TypeError):
return value
def isString(value):
"""Tests a given value to see if it is an instance of basestring or not.
Note:
This function should be used whenever testing this as it accounts for
both Python 2.7+ and 3.2+ variations of string.
Args:
value: the variable to test.
Returns:
Bool - True if value is a unicode str (basestring type)
"""
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
# if not isinstance(value, basestring):
# return False
#
# return True
def isList(value):
"""Test a given value to see if it is a list or not.
Args:
value: the variable to test for list type.
Returns:
True if value is of type list; False otherwise.
"""
if not isinstance(value, list):
return False
return True
def arrayToString(self, str_array):
"""Convert a list to a String
Creates one string by adding each part of the array to one string using
', '.join()
Args:
str_array (List): to convert into single string.
Returns:
str - representation of the array joined together.
Raises:
ValueError: if not contents of list are instances of basestring.
"""
if not isinstance(str_array[0], basestring):
raise ValueError('Array values are not strings')
out_string = ''
out_string = ', '.join(str_array)
return out_string
def findSubstringInList(substr, the_list):
"""Returns a list containing the indices that a substring was found at.
Uses a generator to quickly find all indices that str appears in.
Args:
substr (str): the sub string to search for.
the_list (List): a list containing the strings to search.
Returns:
tuple - containing:
* a list with the indices that the substring was found in
(this list can be empty if no matches were found).
* an integer containing the number of elements it was found in.
"""
indices = [i for i, s in enumerate(the_list) if substr in s]
return indices, len(indices)
def findMax(val1, val2):
"""Returns tuple containing min, max of two values
Args:
val1: first integer or float.
val2: second integer or float.
Returns:
tuple - containing:
* lower value
* higher value
* False if not same or True if the same.
"""
if val1 == val2:
return val1, val2, True
elif val1 > val2:
return val2, val1, False
else:
return val1, val2, False
def fileExtensionWithoutPeriod(filepath, name_only=False):
"""Extracts the extension without '.' from filepath.
The extension will always be converted to lower case before returning.
Args:
filepath (str): A full filepath if name_only=False. Otherwise a file
name with extension if name_only=True.
name_only (bool): True if filepath is only filename.extension.
"""
if name_only:
file, ext = os.path.splitext(filepath)
else:
path, filename = os.path.split(filepath)
file, ext = os.path.splitext(filename)
ext = ext[1:]
return ext.lower()
def findWholeWord(w):
"""Find a whole word amoungst a string."""
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
def convertRunOptionsToSEDict(options):
"""Converts tuflow command line options to scenario/event dict.
Tuflow uses command line option (e.g. -s1 blah -e1 blah) to set scenario
values which can either be provided on the command line or through the
FMP run form. The TuflowLoader can use these arguments but requires a
slightly different setup.
This function converts the command line string into the scenario and
event dictionary expected by the TuflowLoader.
Args:
options(str): command line options.
Return:
dict - {'scenario': {'s1': blah}, 'event': {'e1': blah}}
Raises:
AttributeError: if both -s and -s1 or -e and -e1 occur in the options
string. -x and -x1 are treated as the same variable by tuflow and
one of the values would be ignored.
"""
if ' -s ' in options and ' -s1 ' in options:
raise AttributeError
if ' -e ' in options and ' -e1 ' in options:
raise AttributeError
outvals = {'scenario': {}, 'event': {}}
vals = options.split(" ")
for i in range(len(vals)):
if vals[i].startswith('-s'):
outvals['scenario'][vals[i][1:]] = vals[i + 1]
elif vals[i].startswith('-e'):
outvals['event'][vals[i][1:]] = vals[i + 1]
return outvals
def getSEResolvedFilename(filename, se_vals):
"""Replace a tuflow placeholder filename with the scenario/event values.
Replaces all of the placeholder values (e.g. ~s1~_~e1~) in a tuflow
filename with the corresponding values provided in the run options string.
If the run options flags are not found in the filename their values will
be appended to the end of the string.
The setup of the returned filename is always the same:
- First replace all placeholders with corresponding flag values.
- s1 == s and e1 == e.
- Append additional e values to end with '_' before first and '+' before others.
- Append additional s values to end with '_' before first and '+' before others.
Args:
filename(str): the filename to update.
se_vals(str): the run options string containing the 's' and
'e' flags and their corresponding values.
Return:
str - the updated filename.
"""
if not 'scenario' in se_vals.keys():
se_vals['scenario'] = {}
if not 'event' in se_vals.keys():
se_vals['event'] = {}
# Format the key value pairs into a list and combine the scenario and
# event list together and sort them into e, e1, e2, s, s1, s2 order.
scen_keys = ['-' + a for a in se_vals['scenario'].keys()]
scen_vals = se_vals['scenario'].values()
event_keys = ['-' + a for a in se_vals['event'].keys()]
event_vals = se_vals['event'].values()
scen = [list(a) for a in zip(scen_keys, scen_vals)]
event = [list(a) for a in zip(event_keys, event_vals)]
se_vals = scen + event
vals = sorted(se_vals, key=operator.itemgetter(0))
# Build a new filename by replacing or adding the flag values
outname = filename
in_e = False
for v in vals:
placeholder = ''.join(['~', v[0][1:], '~'])
if placeholder in filename:
outname = outname.replace(placeholder, v[1])
elif v[0] == '-e1' and '~e~' in filename and not '-e' in se_vals:
outname = outname.replace('~e~', v[1])
elif v[0] == '-s1' and '~s~' in filename and not '-s' in se_vals:
outname = outname.replace('~s~', v[1])
# DEBUG - CHECK THIS IS TRUE!
elif v[0] == '-e' and '~e1~' in filename:
outname = outname.replace('~e1~', v[1])
elif v[0] == '-s' and '~s1~' in filename:
outname = outname.replace('~s1~', v[1])
else:
if v[0].startswith('-e'):
if not in_e:
prefix = '_'
else:
prefix = '+'
in_e = True
elif v[0].startswith('-s'):
if in_e:
prefix = '_'
else:
prefix = '+'
in_e = False
outname += prefix + v[1]
return outname
def enum(*sequential, **named):
"""Creates a new enum using the values handed to it.
Taken from <NAME> on StackOverflow:
http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
Examples:
Can be created and accessed using:
>>> Numbers = enum('ZERO', 'ONE', 'TWO')
>>> Numbers.ZERO
0
>>> Numbers.ONE
1
Or reverse the process to get the name from the value:
>>> Numbers.reverse_mapping[2]
'TWO'
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type(str('Enum'), (), enums)
class FileQueue(object):
"""Queueing class for storing data to go into the database
"""
def __init__(self):
self.items = []
def isEmpty(self):
"""Returns True if list is empty
"""
return self.items == []
def enqueue(self, item):
"""Add an item to the queue
"""
self.items.insert(0, item)
def dequeue(self):
"""Pop an item from the front of the queue.
"""
return self.items.pop()
def size(self):
"""Get the size of the queue
"""
return len(self.items)
class LoadStack(object):
"""Stack class for loading logic."""
def __init__(self, max_size=-1):
self.items = []
self.max_size = max_size
def isEmpty(self):
"""Return True if stack is empty."""
return self.items == []
def add(self, item):
"""Add an item to the stack.
Args:
item: the item to add to the stack.
Raises:
IndexError: if max_size has been set and adding another item would
make the stack bigger than max size.
"""
if not self.max_size == -1:
if len(self.items) + 1 > self.max_size:
raise IndexError
self.items.append(item)
def pop(self):
"""Get an item From the stack.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items.pop()
def peek(self):
"""See what the next item on the stack is, but don't remove it.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items[-1]
def size(self):
"""Return the number of items in the stack."""
return len(self.items)
| en | 0.725658 | Summary:
Utility Functions that could be helpful in any part of the API.
All functions that are likely to be called across a number of classes
and Functions in the API should be grouped here for convenience.
Author:
<NAME>
Created:
01 Apr 2016
Copyright:
<NAME> 2016
TODO: This module, like a lot of other probably, needs reviewing for how
'Pythonic' t is. There are a lot of places where generators,
comprehensions, maps, etc should be used to speed things up and make
them a bit clearer.
More importantly there are a lot of places using '==' compare that
should be using 'in' etc. This could cause bugs and must be fixed
soon.
Updates: logging references with a __name__ set to this module. # def resolveSeDecorator(se_vals, path): # """Decorator function for replacing Scen/Evt placholders. # # Checks fro scenario and event placeholders in the return value of a # function and replaces them with corresponding values if found. # # Args: # se_vals(dict): standard scenario/event dictionary in the format: # {'scenario': { # """ # def seDecorator(func): # def seWrapper(*args, **kwargs): # result = func(*args, **kwargs) # # if '~' in result: # # Check for scenarion stuff # for key, val in self.se_vals['scenario'].items(): # temp = '~' + key + '~' # if temp in result: # result = result.replace(temp, val) # # Check for event stuff # for key, val in self.se_vals['event'].items(): # temp = '~' + key + '~' # if temp in result: # result = result.replace(temp, val) # return result # return seWrapper # return seDecorator Format a float as a string to given number of decimal places.
Args:
value(float): the value to format.
no_of_dps(int): number of decimal places to format to.
ignore_empty_str(True): return a stripped blank string if set to True.
Return:
str - the formatted float.
Raises:
ValueError - if value param is not type float. Checks a file to see that it has the right extension.
Args:
file_path (str): The file path to check.
ext (List): list containing the extension types to match the file
against.
Returns:
True if the extension matches the ext variable given or False if not. Tests if string is a number or not.
Simply tries to convert it and catches the error if launched.
Args:
s (str): string to test number compatibility.
Returns:
Bool - True if number. False if not. Tests a given value to see if it is an instance of basestring or not.
Note:
This function should be used whenever testing this as it accounts for
both Python 2.7+ and 3.2+ variations of string.
Args:
value: the variable to test.
Returns:
Bool - True if value is a unicode str (basestring type) # if not isinstance(value, basestring): # return False # # return True Test a given value to see if it is a list or not.
Args:
value: the variable to test for list type.
Returns:
True if value is of type list; False otherwise. Convert a list to a String
Creates one string by adding each part of the array to one string using
', '.join()
Args:
str_array (List): to convert into single string.
Returns:
str - representaion of the array joined together.
Raises:
ValueError: if not contents of list are instances of basestring. Returns a list containing the indices that a substring was found at.
Uses a generator to quickly find all indices that str appears in.
Args:
substr (str): the sub string to search for.
the_list (List): a list containing the strings to search.
Returns:
tuple - containing:
* a list with the indices that the substring was found in
(this list can be empty if no matches were found).
* an integer containing the number of elements it was found in. Returns tuple containing min, max of two values
Args:
val1: first integer or float.
val2: second integer or float.
Returns:
tuple - containing:
* lower value
* higher value
* False if not same or True if the same. Extracts the extension without '.' from filepath.
The extension will always be converted to lower case before returning.
Args:
filepath (str): A full filepath if name_only=False. Otherwise a file
name with extension if name_only=True.
name_only (bool): True if filepath is only filename.extension. Find a whole word amoungst a string. Converts tuflow command line options to scenario/event dict.
Tuflow uses command line option (e.g. -s1 blah -e1 blah) to set scenario
values which can either be provided on the command line or through the
FMP run form. The TuflowLoader can use these arguments but requires a
slightly different setup.
This function converts the command line string into the scenarion and
event dictionary expected by the TuflowLoader.
Args:
options(str): command line options.
Return:
dict - {'scenario': {'s1': blah}, 'event': {'e1': blah}}
Raises:
AttributeError: if both -s and -s1 or -e and -e1 occurr in the options
string. -x and -x1 are treated as the same variable by tuflow and
one of the values would be ignored. Replace a tuflow placeholder filename with the scenario/event values.
Replaces all of the placholder values (e.g. ~s1~_~e1~) in a tuflow
filename with the corresponding values provided in the run options string.
If the run options flags are not found in the filename their values will
be appended to the end of the string.
The setup of the returned filename is always the same:
- First replace all placeholders with corresponding flag values.
- s1 == s and e1 == e.
- Append additional e values to end with '_' before first and '+' before others.
- Append additional s values to end with '_' before first and '+' before others.
Args:
filename(str): the filename to update.
se_vals(str): the run options string containing the 's' and
'e' flags and their corresponding values.
Return:
str - the updated filename. # Format the key value pairs into a list and combine the scenario and # event list together and sort them into e, e1, e2, s, s1, s2 order. # Build a new filename by replacing or adding the flag values # DEBUG - CHECK THIS IS TRUE! Creates a new enum using the values handed to it.
Taken from <NAME> on StackOverflow:
http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
Examples:
Can be created and accessed using:
>>> Numbers = enum('ZERO', 'ONE', 'TWO')
>>> Numbers.ZERO
0
>>> Numbers.ONE
1
Or reverse the process o get the name from the value:
>>> Numbers.reverse_mapping['three']
'THREE' Queueing class for storing data to go into the database Returns True if list is empty Add an item to the queue Pop an item from the front of the queue. Get the size of the queue Stack class for loading logic. Return True if stack is empty. Add an item to the stack.
Args:
item: the item to add to the stack.
Raises:
IndexError: if max_size has been set and adding another item would
make the stack bigger than max size. Get an item From the stack.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty. See what the next item on the stack is, but don't remove it.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty. Return the number of items in the stack. | 2.971061 | 3 |
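# Minimal usage sketch for the TUFLOW scenario/event helpers defined above
# (convertRunOptionsToSEDict and getSEResolvedFilename). The option string and
# filename are invented examples; the import path follows the repo layout.
from ship.utils.utilfunctions import convertRunOptionsToSEDict, getSEResolvedFilename

se_vals = convertRunOptionsToSEDict('-s1 BAS -e1 Q0100')
# se_vals == {'scenario': {'s1': 'BAS'}, 'event': {'e1': 'Q0100'}}
resolved = getSEResolvedFilename('model_~s1~_~e1~.tcf', se_vals)
# resolved == 'model_BAS_Q0100.tcf'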
src/ansible_navigator/ui_framework/content_defs.py | goneri/ansible-navigator | 0 | 9088 | <filename>src/ansible_navigator/ui_framework/content_defs.py<gh_stars>0
"""Definitions of UI content objects."""
from dataclasses import asdict
from dataclasses import dataclass
from enum import Enum
from typing import Dict
from typing import Generic
from typing import TypeVar
from ..utils.compatibility import TypeAlias
from ..utils.serialize import SerializationFormat
class ContentView(Enum):
"""The content view."""
FULL = "full"
NORMAL = "normal"
T = TypeVar("T") # pylint:disable=invalid-name # https://github.com/PyCQA/pylint/pull/5221
DictType: TypeAlias = Dict[str, T]
@dataclass
class ContentBase(Generic[T]):
r"""The base class for all content dataclasses presented in the UI.
It should be noted, that while the return type is defined as ``T``
for the serialization functions below, mypy will not catch in incorrect
definition of ``T`` at this time. This is because of how ``asdict()``
is typed:
@overload
def asdict(obj: Any) -> dict[str, Any]: ...
@overload
def asdict(obj: Any, \*, dict_factory: Callable[[list[tuple[str, Any]]], _T]) -> _T: ...
Which results in mypy believing the outcome of asdict is dict[str, Any] and letting it silently
pass through an incorrect ``T``. ``Mypy`` identifies this as a known issue:
https://mypy.readthedocs.io/en/stable/additional_features.html#caveats-known-issues
"""
def asdict(
self,
content_view: ContentView,
serialization_format: SerializationFormat,
) -> DictType:
"""Convert thy self into a dictionary.
:param content_view: The content view
:param serialization_format: The serialization format
:returns: A dictionary created from self
"""
converter_map = {
(ContentView.FULL, SerializationFormat.JSON): self.serialize_json_full,
(ContentView.FULL, SerializationFormat.YAML): self.serialize_yaml_full,
(ContentView.NORMAL, SerializationFormat.JSON): self.serialize_json_normal,
(ContentView.NORMAL, SerializationFormat.YAML): self.serialize_yaml_normal,
}
try:
dump_self_as_dict = converter_map[content_view, serialization_format]
except KeyError:
return asdict(self)
else:
return dump_self_as_dict()
def serialize_json_full(self) -> DictType:
"""Provide dictionary for ``JSON`` with all attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_json_normal(self) -> DictType:
"""Provide dictionary for ``JSON`` with curated attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_yaml_full(self) -> DictType:
"""Provide dictionary for ``YAML`` with all attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_yaml_normal(self) -> DictType:
"""Provide dictionary for ``JSON`` with curated attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def get(self, attribute: str):
"""Allow this dataclass to be treated like a dictionary.
This is a work around until the UI fully supports dataclasses
at which time this can be removed.
Default is intentionally not implemented as a safeguard to ensure
this is not more work than necessary to remove in the future
and will only return attributes in existence.
:param attribute: The attribute to get
:returns: The gotten attribute
"""
return getattr(self, attribute)
| <filename>src/ansible_navigator/ui_framework/content_defs.py<gh_stars>0
"""Definitions of UI content objects."""
from dataclasses import asdict
from dataclasses import dataclass
from enum import Enum
from typing import Dict
from typing import Generic
from typing import TypeVar
from ..utils.compatibility import TypeAlias
from ..utils.serialize import SerializationFormat
class ContentView(Enum):
"""The content view."""
FULL = "full"
NORMAL = "normal"
T = TypeVar("T") # pylint:disable=invalid-name # https://github.com/PyCQA/pylint/pull/5221
DictType: TypeAlias = Dict[str, T]
@dataclass
class ContentBase(Generic[T]):
r"""The base class for all content dataclasses presented in the UI.
It should be noted, that while the return type is defined as ``T``
for the serialization functions below, mypy will not catch in incorrect
definition of ``T`` at this time. This is because of how ``asdict()``
is typed:
@overload
def asdict(obj: Any) -> dict[str, Any]: ...
@overload
def asdict(obj: Any, \*, dict_factory: Callable[[list[tuple[str, Any]]], _T]) -> _T: ...
Which results in mypy believing the outcome of asdict is dict[str, Any] and letting it silently
pass through an incorrect ``T``. ``Mypy`` identifies this as a known issue:
https://mypy.readthedocs.io/en/stable/additional_features.html#caveats-known-issues
"""
def asdict(
self,
content_view: ContentView,
serialization_format: SerializationFormat,
) -> DictType:
"""Convert thy self into a dictionary.
:param content_view: The content view
:param serialization_format: The serialization format
:returns: A dictionary created from self
"""
converter_map = {
(ContentView.FULL, SerializationFormat.JSON): self.serialize_json_full,
(ContentView.FULL, SerializationFormat.YAML): self.serialize_yaml_full,
(ContentView.NORMAL, SerializationFormat.JSON): self.serialize_json_normal,
(ContentView.NORMAL, SerializationFormat.YAML): self.serialize_yaml_normal,
}
try:
dump_self_as_dict = converter_map[content_view, serialization_format]
except KeyError:
return asdict(self)
else:
return dump_self_as_dict()
def serialize_json_full(self) -> DictType:
"""Provide dictionary for ``JSON`` with all attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_json_normal(self) -> DictType:
"""Provide dictionary for ``JSON`` with curated attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_yaml_full(self) -> DictType:
"""Provide dictionary for ``YAML`` with all attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def serialize_yaml_normal(self) -> DictType:
"""Provide dictionary for ``JSON`` with curated attributes.
:returns: A dictionary created from self
"""
return asdict(self)
def get(self, attribute: str):
"""Allow this dataclass to be treated like a dictionary.
This is a work around until the UI fully supports dataclasses
at which time this can be removed.
Default is intentionally not implemented as a safeguard to ensure
this is not more work than necessary to remove in the future
and will only return attributes in existence.
:param attribute: The attribute to get
:returns: The gotten attribute
"""
return getattr(self, attribute)
| en | 0.824268 | Definitions of UI content objects. The content view. # pylint:disable=invalid-name # https://github.com/PyCQA/pylint/pull/5221 The base class for all content dataclasses presented in the UI. It should be noted, that while the return type is defined as ``T`` for the serialization functions below, mypy will not catch in incorrect definition of ``T`` at this time. This is because of how ``asdict()`` is typed: @overload def asdict(obj: Any) -> dict[str, Any]: ... @overload def asdict(obj: Any, \*, dict_factory: Callable[[list[tuple[str, Any]]], _T]) -> _T: ... Which result in mypy believing the outcome of asdict is dict[str, Any] and letting it silently pass through an incorrect ``T``. ``Mypy`` identifies this as a known issue: https://mypy.readthedocs.io/en/stable/additional_features.html#caveats-known-issues Convert thy self into a dictionary. :param content_view: The content view :param serialization_format: The serialization format :returns: A dictionary created from self Provide dictionary for ``JSON`` with all attributes. :returns: A dictionary created from self Provide dictionary for ``JSON`` with curated attributes. :returns: A dictionary created from self Provide dictionary for ``YAML`` with all attributes. :returns: A dictionary created from self Provide dictionary for ``JSON`` with curated attributes. :returns: A dictionary created from self Allow this dataclass to be treated like a dictionary. This is a work around until the UI fully supports dataclasses at which time this can be removed. Default is intentionally not implemented as a safeguard to enure this is not more work than necessary to remove in the future and will only return attributes in existence. :param attribute: The attribute to get :returns: The gotten attribute | 2.283492 | 2 |
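# Minimal usage sketch for ContentBase above: a dataclass subclass inherits the
# view/format dispatch in asdict(). The Issue fields and the import paths
# (derived from the file's package layout) are assumptions for illustration.
from dataclasses import dataclass
from ansible_navigator.ui_framework.content_defs import ContentBase, ContentView
from ansible_navigator.utils.serialize import SerializationFormat

@dataclass
class Issue(ContentBase):
    title: str
    severity: str

issue = Issue(title='broken link', severity='low')
data = issue.asdict(content_view=ContentView.NORMAL,
                    serialization_format=SerializationFormat.JSON)
# data == {'title': 'broken link', 'severity': 'low'}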
FWCore/MessageService/test/u28_cerr_cfg.py | SWuchterl/cmssw | 6 | 9089 | # u28_cerr_cfg.py:
#
# Non-regression test configuration file for MessageLogger service:
# distinct threshold level for linked destination, where
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.MessageService.test.Services_cff")
process.MessageLogger = cms.Service("MessageLogger",
categories = cms.untracked.vstring('preEventProcessing'),
destinations = cms.untracked.vstring('cerr'),
statistics = cms.untracked.vstring('cerr_stats'),
cerr_stats = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
output = cms.untracked.string('cerr')
),
u28_output = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
noTimeStamps = cms.untracked.bool(True),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("EmptySource")
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_A")
process.p = cms.Path(process.sendSomeMessages)
| # u28_cerr_cfg.py:
#
# Non-regression test configuration file for MessageLogger service:
# distinct threshold level for linked destination, where
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.MessageService.test.Services_cff")
process.MessageLogger = cms.Service("MessageLogger",
categories = cms.untracked.vstring('preEventProcessing'),
destinations = cms.untracked.vstring('cerr'),
statistics = cms.untracked.vstring('cerr_stats'),
cerr_stats = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
output = cms.untracked.string('cerr')
),
u28_output = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
noTimeStamps = cms.untracked.bool(True),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("EmptySource")
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_A")
process.p = cms.Path(process.sendSomeMessages)
| en | 0.646996 | # u28_cerr_cfg.py: # # Non-regression test configuration file for MessageLogger service: # distinct threshold level for linked destination, where # | 1.595888 | 2 |
content/browse/utils.py | Revibe-Music/core-services | 2 | 9090 | <filename>content/browse/utils.py
"""
Created:04 Mar. 2020
Author: <NAME>
"""
from revibe._helpers import const
from administration.utils import retrieve_variable
from content.models import Song, Album, Artist
from content.serializers import v1 as cnt_ser_v1
# -----------------------------------------------------------------------------
# _DEFAULT_LIMIT = 50
# limit_variable = retrieve_variable()
# try:
# limit_variable = int(limit_variable)
# _DEFAULT_LIMIT = max(min(limit_variable, 100), 10)
# except ValueError as ve:
# print("Could not read browse section default limit variable")
# print(ve)
def _DEFAULT_LIMIT():
limit_variable = retrieve_variable("browse_section_default_limit", 50)
try:
limit_variable = int(limit_variable)
return max(min(limit_variable, 100), 10)
except ValueError as ve:
print("Could not read browse section default limit variable")
print(ve)
return 50
_name = "name"
_type = "type"
_results = "results"
_endpoint = "endpoint"
def _browse_song(annotation, limit=None, platform=const.REVIBE_STRING, **options):
limit = limit if limit else _DEFAULT_LIMIT()
songs = Song.display_objects.filter(platform=platform).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.SongSerializer(songs, many=True).data
return options
def _browse_album(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
albums = Album.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.AlbumSerializer(albums, many=True).data
return options
def _browse_artist(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
artists = Artist.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.ArtistSerializer(artists, many=True).data
return options | <filename>content/browse/utils.py
"""
Created:04 Mar. 2020
Author: <NAME>
"""
from revibe._helpers import const
from administration.utils import retrieve_variable
from content.models import Song, Album, Artist
from content.serializers import v1 as cnt_ser_v1
# -----------------------------------------------------------------------------
# _DEFAULT_LIMIT = 50
# limit_variable = retrieve_variable()
# try:
# limit_variable = int(limit_variable)
# _DEFAULT_LIMIT = max(min(limit_variable, 100), 10)
# except ValueError as ve:
# print("Could not read browse section default limit variable")
# print(ve)
def _DEFAULT_LIMIT():
limit_variable = retrieve_variable("browse_section_default_limit", 50)
try:
limit_variable = int(limit_variable)
return max(min(limit_variable, 100), 10)
except ValueError as ve:
print("Could not read browse section default limit variable")
print(ve)
return 50
_name = "name"
_type = "type"
_results = "results"
_endpoint = "endpoint"
def _browse_song(annotation, limit=None, platform=const.REVIBE_STRING, **options):
limit = limit if limit else _DEFAULT_LIMIT()
songs = Song.display_objects.filter(platform=platform).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.SongSerializer(songs, many=True).data
return options
def _browse_album(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
albums = Album.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.AlbumSerializer(albums, many=True).data
return options
def _browse_artist(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
artists = Artist.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.ArtistSerializer(artists, many=True).data
return options | en | 0.40492 | Created:04 Mar. 2020 Author: <NAME> # ----------------------------------------------------------------------------- # _DEFAULT_LIMIT = 50 # limit_variable = retrieve_variable() # try: # limit_variable = int(limit_variable) # _DEFAULT_LIMIT = max(min(limit_variable, 100), 10) # except ValueError as ve: # print("Could not read browse section default limit variable") # print(ve) | 2.095323 | 2 |
Segmentation/model.py | vasetrendafilov/ComputerVision | 0 | 9091 | """
Authors: <NAME>, <NAME>
E-mail: <EMAIL>, <EMAIL>
Course: Mashinski vid, FEEIT, Spring 2021
Date: 09.03.2021
Description: function library
model operations: construction, loading, saving
Python version: 3.6
"""
# python imports
from keras.layers import Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D, Input, Concatenate
from keras.models import Model, model_from_json
def load_model(model_path, weights_path):
"""
loads a pre-trained model configuration and calculated weights
:param model_path: path of the serialized model configuration file (.json) [string]
:param weights_path: path of the serialized model weights file (.h5) [string]
:return: model - keras model object
"""
# --- load model configuration ---
json_file = open(model_path, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json) # load model architecture
model.load_weights(weights_path) # load weights
return model
def construct_model_unet_orig(input_shape):
"""
construct semantic segmentation model architecture (encoder-decoder)
:param input_shape: list of input dimensions (height, width, depth) [tuple]
:return: model - Keras model object
"""
input = Input(shape=input_shape)
# --- encoder ---
conv1 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
conv11 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPool2D(pool_size=(2, 2))(conv11)
conv2 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv22 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPool2D(pool_size=(2, 2))(conv22)
conv3 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv33 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPool2D(pool_size=(2, 2))(conv33)
conv4 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv44 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
pool4 = MaxPool2D(pool_size=(2, 2))(conv44)
# --- decoder ---
conv5 = Conv2D(filters=1024, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv55 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
up1 = UpSampling2D(size=(2, 2))(conv55)
merge1 = Concatenate(axis=3)([conv44, up1])
deconv1 = Conv2DTranspose(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
deconv11 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv1)
up2 = UpSampling2D(size=(2, 2))(deconv11)
merge2 = Concatenate(axis=3)([conv33, up2])
deconv2 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge2)
deconv22 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv2)
up3 = UpSampling2D(size=(2, 2))(deconv22)
merge3 = Concatenate(axis=3)([conv22, up3])
deconv3 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
deconv33 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv3)
up4 = UpSampling2D(size=(2, 2))(deconv33)
merge4 = Concatenate(axis=3)([conv11, up4])
deconv4 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge4)
deconv44 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv4)
output = Conv2DTranspose(filters=input_shape[2], kernel_size=1, padding='same', activation='sigmoid')(deconv44)
model = Model(inputs=input, outputs=output)
return model
| """
Authors: <NAME>, <NAME>
E-mail: <EMAIL>, <EMAIL>
Course: Mashinski vid, FEEIT, Spring 2021
Date: 09.03.2021
Description: function library
model operations: construction, loading, saving
Python version: 3.6
"""
# python imports
from keras.layers import Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D, Input, Concatenate
from keras.models import Model, model_from_json
def load_model(model_path, weights_path):
"""
loads a pre-trained model configuration and calculated weights
:param model_path: path of the serialized model configuration file (.json) [string]
:param weights_path: path of the serialized model weights file (.h5) [string]
:return: model - keras model object
"""
# --- load model configuration ---
json_file = open(model_path, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json) # load model architecture
model.load_weights(weights_path) # load weights
return model
def construct_model_unet_orig(input_shape):
"""
construct semantic segmentation model architecture (encoder-decoder)
:param input_shape: list of input dimensions (height, width, depth) [tuple]
:return: model - Keras model object
"""
input = Input(shape=input_shape)
# --- encoder ---
conv1 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
conv11 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPool2D(pool_size=(2, 2))(conv11)
conv2 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv22 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPool2D(pool_size=(2, 2))(conv22)
conv3 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv33 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPool2D(pool_size=(2, 2))(conv33)
conv4 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv44 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
pool4 = MaxPool2D(pool_size=(2, 2))(conv44)
# --- decoder ---
conv5 = Conv2D(filters=1024, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv55 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
up1 = UpSampling2D(size=(2, 2))(conv55)
merge1 = Concatenate(axis=3)([conv44, up1])
deconv1 = Conv2DTranspose(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
deconv11 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv1)
up2 = UpSampling2D(size=(2, 2))(deconv11)
merge2 = Concatenate(axis=3)([conv33, up2])
deconv2 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge2)
deconv22 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv2)
up3 = UpSampling2D(size=(2, 2))(deconv22)
merge3 = Concatenate(axis=3)([conv22, up3])
deconv3 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
deconv33 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv3)
up4 = UpSampling2D(size=(2, 2))(deconv33)
merge4 = Concatenate(axis=3)([conv11, up4])
deconv4 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge4)
deconv44 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv4)
output = Conv2DTranspose(filters=input_shape[2], kernel_size=1, padding='same', activation='sigmoid')(deconv44)
model = Model(inputs=input, outputs=output)
return model
| en | 0.728724 | Authors: <NAME>, <NAME> E-mail: <EMAIL>, <EMAIL> Course: Mashinski vid, FEEIT, Spring 2021 Date: 09.03.2021 Description: function library model operations: construction, loading, saving Python version: 3.6 # python imports loads a pre-trained model configuration and calculated weights :param model_path: path of the serialized model configuration file (.json) [string] :param weights_path: path of the serialized model weights file (.h5) [string] :return: model - keras model object # --- load model configuration --- # load model architecture # load weights construct semantic segmentation model architecture (encoder-decoder) :param input_shape: list of input dimensions (height, width, depth) [tuple] :return: model - Keras model object # --- encoder --- # --- decoder --- | 2.841247 | 3 |
Day24_Python/part1.py | Rog3rSm1th/PolyglotOfCode | 7 | 9092 | <reponame>Rog3rSm1th/PolyglotOfCode
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from itertools import combinations
from functools import reduce
def solve(packages, groups):
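# search small package groups whose weight equals an equal share of the total and keep the minimum product ("quantum entanglement")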
total = sum(packages)
result = 9999999999999999
# we should use `for i in range(1, len(packages) - 2)` but it would
# make the computation significantly slower
for i in range(1, 7):
for c in combinations(packages, i):
if sum(c) == total / groups:
quantum_entanglement = reduce(lambda a, b: a * b, list(c))
result = min(result, quantum_entanglement)
return result
packages = [int(num) for num in open('input.txt')]
print(solve(packages, 3)) | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
from itertools import combinations
from functools import reduce
def solve(packages, groups):
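# search small package groups whose weight equals an equal share of the total and keep the minimum product ("quantum entanglement")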
total = sum(packages)
result = 9999999999999999
# we should use `for i in range(1, len(packages) - 2)` but it would
# make the computation significantly slower
for i in range(1, 7):
for c in combinations(packages, i):
if sum(c) == total / groups:
quantum_entanglement = reduce(lambda a, b: a * b, list(c))
result = min(result, quantum_entanglement)
return result
packages = [int(num) for num in open('input.txt')]
print(solve(packages, 3)) | en | 0.731337 | #!/usr/bin/env python3 #-*- coding: utf-8 -*- # we should use `for i in range(1, len(packages) - 2)` but it would # make the computation significantly slower | 3.045598 | 3 |
generate-album.py | atomicparade/photo-album | 0 | 9093 | <filename>generate-album.py
import configparser
import math
import re
import urllib.parse
from pathlib import Path
from PIL import Image
def get_images(image_directory, thumbnail_directory, thumbnail_size):
thumbnail_directory = Path(thumbnail_directory)
for file in [file for file in thumbnail_directory.glob('*')]:
file.unlink()
thumbnail_directory.mkdir(mode=0o755, exist_ok=True)
files = [file for file in Path(image_directory).glob('*')]
images = []
for file in files:
thumbnail_name = Path(thumbnail_directory, file.stem + '.jpg')
image = Image.open(file)
image.thumbnail(thumbnail_size)
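# center the resized image on a black canvas of the exact thumbnail size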
top_left = (0, 0)
if image.width < thumbnail_size[0]:
top_left = (math.floor(abs(image.width - thumbnail_size[0]) / 2), top_left[1])
if image.height < thumbnail_size[1]:
top_left = (top_left[0], math.floor(abs(image.height - thumbnail_size[1]) / 2))
final_image = Image.new('RGB', thumbnail_size, (0, 0, 0))
final_image.paste(image, top_left)
final_image.save(thumbnail_name, 'jpeg')
if '_' in file.stem:
description = file.stem.split('_', maxsplit=1)[1]
else:
description = file.stem
images.append({
'path': str(file),
'thumbnail': thumbnail_name,
'description': description,
'stem': file.stem
})
def get_image_file_number(image):
if re.match(r'^(\d+)', image['stem']) is not None:
return int(re.split(r'^(\d+)', image['stem'])[1])
else:
return 999
images = sorted(images, key=get_image_file_number)
return images
def write_html(file, images, page_title, thumbnail_size):
file.write(f'''\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{page_title}</title>
<link rel="stylesheet" type="text/css" href="album.css">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
</head>
<body>
<h1>{page_title}</h1>
<div id="album">
\
''')
# write thumbnails
for image, idx in zip(images, range(1, len(images) + 1)):
thumbnail_path = urllib.parse.quote(str(image['thumbnail']).replace('\\', '/'))
file.write(f'''\
<p id="thumbnail-{idx}" class="thumbnail"><img src="{thumbnail_path}" alt="{image['description']}" width="{thumbnail_size[0]}" height="{thumbnail_size[1]}"></p>\
''')
file.write(f'''\
<div id="large-view">
<p id="instructions" class="image">Hover over an image</p>
''')
# write images
for image, idx in zip(images, range(1, len(images) + 1)):
image_path = urllib.parse.quote(str(image['path']).replace('\\', '/'))
file.write(f'''\
<p id="image-{idx}" class="image"><img src="{image_path}" alt="{image['description']}"><br>{image['description']}</p>
''')
file.write(f'''\
</div>
</div>
</body>
</html>
''')
def write_css(file, images):
file.write('''\
@media print {
body {
font-family: sans-serif;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image img {
max-width: 100%;
margin-bottom: 1em;
}
}
@media
screen and (max-width: 768px),
/* Tablets and smartphones */
screen and (hover: none)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 1em;
padding: 0;
}
h1 {
margin-top: 0;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image:nth-child(2) img {
margin-top: 0;
}
.image img {
max-width: calc(100vw - 3em);
}
}
@media
screen and (min-width: 769px) and (hover: hover),
/* IE10 and IE11 (they don't support (hover: hover) */
screen and (min-width: 769px) and (-ms-high-contrast: none),
screen and (min-width: 769px) and (-ms-high-contrast: active)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 2em 60% 2em 4em;
padding: 0;
}
.album {
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.thumbnail {
display: inline-block;
margin: 0 .5em .2em 0;
}
.image {
background: #333;
display: none;
position: fixed;
top: 2em;
left: 40%;
text-align: center;
height: 90vh;
width: calc(60% - 4em);
}
.image img {
display: block;
max-height: 92%;
max-width: 100%;
margin: 0 auto;
}
#instructions {
display: block;
top: 4em;
}
''')
if len(images) > 0:
for idx in range(1, len(images) + 1):
file.write(f'''\
#thumbnail-{idx}:hover ~ #large-view #image-{idx}\
''')
if idx < len(images):
file.write('''\
,
''')
file.write('''\
{
display: block;
}
''')
file.write('''\
}
''')
def main():
config = configparser.ConfigParser()
config.read('./config')
image_directory = config['settings']['image_directory']
output_css = config['settings']['output_css']
output_html = config['settings']['output_html']
page_title = config['settings']['page_title']
thumbnail_directory = config['settings']['thumbnail_directory']
thumbnail_width = int(config['settings']['thumbnail_width'])
thumbnail_height = int(config['settings']['thumbnail_height'])
thumbnail_size = (thumbnail_width, thumbnail_height)
out_html = open(output_html, 'w')
out_css = open(output_css, 'w')
images = get_images(image_directory, thumbnail_directory, thumbnail_size)
write_html(out_html, images, page_title, thumbnail_size)
write_css(out_css, images)
out_html.close()
out_css.close()
if __name__ == '__main__':
main()
| <filename>generate-album.py
import configparser
import math
import re
import urllib.parse
from pathlib import Path
from PIL import Image
def get_images(image_directory, thumbnail_directory, thumbnail_size):
thumbnail_directory = Path(thumbnail_directory)
for file in [file for file in thumbnail_directory.glob('*')]:
file.unlink()
thumbnail_directory.mkdir(mode=0o755, exist_ok=True)
files = [file for file in Path(image_directory).glob('*')]
images = []
for file in files:
thumbnail_name = Path(thumbnail_directory, file.stem + '.jpg')
image = Image.open(file)
image.thumbnail(thumbnail_size)
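# center the resized image on a black canvas of the exact thumbnail size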
top_left = (0, 0)
if image.width < thumbnail_size[0]:
top_left = (math.floor(abs(image.width - thumbnail_size[0]) / 2), top_left[1])
if image.height < thumbnail_size[1]:
top_left = (top_left[0], math.floor(abs(image.height - thumbnail_size[1]) / 2))
final_image = Image.new('RGB', thumbnail_size, (0, 0, 0))
final_image.paste(image, top_left)
final_image.save(thumbnail_name, 'jpeg')
if '_' in file.stem:
description = file.stem.split('_', maxsplit=1)[1]
else:
description = file.stem
images.append({
'path': str(file),
'thumbnail': thumbnail_name,
'description': description,
'stem': file.stem
})
def get_image_file_number(image):
if re.match(r'^(\d+)', image['stem']) is not None:
return int(re.split(r'^(\d+)', image['stem'])[1])
else:
return 999
images = sorted(images, key=get_image_file_number)
return images
def write_html(file, images, page_title, thumbnail_size):
file.write(f'''\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{page_title}</title>
<link rel="stylesheet" type="text/css" href="album.css">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
</head>
<body>
<h1>{page_title}</h1>
<div id="album">
\
''')
# write thumbnails
for image, idx in zip(images, range(1, len(images) + 1)):
thumbnail_path = urllib.parse.quote(str(image['thumbnail']).replace('\\', '/'))
file.write(f'''\
<p id="thumbnail-{idx}" class="thumbnail"><img src="{thumbnail_path}" alt="{image['description']}" width="{thumbnail_size[0]}" height="{thumbnail_size[1]}"></p>\
''')
file.write(f'''\
<div id="large-view">
<p id="instructions" class="image">Hover over an image</p>
''')
# write images
for image, idx in zip(images, range(1, len(images) + 1)):
image_path = urllib.parse.quote(str(image['path']).replace('\\', '/'))
file.write(f'''\
<p id="image-{idx}" class="image"><img src="{image_path}" alt="{image['description']}"><br>{image['description']}</p>
''')
file.write(f'''\
</div>
</div>
</body>
</html>
''')
def write_css(file, images):
file.write('''\
@media print {
body {
font-family: sans-serif;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image img {
max-width: 100%;
margin-bottom: 1em;
}
}
@media
screen and (max-width: 768px),
/* Tablets and smartphones */
screen and (hover: none)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 1em;
padding: 0;
}
h1 {
margin-top: 0;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image:nth-child(2) img {
margin-top: 0;
}
.image img {
max-width: calc(100vw - 3em);
}
}
@media
screen and (min-width: 769px) and (hover: hover),
/* IE10 and IE11 (they don't support (hover: hover) */
screen and (min-width: 769px) and (-ms-high-contrast: none),
screen and (min-width: 769px) and (-ms-high-contrast: active)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 2em 60% 2em 4em;
padding: 0;
}
.album {
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.thumbnail {
display: inline-block;
margin: 0 .5em .2em 0;
}
.image {
background: #333;
display: none;
position: fixed;
top: 2em;
left: 40%;
text-align: center;
height: 90vh;
width: calc(60% - 4em);
}
.image img {
display: block;
max-height: 92%;
max-width: 100%;
margin: 0 auto;
}
#instructions {
display: block;
top: 4em;
}
''')
if len(images) > 0:
for idx in range(1, len(images) + 1):
file.write(f'''\
#thumbnail-{idx}:hover ~ #large-view #image-{idx}\
''')
if idx < len(images):
file.write('''\
,
''')
file.write('''\
{
display: block;
}
''')
file.write('''\
}
''')
def main():
config = configparser.ConfigParser()
config.read('./config')
image_directory = config['settings']['image_directory']
output_css = config['settings']['output_css']
output_html = config['settings']['output_html']
page_title = config['settings']['page_title']
thumbnail_directory = config['settings']['thumbnail_directory']
thumbnail_width = int(config['settings']['thumbnail_width'])
thumbnail_height = int(config['settings']['thumbnail_height'])
thumbnail_size = (thumbnail_width, thumbnail_height)
out_html = open(output_html, 'w')
out_css = open(output_css, 'w')
images = get_images(image_directory, thumbnail_directory, thumbnail_size)
write_html(out_html, images, page_title, thumbnail_size)
write_css(out_css, images)
out_html.close()
out_css.close()
if __name__ == '__main__':
main()
| en | 0.223412 | \ <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>{page_title}</title> <link rel="stylesheet" type="text/css" href="album.css"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> </head> <body> <h1>{page_title}</h1> <div id="album"> \ # write thumbnails \ <p id="thumbnail-{idx}" class="thumbnail"><img src="{thumbnail_path}" alt="{image['description']}" width="{thumbnail_size[0]}" height="{thumbnail_size[1]}"></p>\ \ <div id="large-view"> <p id="instructions" class="image">Hover over an image</p> # write images \ <p id="image-{idx}" class="image"><img src="{image_path}" alt="{image['description']}"><br>{image['description']}</p> \ </div> </div> </body> </html> \ @media print { body { font-family: sans-serif; } .thumbnail { display: none; } #instructions { display: none; } .image img { max-width: 100%; margin-bottom: 1em; } } @media screen and (max-width: 768px), /* Tablets and smartphones */ screen and (hover: none) { body { background: #333; color: #eee; font-family: sans-serif; margin: 1em; padding: 0; } h1 { margin-top: 0; } .thumbnail { display: none; } #instructions { display: none; } .image:nth-child(2) img { margin-top: 0; } .image img { max-width: calc(100vw - 3em); } } @media screen and (min-width: 769px) and (hover: hover), /* IE10 and IE11 (they don't support (hover: hover) */ screen and (min-width: 769px) and (-ms-high-contrast: none), screen and (min-width: 769px) and (-ms-high-contrast: active) { body { background: #333; color: #eee; font-family: sans-serif; margin: 2em 60% 2em 4em; padding: 0; } .album { display: flex; flex-direction: row; flex-wrap: wrap; } .thumbnail { display: inline-block;; margin: 0 .5em .2em 0; } .image { background: #333; display: none; position: fixed; top: 2em; left: 40%; text-align: center; height: 90vh; width: calc(60% - 4em); } .image img { display: block; max-height: 92%; max-width: 100%; margin: 0 auto; } #instructions { display: block; top: 4em; } \ #thumbnail-{idx}:hover ~ #large-view #image-{idx}\ \ , \ { display: block; } \ } | 3.07302 | 3 |
tests/test_sne_truth.py | LSSTDESC/sims_TruthCatalog | 2 | 9094 | """
Unit tests for SNIa truth catalog code.
"""
import os
import unittest
import sqlite3
import numpy as np
import pandas as pd
from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory
class SNSynthPhotFactoryTestCase(unittest.TestCase):
"""
Test case class for SNIa synthetic photometry factory class.
"""
def test_SNSynthPhotFactory(self):
"""
Test some flux calculations using the underlying SNObject
and SyntheticPhotometry classes.
"""
sp_factory = SNSynthPhotFactory(z=0.6322702169418335,
t0=61719.9950436545,
x0=4.2832710977804034e-06,
x1=-1.207738485943195,
c=-0.0069750402968899936,
snra=55.26407314527358,
sndec=-40.81575605788344)
mjds = (61689.150791, 61697.354470, 61712.258685)
bands = ('z', 'i', 'r')
fluxes = (2.6401569864737633, 71.18561504923377, 1048.0327802379868)
for mjd, band, flux in zip(mjds, bands, fluxes):
sp = sp_factory.create(mjd)
self.assertAlmostEqual(sp.calcFlux(band), flux)
class SNeTruthWriterTestCase(unittest.TestCase):
"""
Test case class for SNIa truth catalog generation class.
"""
def setUp(self):
self.outfile = 'test_sne_truth_cat.db'
self.data_dir = os.path.join(os.environ['SIMS_TRUTHCATALOG_DIR'],
'data')
sn_db_file = os.path.join(self.data_dir,
'sne_cosmoDC2_v1.1.4_MS_DDF_small.db')
self.sne_truth_writer = SNeTruthWriter(self.outfile, sn_db_file)
def tearDown(self):
if os.path.isfile(self.outfile):
os.remove(self.outfile)
def test_truth_summary(self):
"""Test that the truth_summary columns are filled out as expected."""
self.sne_truth_writer.write()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from truth_summary', conn)
zeros = np.zeros(len(df))
ones = np.ones(len(df))
np.testing.assert_equal(df['is_variable'], ones)
np.testing.assert_equal(df['is_pointsource'], ones)
for band in 'ugrizy':
flux_col = f'flux_{band}'
np.testing.assert_equal(df[flux_col], zeros)
flux_col += '_noMW'
np.testing.assert_equal(df[flux_col], zeros)
def test_auxiliary_truth(self):
"""
Test that the columns from the sne_params table are transcribed
correctly.
"""
self.sne_truth_writer.write_auxiliary_truth()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_auxiliary_info', conn)
np.testing.assert_equal(self.sne_truth_writer.sne_df['snid_in'],
df['id'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['galaxy_id'],
df['host_galaxy'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['snra_in'],
df['ra'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['t0_in'],
df['t0'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['z_in'],
df['redshift'].to_numpy())
def test_variability_truth(self):
"""
Test some expected values for a SNIa in the test SNe catalog
using a small opsim db table.
"""
opsim_db_file = os.path.join(self.data_dir,
'minion_1016_desc_dithered_v4_small.db')
self.sne_truth_writer.write_variability_truth(opsim_db_file,
max_rows=60)
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_variability_truth', conn)
my_object = 'MS_10195_1375'
self.assertIn(my_object, df['id'].to_list())
my_df = df.query(f'id == "{my_object}"')
for visit in (1425850, 1433860, 1495410):
self.assertIn(visit, my_df['obsHistID'].to_list())
if __name__ == '__main__':
unittest.main()
| """
Unit tests for SNIa truth catalog code.
"""
import os
import unittest
import sqlite3
import numpy as np
import pandas as pd
from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory
class SNSynthPhotFactoryTestCase(unittest.TestCase):
"""
Test case class for SNIa synthetic photometry factory class.
"""
def test_SNSynthPhotFactory(self):
"""
Test some flux calculations using the underlying SNObject
and SyntheticPhotometry classes.
"""
sp_factory = SNSynthPhotFactory(z=0.6322702169418335,
t0=61719.9950436545,
x0=4.2832710977804034e-06,
x1=-1.207738485943195,
c=-0.0069750402968899936,
snra=55.26407314527358,
sndec=-40.81575605788344)
mjds = (61689.150791, 61697.354470, 61712.258685)
bands = ('z', 'i', 'r')
fluxes = (2.6401569864737633, 71.18561504923377, 1048.0327802379868)
for mjd, band, flux in zip(mjds, bands, fluxes):
sp = sp_factory.create(mjd)
self.assertAlmostEqual(sp.calcFlux(band), flux)
class SNeTruthWriterTestCase(unittest.TestCase):
"""
Test case class for SNIa truth catalog generation class.
"""
def setUp(self):
self.outfile = 'test_sne_truth_cat.db'
self.data_dir = os.path.join(os.environ['SIMS_TRUTHCATALOG_DIR'],
'data')
sn_db_file = os.path.join(self.data_dir,
'sne_cosmoDC2_v1.1.4_MS_DDF_small.db')
self.sne_truth_writer = SNeTruthWriter(self.outfile, sn_db_file)
def tearDown(self):
if os.path.isfile(self.outfile):
os.remove(self.outfile)
def test_truth_summary(self):
"""Test that the truth_summary columns are filled out as expected."""
self.sne_truth_writer.write()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from truth_summary', conn)
zeros = np.zeros(len(df))
ones = np.ones(len(df))
np.testing.assert_equal(df['is_variable'], ones)
np.testing.assert_equal(df['is_pointsource'], ones)
for band in 'ugrizy':
flux_col = f'flux_{band}'
np.testing.assert_equal(df[flux_col], zeros)
flux_col += '_noMW'
np.testing.assert_equal(df[flux_col], zeros)
def test_auxiliary_truth(self):
"""
Test that the columns from the sne_params table are transcribed
correctly.
"""
self.sne_truth_writer.write_auxiliary_truth()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_auxiliary_info', conn)
np.testing.assert_equal(self.sne_truth_writer.sne_df['snid_in'],
df['id'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['galaxy_id'],
df['host_galaxy'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['snra_in'],
df['ra'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['t0_in'],
df['t0'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['z_in'],
df['redshift'].to_numpy())
def test_variability_truth(self):
"""
Test some expected values for a SNIa in the test SNe catalog
using a small opsim db table.
"""
opsim_db_file = os.path.join(self.data_dir,
'minion_1016_desc_dithered_v4_small.db')
self.sne_truth_writer.write_variability_truth(opsim_db_file,
max_rows=60)
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_variability_truth', conn)
my_object = 'MS_10195_1375'
self.assertIn(my_object, df['id'].to_list())
my_df = df.query(f'id == "{my_object}"')
for visit in (1425850, 1433860, 1495410):
self.assertIn(visit, my_df['obsHistID'].to_list())
if __name__ == '__main__':
unittest.main()
| en | 0.762134 | Unit tests for SNIa truth catalog code. Test case class for SNIa synthetic photometry factory class. Test some flux calculations using the underlying SNObject and SyntheticPhotometry classes. Test case class for SNIa truth catalog generation class. Test that the truth_summary columns are filled out as expected. Test that the columns from the sne_params table are transcribed correctly. Test some expected values for a SNIa in the test SNe catalog using a small opsim db table. | 2.369221 | 2 |
testsite/management/commands/load_test_transactions.py | gikoluo/djaodjin-saas | 0 | 9095 | # Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, logging, random
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from django.template.defaultfilters import slugify
from django.utils.timezone import utc
from saas.backends.razorpay_processor import RazorpayBackend
from saas.models import Plan, Transaction, get_broker
from saas.utils import datetime_or_now
from saas.settings import PROCESSOR_ID
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Load the database with random transactions (testing purposes).
"""
USE_OF_SERVICE = 0
PAY_BALANCE = 1
REDEEM = 2
REFUND = 3
CHARGEBACK = 4
WRITEOFF = 5
FIRST_NAMES = (
'Anthony',
'Alexander',
'Alexis',
'Alicia',
'Ashley',
'Benjamin',
'Bruce',
'Chloe',
'Christopher',
'Daniel',
'David',
'Edward',
'Emily',
'Emma',
'Ethan',
'Grace',
'Isabella',
'Jacob',
'James',
'Jayden',
'Jennifer',
'John',
'Julia',
'Lily',
'Lucie',
'Luis',
'Matthew',
'Michael',
'Olivia',
'Ryan',
'Samantha',
'Samuel',
'Scott',
'Sophia',
'Williom',
)
LAST_NAMES = (
'Smith',
'Johnson',
'Williams',
'Jones',
'Brown',
'Davis',
'Miller',
'Wilson',
'Moore',
'Taylor',
'Anderson',
'Thomas',
'Jackson',
'White',
'Harris',
'Martin',
'Thompson',
'Garcia',
'Martinez',
'Robinson',
'Clark',
'Rogriguez',
'Lewis',
'Lee',
'Walker',
'Hall',
'Allen',
'Young',
'Hernandez',
'King',
'Wright',
'Lopez',
'Hill',
'Green',
'Baker',
'Gonzalez',
'Nelson',
'Mitchell',
'Perez',
'Roberts',
'Turner',
'Philips',
'Campbell',
'Parker',
'Collins',
'Stewart',
'Sanchez',
'Morris',
'Rogers',
'Reed',
'Cook',
'Bell',
'Cooper',
'Richardson',
'Cox',
'Ward',
'Peterson',
)
def add_arguments(self, parser):
parser.add_argument('--provider',
action='store', dest='provider',
default=settings.SAAS['BROKER']['GET_INSTANCE'],
help='create sample subscribers on this provider')
def handle(self, *args, **options):
#pylint: disable=too-many-locals,too-many-statements
from saas.managers.metrics import month_periods # avoid import loop
from saas.models import (Charge, ChargeItem, Organization, Plan,
Subscription)
RazorpayBackend.bypass_api = True
now = datetime.datetime.utcnow().replace(tzinfo=utc)
from_date = now
from_date = datetime.datetime(
year=from_date.year, month=from_date.month, day=1)
if args:
from_date = datetime.datetime.strptime(
args[0], '%Y-%m-%d')
# Create a set of 3 plans
broker = get_broker()
Plan.objects.get_or_create(
slug='basic',
defaults={
'title': "Basic",
'description': "Basic Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 1000,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='medium',
defaults={
'title': "Medium",
'description': "Medium Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='premium',
defaults={
'title': "Premium",
'description': "Premium Plan",
'period_amount': 18900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 81,
'organization': broker,
'is_active': True
})
# Create Income transactions that represent a growing business.
provider = Organization.objects.get(slug=options['provider'])
processor = Organization.objects.get(pk=PROCESSOR_ID)
for end_period in month_periods(from_date=from_date):
nb_new_customers = random.randint(0, 9)
for _ in range(nb_new_customers):
queryset = Plan.objects.filter(
organization=provider, period_amount__gt=0)
plan = queryset[random.randint(0, queryset.count() - 1)]
created = False
trials = 0
while not created:
try:
first_name = self.FIRST_NAMES[random.randint(
0, len(self.FIRST_NAMES)-1)]
last_name = self.LAST_NAMES[random.randint(
0, len(self.LAST_NAMES)-1)]
full_name = '%s %s' % (first_name, last_name)
slug = slugify('demo%d' % random.randint(1, 1000))
customer, created = Organization.objects.get_or_create(
slug=slug, full_name=full_name)
#pylint: disable=catching-non-exception
except IntegrityError:
trials = trials + 1
if trials > 10:
raise RuntimeError(
'impossible to create a new customer after 10 trials.')
Organization.objects.filter(pk=customer.id).update(
created_at=end_period)
subscription = Subscription.objects.create(
organization=customer, plan=plan,
ends_at=now + datetime.timedelta(days=31))
Subscription.objects.filter(
pk=subscription.id).update(created_at=end_period)
# Insert some churn in %
churn_rate = 2
all_subscriptions = Subscription.objects.filter(
plan__organization=provider)
nb_churn_customers = (all_subscriptions.count()
* churn_rate // 100)
subscriptions = random.sample(list(all_subscriptions),
all_subscriptions.count() - nb_churn_customers)
for subscription in subscriptions:
nb_periods = random.randint(1, 6)
transaction_item = Transaction.objects.new_subscription_order(
subscription, nb_natural_periods=nb_periods,
created_at=end_period)
if transaction_item.dest_amount < 50:
continue
transaction_item.orig_amount = transaction_item.dest_amount
transaction_item.orig_unit = transaction_item.dest_unit
transaction_item.save()
charge = Charge.objects.create(
created_at=transaction_item.created_at,
amount=transaction_item.dest_amount,
customer=subscription.organization,
description='Charge for %d periods' % nb_periods,
last4=1241,
exp_date=datetime_or_now(),
processor=processor,
processor_key=str(transaction_item.pk),
# XXX We can't do that yet because of
# ``PROCESSOR_BACKEND.charge_distribution(self)``
# unit=transaction_item.dest_unit,
state=Charge.CREATED)
charge.created_at = transaction_item.created_at
charge.save()
ChargeItem.objects.create(
invoiced=transaction_item, charge=charge)
charge.payment_successful()
churned = all_subscriptions.exclude(
pk__in=[subscription.pk for subscription in subscriptions])
for subscription in churned:
subscription.ends_at = end_period
subscription.save()
self.stdout.write("%d new and %d churned customers at %s" % (
nb_new_customers, nb_churn_customers, end_period))
| # Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, logging, random
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from django.template.defaultfilters import slugify
from django.utils.timezone import utc
from saas.backends.razorpay_processor import RazorpayBackend
from saas.models import Plan, Transaction, get_broker
from saas.utils import datetime_or_now
from saas.settings import PROCESSOR_ID
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Load the database with random transactions (testing purposes).
"""
USE_OF_SERVICE = 0
PAY_BALANCE = 1
REDEEM = 2
REFUND = 3
CHARGEBACK = 4
WRITEOFF = 5
FIRST_NAMES = (
'Anthony',
'Alexander',
'Alexis',
'Alicia',
'Ashley',
'Benjamin',
'Bruce',
'Chloe',
'Christopher',
'Daniel',
'David',
'Edward',
'Emily',
'Emma',
'Ethan',
'Grace',
'Isabella',
'Jacob',
'James',
'Jayden',
'Jennifer',
'John',
'Julia',
'Lily',
'Lucie',
'Luis',
'Matthew',
'Michael',
'Olivia',
'Ryan',
'Samantha',
'Samuel',
'Scott',
'Sophia',
'Williom',
)
LAST_NAMES = (
'Smith',
'Johnson',
'Williams',
'Jones',
'Brown',
'Davis',
'Miller',
'Wilson',
'Moore',
'Taylor',
'Anderson',
'Thomas',
'Jackson',
'White',
'Harris',
'Martin',
'Thompson',
'Garcia',
'Martinez',
'Robinson',
'Clark',
'Rogriguez',
'Lewis',
'Lee',
'Walker',
'Hall',
'Allen',
'Young',
'Hernandez',
'King',
'Wright',
'Lopez',
'Hill',
'Green',
'Baker',
'Gonzalez',
'Nelson',
'Mitchell',
'Perez',
'Roberts',
'Turner',
'Philips',
'Campbell',
'Parker',
'Collins',
'Stewart',
'Sanchez',
'Morris',
'Rogers',
'Reed',
'Cook',
'Bell',
'Cooper',
'Richardson',
'Cox',
'Ward',
'Peterson',
)
def add_arguments(self, parser):
parser.add_argument('--provider',
action='store', dest='provider',
default=settings.SAAS['BROKER']['GET_INSTANCE'],
help='create sample subscribers on this provider')
def handle(self, *args, **options):
#pylint: disable=too-many-locals,too-many-statements
from saas.managers.metrics import month_periods # avoid import loop
from saas.models import (Charge, ChargeItem, Organization, Plan,
Subscription)
RazorpayBackend.bypass_api = True
now = datetime.datetime.utcnow().replace(tzinfo=utc)
from_date = now
from_date = datetime.datetime(
year=from_date.year, month=from_date.month, day=1)
if args:
from_date = datetime.datetime.strptime(
args[0], '%Y-%m-%d')
# Create a set of 3 plans
broker = get_broker()
Plan.objects.get_or_create(
slug='basic',
defaults={
'title': "Basic",
'description': "Basic Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 1000,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='medium',
defaults={
'title': "Medium",
'description': "Medium Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='premium',
defaults={
'title': "Premium",
'description': "Premium Plan",
'period_amount': 18900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 81,
'organization': broker,
'is_active': True
})
# Create Income transactions that represent a growing business.
provider = Organization.objects.get(slug=options['provider'])
processor = Organization.objects.get(pk=PROCESSOR_ID)
for end_period in month_periods(from_date=from_date):
nb_new_customers = random.randint(0, 9)
for _ in range(nb_new_customers):
queryset = Plan.objects.filter(
organization=provider, period_amount__gt=0)
plan = queryset[random.randint(0, queryset.count() - 1)]
created = False
trials = 0
while not created:
try:
first_name = self.FIRST_NAMES[random.randint(
0, len(self.FIRST_NAMES)-1)]
last_name = self.LAST_NAMES[random.randint(
0, len(self.LAST_NAMES)-1)]
full_name = '%s %s' % (first_name, last_name)
slug = slugify('demo%d' % random.randint(1, 1000))
customer, created = Organization.objects.get_or_create(
slug=slug, full_name=full_name)
#pylint: disable=catching-non-exception
except IntegrityError:
trials = trials + 1
if trials > 10:
raise RuntimeError(
'impossible to create a new customer after 10 trials.')
Organization.objects.filter(pk=customer.id).update(
created_at=end_period)
subscription = Subscription.objects.create(
organization=customer, plan=plan,
ends_at=now + datetime.timedelta(days=31))
Subscription.objects.filter(
pk=subscription.id).update(created_at=end_period)
# Insert some churn in %
churn_rate = 2
all_subscriptions = Subscription.objects.filter(
plan__organization=provider)
nb_churn_customers = (all_subscriptions.count()
* churn_rate // 100)
subscriptions = random.sample(list(all_subscriptions),
all_subscriptions.count() - nb_churn_customers)
for subscription in subscriptions:
nb_periods = random.randint(1, 6)
transaction_item = Transaction.objects.new_subscription_order(
subscription, nb_natural_periods=nb_periods,
created_at=end_period)
if transaction_item.dest_amount < 50:
continue
transaction_item.orig_amount = transaction_item.dest_amount
transaction_item.orig_unit = transaction_item.dest_unit
transaction_item.save()
charge = Charge.objects.create(
created_at=transaction_item.created_at,
amount=transaction_item.dest_amount,
customer=subscription.organization,
description='Charge for %d periods' % nb_periods,
last4=1241,
exp_date=datetime_or_now(),
processor=processor,
processor_key=str(transaction_item.pk),
# XXX We can't do that yet because of
# ``PROCESSOR_BACKEND.charge_distribution(self)``
# unit=transaction_item.dest_unit,
state=Charge.CREATED)
charge.created_at = transaction_item.created_at
charge.save()
ChargeItem.objects.create(
invoiced=transaction_item, charge=charge)
charge.payment_successful()
churned = all_subscriptions.exclude(
pk__in=[subscription.pk for subscription in subscriptions])
for subscription in churned:
subscription.ends_at = end_period
subscription.save()
self.stdout.write("%d new and %d churned customers at %s" % (
nb_new_customers, nb_churn_customers, end_period))
| en | 0.70082 | # Copyright (c) 2018, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Load the database with random transactions (testing purposes). #pylint: disable=too-many-locals,too-many-statements # avoid import loop # Create a set of 3 plans # Create Income transactions that represents a growing bussiness. #pylint: disable=catching-non-exception # Insert some churn in % # XXX We can't do that yet because of # ``PROCESSOR_BACKEND.charge_distribution(self)`` # unit=transaction_item.dest_unit, | 1.5163 | 2 |
seq2seq_pt/s2s/xutils.py | magic282/SEASS | 36 | 9096 | <reponame>magic282/SEASS<gh_stars>10-100
import sys
import struct
def save_sf_model(model):
name_dicts = {'encoder.word_lut.weight': 'SrcWordEmbed_Embed_W',
'encoder.forward_gru.linear_input.weight': 'EncGRUL2R_GRU_W',
'encoder.forward_gru.linear_input.bias': 'EncGRUL2R_GRU_B',
'encoder.forward_gru.linear_hidden.weight': 'EncGRUL2R_GRU_U',
'encoder.backward_gru.linear_input.weight': 'EncGRUR2L_GRU_W',
'encoder.backward_gru.linear_input.bias': 'EncGRUR2L_GRU_B',
'encoder.backward_gru.linear_hidden.weight': 'EncGRUR2L_GRU_U',
'decoder.word_lut.weight': 'TrgWordEmbed_Embed_W',
'decoder.rnn.layers.0.linear_input.weight': 'DecGRU_GRU_W',
'decoder.rnn.layers.0.linear_input.bias': 'DecGRU_GRU_B',
'decoder.rnn.layers.0.linear_hidden.weight': 'DecGRU_GRU_U',
'decoder.attn.linear_pre.weight': 'Alignment_ConcatAtt_W',
'decoder.attn.linear_pre.bias': 'Alignment_ConcatAtt_B',
'decoder.attn.linear_q.weight': 'Alignment_ConcatAtt_U',
'decoder.attn.linear_v.weight': 'Alignment_ConcatAtt_v',
'decoder.readout.weight': 'Readout_Linear_W',
'decoder.readout.bias': 'Readout_Linear_b',
'decIniter.initer.weight': 'DecInitial_Linear_W',
'decIniter.initer.bias': 'DecInitial_Linear_b',
'generator.0.weight': 'Scoring_Linear_W',
'generator.0.bias': 'Scoring_Linear_b', }
nParams = sum([p.nelement() for p in model.parameters()])
# logger.info('* number of parameters: %d' % nParams)
b_count = 0
of = open('model', 'wb')
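# per-parameter binary layout: 4-byte name length (native byte order), UTF-16-LE name bytes,
# one 4-byte int per tensor dimension, then the raw float32 values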
for name, param in model.named_parameters():
# logger.info('[{0}] [{1}] [{2}]'.format(name, param.size(), param.nelement()))
SF_name = name_dicts[name]
# print(SF_name)
byte_name = bytes(SF_name, 'utf-16-le')
name_size = len(byte_name)
byte_name_size = name_size.to_bytes(4, sys.byteorder)
of.write(byte_name_size)
of.write(byte_name)
b_count += len(byte_name_size)
b_count += len(byte_name)
d = param.data.cpu()
if param.dim() == 1:
d = d.unsqueeze(0)
elif not SF_name.endswith('Embed_W'):
d = d.transpose(0, 1).contiguous()
for dim in d.size():
dim_byte = dim.to_bytes(4, sys.byteorder)
of.write(dim_byte)
b_count += len(dim_byte)
datas = d.view(-1).numpy().tolist()
float_array = struct.pack('f' * len(datas), *datas)
b_count += len(float_array)
of.write(float_array)
of.close()
# print('Total write {0} bytes'.format(b_count))
| import sys
import struct
def save_sf_model(model):
name_dicts = {'encoder.word_lut.weight': 'SrcWordEmbed_Embed_W',
'encoder.forward_gru.linear_input.weight': 'EncGRUL2R_GRU_W',
'encoder.forward_gru.linear_input.bias': 'EncGRUL2R_GRU_B',
'encoder.forward_gru.linear_hidden.weight': 'EncGRUL2R_GRU_U',
'encoder.backward_gru.linear_input.weight': 'EncGRUR2L_GRU_W',
'encoder.backward_gru.linear_input.bias': 'EncGRUR2L_GRU_B',
'encoder.backward_gru.linear_hidden.weight': 'EncGRUR2L_GRU_U',
'decoder.word_lut.weight': 'TrgWordEmbed_Embed_W',
'decoder.rnn.layers.0.linear_input.weight': 'DecGRU_GRU_W',
'decoder.rnn.layers.0.linear_input.bias': 'DecGRU_GRU_B',
'decoder.rnn.layers.0.linear_hidden.weight': 'DecGRU_GRU_U',
'decoder.attn.linear_pre.weight': 'Alignment_ConcatAtt_W',
'decoder.attn.linear_pre.bias': 'Alignment_ConcatAtt_B',
'decoder.attn.linear_q.weight': 'Alignment_ConcatAtt_U',
'decoder.attn.linear_v.weight': 'Alignment_ConcatAtt_v',
'decoder.readout.weight': 'Readout_Linear_W',
'decoder.readout.bias': 'Readout_Linear_b',
'decIniter.initer.weight': 'DecInitial_Linear_W',
'decIniter.initer.bias': 'DecInitial_Linear_b',
'generator.0.weight': 'Scoring_Linear_W',
'generator.0.bias': 'Scoring_Linear_b', }
nParams = sum([p.nelement() for p in model.parameters()])
# logger.info('* number of parameters: %d' % nParams)
b_count = 0
of = open('model', 'wb')
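# per-parameter binary layout: 4-byte name length (native byte order), UTF-16-LE name bytes,
# one 4-byte int per tensor dimension, then the raw float32 values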
for name, param in model.named_parameters():
# logger.info('[{0}] [{1}] [{2}]'.format(name, param.size(), param.nelement()))
SF_name = name_dicts[name]
# print(SF_name)
byte_name = bytes(SF_name, 'utf-16-le')
name_size = len(byte_name)
byte_name_size = name_size.to_bytes(4, sys.byteorder)
of.write(byte_name_size)
of.write(byte_name)
b_count += len(byte_name_size)
b_count += len(byte_name)
d = param.data.cpu()
if param.dim() == 1:
d = d.unsqueeze(0)
elif not SF_name.endswith('Embed_W'):
d = d.transpose(0, 1).contiguous()
for dim in d.size():
dim_byte = dim.to_bytes(4, sys.byteorder)
of.write(dim_byte)
b_count += len(dim_byte)
datas = d.view(-1).numpy().tolist()
float_array = struct.pack('f' * len(datas), *datas)
b_count += len(float_array)
of.write(float_array)
of.close()
# print('Total write {0} bytes'.format(b_count)) | en | 0.166999 | # logger.info('* number of parameters: %d' % nParams) # logger.info('[{0}] [{1}] [{2}]'.format(name, param.size(), param.nelement())) # print(SF_name) # print('Total write {0} bytes'.format(b_count)) | 1.909983 | 2 |
pml-services/pml_storage.py | Novartis/Project-Mona-Lisa | 3 | 9097 | <reponame>Novartis/Project-Mona-Lisa
# Copyright 2017 Novartis Institutes for BioMedical Research Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
from boto3.dynamodb.conditions import Key
from random import randint
import os
import base64
class PMLStorage:
""" Project Mona Lisa Storage class.
"""
def __init__(self, storage_name):
self.storage_name = storage_name
def get_bucket(self):
"""
Returns:
(obj): The boto3 AWS S3 bucket object.
"""
s3 = boto3.resource('s3', region_name='TODO')
return s3.Bucket(self.storage_name)
def get_item_from_storage(self, item_key):
""" Get method for a image data in ML-PRJ image storage.
Args:
bucket_name (str): name for the storage.
item_key (str): key or filename for the item in storage.
Returns:
item (obj)
"""
# get the image data in the S3 bucket
img_obj = self.get_bucket().Object(item_key)
return str(img_obj.get()['Body'].read())
def post_item_in_storage(self, key, body, type='png'):
""" Posting collected image data in storage.
Args:
key (str): The unique key.
body (obj): the bulk data to be stored.
type (str): file suffix. The default is 'png'.
Returns:
bool: True if successful, otherwise, an error will
be thrown.
"""
self.get_bucket().put_object(
Key=key+str('.')+type,
Body=body,
ServerSideEncryption='AES256',
ContentType='img/'+type,
)
return True
def download_imgs(self, load_fns, save_dir):
""" Downloads all files in <load_fns> from storage to
the directory <save_dir>.
Args:
load_fns (list(str)): A list of strings of the filenames
of the files to be downloaded.
save_dir (str): A string of the source directory to
save the files. Formatted as:
/full/path/to/dir ... without a '/' character at
the end of the <save_dir>.
Returns:
bool: True if successful, otherwise, an error will
be thrown.
"""
print('downloading images from s3 . . .')
bucket = self.get_bucket()
pre_existing_fns = os.listdir(save_dir)
count = 0
for filename in load_fns:
count += 1
print(count)
if filename in pre_existing_fns:
continue
bucket.download_file(filename, save_dir + '/' + filename)
return True
def get_all_filenames(self):
""" Gets all filenames in storage.
Returns:
list(str): A list of all of the filenames
in the bucket, as a list of strings.
"""
iterobjs = self.get_bucket().objects.all()
filenames = [obj.key for obj in iterobjs]
return filenames
def remove_items(self, filenames):
""" Removes, from storage, all files from <filenames>.
Args:
filenames list(str): List of filenames, where
each filename is a string, of the filename contained
in the bucket.
Returns:
bool: True if successful, otherwise an error is thrown.
"""
bucket = self.get_bucket()
fn_objects = [{'Key': fn} for fn in filenames]
bucket.delete_objects(
Delete={
'Objects': fn_objects
}
)
return True
| # Copyright 2017 Novartis Institutes for BioMedical Research Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
from boto3.dynamodb.conditions import Key
from random import randint
import os
import base64
class PMLStorage:
    """ Project Mona Lisa Storage class.
    """

    def __init__(self, storage_name):
        self.storage_name = storage_name

    def get_bucket(self):
        """
        Returns:
            (obj): The boto3 AWS S3 bucket object.
        """
        s3 = boto3.resource('s3', region_name='TODO')
        return s3.Bucket(self.storage_name)

    def get_item_from_storage(self, item_key):
        """ Get method for image data in ML-PRJ image storage.
        Args:
            item_key (str): key or filename for the item in storage.
        Returns:
            item (obj)
        """
        # get the image data in the S3 bucket
        img_obj = self.get_bucket().Object(item_key)
        return str(img_obj.get()['Body'].read())

    def post_item_in_storage(self, key, body, type='png'):
        """ Posts collected image data in storage.
        Args:
            key (str): The unique key.
            body (obj): the bulk data to be stored.
            type (str): file suffix. The default is 'png'.
        Returns:
            bool: True if successful, otherwise, an error will
                be thrown.
        """
        self.get_bucket().put_object(
            Key=key + '.' + type,
            Body=body,
            ServerSideEncryption='AES256',
            ContentType='image/' + type,  # standard MIME prefix for image types
        )
        return True

    def download_imgs(self, load_fns, save_dir):
        """ Downloads all files in <load_fns> from storage to
        the directory <save_dir>.
        Args:
            load_fns (list(str)): A list of strings of the filenames
                of the files to be downloaded.
            save_dir (str): A string of the destination directory to
                save the files. Formatted as:
                /full/path/to/dir ... without a '/' character at
                the end of the <save_dir>.
        Returns:
            bool: True if successful, otherwise, an error will
                be thrown.
        """
        print('downloading images from s3 . . .')
        bucket = self.get_bucket()
        pre_existing_fns = os.listdir(save_dir)
        count = 0
        for filename in load_fns:
            count += 1
            print(count)
            if filename in pre_existing_fns:
                continue
            bucket.download_file(filename, save_dir + '/' + filename)
        return True

    def get_all_filenames(self):
        """ Gets all filenames in storage.
        Returns:
            list(str): A list of all of the filenames
            in the bucket, as a list of strings.
        """
        iterobjs = self.get_bucket().objects.all()
        filenames = [obj.key for obj in iterobjs]
        return filenames

    def remove_items(self, filenames):
        """ Removes, from storage, all files from <filenames>.
        Args:
            filenames (list(str)): List of filenames, where
                each filename is a string, of the filename contained
                in the bucket.
        Returns:
            bool: True if successful, otherwise an error is thrown.
        """
        bucket = self.get_bucket()
        fn_objects = [{'Key': fn} for fn in filenames]
        bucket.delete_objects(
            Delete={
                'Objects': fn_objects
            }
        )
return True | en | 0.760313 | # Copyright 2017 Novartis Institutes for BioMedical Research Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Project Mona Lisa Storage class. Returns: (obj): The boto3 AWS S3 bucket object. Get method for a image data in ML-PRJ image storage. Args: bucket_name (str): name for the storage. item_key (str): key or filename for the item in storage. Returns: item (obj) # get the image data in the S3 bucket Posting collected image data in storage. Args: key (str): The unique key. body (obj): the bulk data to be stored. type (str): file suffix. The default is 'png'. Returns: bool: True if successful, otherwise, an error will be thrown. Downloads all files in <load_fns> from storage to the directory <save_dir>. Args: load_fns (list(str)): A list of strings of the filenames of the files to be downloaded. save_dir (str): A string of the source directory to save the files. Formatted as: /full/path/to/dir ... without a '/' character at the end of the <save_dir>. Returns: bool: True if successful, otherwise, an error will be thrown. Gets all filenames in storage. Returns: list(str): A list of all of the filenames in the bucket, as a list of strings. Removes, from storage, all files from <filenames>. Args: filenames list(str): List of filenames, where each filename is a string, of the filename contained in the bucket. Returns: bool: True if successful, otherwise an error is thrown. | 2.057537 | 2 |
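A minimal usage sketch for the PMLStorage class above. The bucket name, file names, and local directory are placeholders, and real calls additionally need AWS credentials plus a concrete region_name in get_bucket (the file leaves it as 'TODO'):

storage = PMLStorage("my-example-bucket")  # hypothetical bucket name
with open("mona_lisa.png", "rb") as f:     # hypothetical local image file
    storage.post_item_in_storage("mona_lisa", f.read(), type="png")
print(storage.get_all_filenames())         # every object key in the bucket
storage.download_imgs(storage.get_all_filenames(), "/tmp/pml_images")  # hypothetical target dir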
binan.py | Nightleaf0512/PythonCryptoCurriencyPriceChecker | 0 | 9098 | <gh_stars>0
from binance.client import Client
import PySimpleGUI as sg
api_key = "your_binance_apikey"
secret_key = "your_binance_secretkey"
client = Client(api_key=api_key, api_secret=secret_key)
# price
def get_price(coin):
    return round(float(client.get_symbol_ticker(symbol=f"{coin}USDT")['price']), 5)

def column_layout_price(coin):
    col = [[sg.Text(f"{get_price(coin)}", font=("Arial", 9, 'bold'), size=(10,1), pad=(15,10), key=coin)]]
    return col

# 24h percent change
def price_change_24h(coin):
    return round(float(client.get_ticker(symbol=f"{coin}USDT")["priceChangePercent"]), 2)

def column_layout_change(coin):
    if price_change_24h(coin) == 0:
        return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="black", key=f"{coin}change")]]
    elif price_change_24h(coin) > 0:
        return [[sg.Text(f"+{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="green", key=f"{coin}change")]]
    return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="red", key=f"{coin}change")]]

def update_24h_change(coin):
    if price_change_24h(coin) == 0:
        window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="black")
    elif price_change_24h(coin) > 0:
        window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="green")
    elif price_change_24h(coin) < 0:
        window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="red")

# GUI
sg.theme('DefaultNoMoreNagging')

# Tabs
def tabs(coin):
    tab_layout = [[sg.Image("{}.png".format(coin), size=(50,50)),
                   sg.Text("Price", font=("Arial", 10, 'bold'), size=(7,1), pad=(40,40)), sg.Text("24h change", font=("Arial", 10, 'bold'), size=(10,1), pad=(10,40))],
                  [sg.Text(f"{coin}/USDT", font=("Arial", 9, 'bold')), sg.Column(column_layout_price(coin)), sg.Column(column_layout_change(coin))]]
    return tab_layout

# Layout
layout = [[sg.Text("Crypto Currencies", font=("Arial", 10, 'bold'))],
          [sg.TabGroup([[sg.Tab("BTC", tabs("BTC"), border_width="18"), sg.Tab("XRP", tabs("XRP"), border_width="18"), sg.Tab("DOGE", tabs("DOGE"), border_width="18")]])]]
window = sg.Window("NightLeaf Crypto", layout)

def coin_window(*coins):
    for coin in coins:
        globals()[f"{coin}_last_price"] = 1
    while True:
        event, values = window.read(timeout=600)
        if event == sg.WIN_CLOSED:
            break
        for coin in coins:
            update_24h_change(coin)
            price = get_price(coin)
            if price != globals()[f"{coin}_last_price"]:
                if price > globals()[f"{coin}_last_price"]:
                    window[f"{coin}"].update(f"{price} 🠕", text_color="green")
                elif price < globals()[f"{coin}_last_price"]:
                    window[f"{coin}"].update(f"{price} 🠗", text_color="red")
                globals()[f"{coin}_last_price"] = price

a_list = ["BTC", "XRP", "DOGE"]
coin_window(*a_list)
| from binance.client import Client
import PySimpleGUI as sg
api_key = "your_binance_apikey"
secret_key = "your_binance_secretkey"
client = Client(api_key=api_key, api_secret=secret_key)
# price
def get_price(coin):
    return round(float(client.get_symbol_ticker(symbol=f"{coin}USDT")['price']), 5)

def column_layout_price(coin):
    col = [[sg.Text(f"{get_price(coin)}", font=("Arial", 9, 'bold'), size=(10,1), pad=(15,10), key=coin)]]
    return col

# 24h percent change
def price_change_24h(coin):
    return round(float(client.get_ticker(symbol=f"{coin}USDT")["priceChangePercent"]), 2)

def column_layout_change(coin):
    if price_change_24h(coin) == 0:
        return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="black", key=f"{coin}change")]]
    elif price_change_24h(coin) > 0:
        return [[sg.Text(f"+{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="green", key=f"{coin}change")]]
    return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="red", key=f"{coin}change")]]

def update_24h_change(coin):
    if price_change_24h(coin) == 0:
        window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="black")
    elif price_change_24h(coin) > 0:
        window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="green")
    elif price_change_24h(coin) < 0:
        window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="red")

# GUI
sg.theme('DefaultNoMoreNagging')

# Tabs
def tabs(coin):
    tab_layout = [[sg.Image("{}.png".format(coin), size=(50,50)),
                   sg.Text("Price", font=("Arial", 10, 'bold'), size=(7,1), pad=(40,40)), sg.Text("24h change", font=("Arial", 10, 'bold'), size=(10,1), pad=(10,40))],
                  [sg.Text(f"{coin}/USDT", font=("Arial", 9, 'bold')), sg.Column(column_layout_price(coin)), sg.Column(column_layout_change(coin))]]
    return tab_layout

# Layout
layout = [[sg.Text("Crypto Currencies", font=("Arial", 10, 'bold'))],
          [sg.TabGroup([[sg.Tab("BTC", tabs("BTC"), border_width="18"), sg.Tab("XRP", tabs("XRP"), border_width="18"), sg.Tab("DOGE", tabs("DOGE"), border_width="18")]])]]
window = sg.Window("NightLeaf Crypto", layout)

def coin_window(*coins):
    for coin in coins:
        globals()[f"{coin}_last_price"] = 1
    while True:
        event, values = window.read(timeout=600)
        if event == sg.WIN_CLOSED:
            break
        for coin in coins:
            update_24h_change(coin)
            price = get_price(coin)
            if price != globals()[f"{coin}_last_price"]:
                if price > globals()[f"{coin}_last_price"]:
                    window[f"{coin}"].update(f"{price} 🠕", text_color="green")
                elif price < globals()[f"{coin}_last_price"]:
                    window[f"{coin}"].update(f"{price} 🠗", text_color="red")
                globals()[f"{coin}_last_price"] = price

a_list = ["BTC", "XRP", "DOGE"]
coin_window(*a_list) | en | 0.49165 | # price # 24h percentchange # GUI # Tabs # Layout | 3.007778 | 3 |
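A short sketch of how the pricing helpers in this record could be reused outside the GUI loop; it assumes real Binance API keys in place of the placeholder strings and network access to the exchange:

for symbol in ("BTC", "XRP", "DOGE"):
    # get_price and price_change_24h are the helpers defined in the script above
    print(symbol, get_price(symbol), price_change_24h(symbol))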
saleor/graphql/ushop/bulk_mutations.py | nlkhagva/saleor | 0 | 9099 | <reponame>nlkhagva/saleor
import graphene
from ...unurshop.ushop import models
from ..core.mutations import BaseBulkMutation, ModelBulkDeleteMutation
class UshopBulkDelete(ModelBulkDeleteMutation):
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of ushop IDs to delete."
        )

    class Meta:
        description = "Deletes shops."
        model = models.Shop
        permissions = ("page.manage_pages",)

class UshopBulkPublish(BaseBulkMutation):
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of ushop IDs to (un)publish."
        )
        is_published = graphene.Boolean(
            required=True, description="Determine if ushops will be published or not."
        )

    class Meta:
        description = "Publish ushops."
        model = models.Shop
        permissions = ("page.manage_pages",)

    @classmethod
    def bulk_action(cls, queryset, is_published):
        queryset.update(is_published=is_published)
| import graphene
from ...unurshop.ushop import models
from ..core.mutations import BaseBulkMutation, ModelBulkDeleteMutation
class UshopBulkDelete(ModelBulkDeleteMutation):
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of ushop IDs to delete."
        )

    class Meta:
        description = "Deletes shops."
        model = models.Shop
        permissions = ("page.manage_pages",)

class UshopBulkPublish(BaseBulkMutation):
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of ushop IDs to (un)publish."
        )
        is_published = graphene.Boolean(
            required=True, description="Determine if ushops will be published or not."
        )

    class Meta:
        description = "Publish ushops."
        model = models.Shop
        permissions = ("page.manage_pages",)

    @classmethod
    def bulk_action(cls, queryset, is_published):
queryset.update(is_published=is_published) | none | 1 | 2.221384 | 2 |
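A sketch of what UshopBulkPublish.bulk_action does to a queryset, using the Shop model imported in this module; the primary keys are hypothetical, and in the real API the mutation is invoked through the GraphQL schema rather than called directly:

qs = models.Shop.objects.filter(pk__in=[1, 2, 3])     # hypothetical shop IDs
UshopBulkPublish.bulk_action(qs, is_published=True)   # marks the selected shops as published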