content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
import keras
from sklearn.metrics import roc_auc_score
from src.predictionAlgorithms.machineLearning.helpers.validation import Validation
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
class Callbacks(keras.callbacks.Callback):
validationSequences = []
algorithm = None
number = 1
validation_frequency = 1
size = 64
step = 1
base = 4
def set_step(self, step):
self.step = step
return self
def set_base(self, base):
self.base = base
return self
def set_size(self, size):
self.size = size
return self
def set_validation_frequency(self, frequency):
self.validation_frequency = frequency
return self
def set_validation_data(self, validation_data):
self.validationSequences = validation_data
return self
def set_algorithm(self, algorithm):
self.algorithm = algorithm
return self
def on_train_begin(self, logs={}):
# Initialize the lists for holding the logs, losses and accuracies
self.losses = []
self.acc = []
self.val_losses = []
self.val_acc = []
self.logs = []
epoch_graphs = glob.glob('../output/*')
for f in epoch_graphs:
os.remove(f)
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
if self.number % self.validation_frequency != 0:
self.number += 1
return
validation = Validation()
validation.set_validation_data(self.validationSequences)\
.set_dimensions(self.size)\
.set_base(self.base)\
.set_step(self.step)\
.validate(self.algorithm)
self.number += 1
self.logs.append(logs)
self.losses.append(logs.get('loss'))
self.acc.append(logs.get('acc'))
self.val_losses.append(logs.get('val_loss'))
self.val_acc.append(logs.get('val_acc'))
if len(self.losses) > 1:
N = np.arange(0, len(self.losses))
plt.figure()
plt.plot(N, self.losses, label="train_loss")
plt.plot(N, self.acc, label="train_acc")
plt.plot(N, self.val_losses, label="val_loss")
plt.plot(N, self.val_acc, label="val_acc")
plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig('../output/Epoch-{}.png'.format(epoch))
plt.close()
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
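# --- Usage sketch (editorial illustration, not part of the original file) ---
# The names `model`, `x_train`, `y_train`, `x_val`, `y_val`, `validation_sequences`
# and `algorithm` below are assumptions; `algorithm` is whatever object
# Validation.validate() expects in this project.
#
# callbacks = Callbacks() \
#     .set_validation_data(validation_sequences) \
#     .set_algorithm(algorithm) \
#     .set_validation_frequency(2) \
#     .set_size(64)
# model.fit(x_train, y_train,
#           validation_data=(x_val, y_val),  # needed so logs contain val_loss/val_acc
#           epochs=20,
#           callbacks=[callbacks])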
| 28 | 82 | 0.597595 | ["MIT"] | aivaras-ciurlionis/meteo | src/predictionAlgorithms/machineLearning/helpers/callbacks.py | 2,828 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tokenize
from hacking import core
LOCALS_TEXT_MAP = {
'locals': 'locals()',
'self': 'self.__dict__'
}
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
"""Do not use locals() or self.__dict__ for string formatting.
Okay: 'locals()'
Okay: 'locals'
Okay: locals()
Okay: print(locals())
H501: print("%(something)" % locals())
H501: LOG.info(_("%(something)") % self.__dict__)
Okay: print("%(something)" % locals()) # noqa
"""
if noqa:
return
for_formatting = False
for token_type, text, start, _, _ in tokens:
if text == "%" and token_type == tokenize.OP:
for_formatting = True
if for_formatting and token_type == tokenize.NAME:
for k, v in LOCALS_TEXT_MAP.items():
if text == k and v in logical_line:
yield (start[1],
"H501: Do not use %s for string formatting" % v)
| 32.617021 | 76 | 0.643183 | ["Apache-2.0"] | UbuntuEvangelist/hacking | hacking/checks/dictlist.py | 1,533 | Python |
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-11-30
# Python 3.4
"""
Problem 0001: As an independent Apple App Store developer, you want to run a limited-time promotion and generate activation codes (or coupons) for your app. How would you generate 200 activation codes (or coupons) with Python?
"""
import uuid
def generate_key():
key_list = []
for i in range(200):
uuid_key = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.uuid1()))
key_list.append(str(uuid_key).replace('-', ''))
return key_list
if __name__ == '__main__':
print(generate_key())
| 19.076923 | 68 | 0.663306 | ["MIT"] | 11deepak0209/python | renzongxian/0001/0001.py | 606 | Python |
from art import logo_blackjack
from replit import clear
import random
def deal_card():
"""Return random card"""
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
card = random.choice(cards)
return card
def calculate_score(cards):
"""Take a list of cards and return the score"""
if sum(cards) == 21 and len(cards) == 2:
return 0
if 11 in cards and sum(cards) > 21:
cards.remove(11)
cards.append(1)
return sum(cards)
def compare(current_score_of_user, current_score_of_computer):
if current_score_of_user > 21 and current_score_of_computer > 21:
return "You went over. You lose"
if current_score_of_user == current_score_of_computer:
return "DRAW"
elif current_score_of_computer == 0:
return "You lose. Opponent has a blackjack"
elif current_score_of_user == 0:
return "You win with blackjack"
elif current_score_of_user > 21:
return "You went over. You lose"
elif current_score_of_computer > 21:
return "Opponent went over. You win"
elif current_score_of_user > current_score_of_computer:
return "You win"
else:
return "You lose"
def play_game():
print(logo_blackjack)
user_cards = []
computer_cards = []
is_game_over = False
for i in range(2):
user_cards.append(deal_card())
computer_cards.append(deal_card())
while not is_game_over:
current_score_of_user = calculate_score(user_cards)
current_score_of_computer = calculate_score(computer_cards)
print(f"Your cards: {user_cards} and current score of yours: {current_score_of_user}")
print(f"Computer's first card: [{computer_cards[0]}]")
if current_score_of_user == 0 or current_score_of_computer == 0 or current_score_of_user > 21:
is_game_over = True
else:
want_card = input("To get another card type 'y', to pass type 'n': ")
if want_card == "y":
user_cards.append(deal_card())
else:
is_game_over = True
while current_score_of_computer != 0 and current_score_of_computer < 17:
computer_cards.append(deal_card())
current_score_of_computer = calculate_score(computer_cards)
print(f"Your final hand: {user_cards} and final score: {current_score_of_user}")
print(f"Computer's final hand: {computer_cards}, final score: {current_score_of_computer}")
print(compare(current_score_of_user, current_score_of_computer))
while input("Do you want to play a game of blackjack? Type 'y' or 'n': ") == "y":
clear()
play_game()
| 36.123288 | 102 | 0.665908 | ["Apache-2.0"] | Yunram/python_training | Programs/day_11_blackjack.py | 2,637 | Python |
#quest by zerghase
import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "43_HelpTheSister"
COOPER=30829
GALLADUCCI=30097
CRAFTED_DAGGER=220
MAP_PIECE=7550
MAP=7551
PET_TICKET=7584
SPECTER=20171
SORROW_MAIDEN=20197
MAX_COUNT=30
MIN_LEVEL=26
class Quest (JQuest) :
def onEvent(self, event, st):
htmltext=event
if event=="1":
htmltext="30829-01.htm"
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event=="3" and st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-03.htm"
st.takeItems(CRAFTED_DAGGER,1)
st.set("cond","2")
elif event=="4" and st.getQuestItemsCount(MAP_PIECE)>=MAX_COUNT:
htmltext="30829-05.htm"
st.takeItems(MAP_PIECE,MAX_COUNT)
st.giveItems(MAP,1)
st.set("cond", "4")
elif event=="5" and st.getQuestItemsCount(MAP):
htmltext="30097-06.htm"
st.takeItems(MAP,1)
st.set("cond","5")
elif event=="7":
htmltext="30829-07.htm"
st.giveItems(PET_TICKET,1)
st.setState(COMPLETED)
st.exitQuest(0)
return htmltext
def onTalk(self, npc, player):
htmltext="<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId=npc.getNpcId()
id=st.getState()
if id==CREATED:
if player.getLevel()>=MIN_LEVEL:
htmltext="30829-00.htm"
else:
st.exitQuest(1)
htmltext="<html><body>This quest can only be taken by characters that have a minimum level of %s. Return when you are more experienced.</body></html>" % MIN_LEVEL
elif id==STARTED:
cond=st.getInt("cond")
if npcId==COOPER:
if cond==1:
if not st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-01a.htm"
else:
htmltext="30829-02.htm"
elif cond==2:
htmltext="30829-03a.htm"
elif cond==3:
htmltext="30829-04.htm"
elif cond==4:
htmltext="30829-05a.htm"
elif cond==5:
htmltext="30829-06.htm"
elif npcId==GALLADUCCI:
if cond==4 and st.getQuestItemsCount(MAP):
htmltext="30097-05.htm"
elif id==COMPLETED:
st.exitQuest(0)
htmltext="<html><body>This quest has already been completed.</body></html>"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
cond=st.getInt("cond")
if cond==2:
numItems,chance = divmod(100*Config.RATE_QUESTS_REWARD,100)
if st.getRandom(100) < chance :
numItems = numItems +1
pieces=st.getQuestItemsCount(MAP_PIECE)
if pieces + numItems >= MAX_COUNT :
numItems = MAX_COUNT - pieces
if numItems != 0:
st.playSound("ItemSound.quest_middle")
st.set("cond", "3")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(MAP_PIECE,int(numItems))
return
QUEST=Quest(43,qn,"Help The Sister!")
CREATED=State('Start', QUEST)
STARTED=State('Started', QUEST)
COMPLETED=State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(COOPER)
QUEST.addTalkId(COOPER)
QUEST.addTalkId(GALLADUCCI)
QUEST.addKillId(SPECTER)
QUEST.addKillId(SORROW_MAIDEN)
| 28.919355 | 170 | 0.652259 | ["Unlicense"] | DigitalCoin1/L2SPERO | datapack/data/scripts/quests/43_HelpTheSister/__init__.py | 3,586 | Python |
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn.linear."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class LinearTest(parameterized.TestCase):
def test_dense(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
self.assertEqual(y.shape, (1, 4))
np.testing.assert_allclose(y, np.full((1, 4), 4.))
def test_dense_extra_batch_dims(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 4), 4.))
def test_dense_no_bias(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
use_bias=False,
kernel_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 4), 3.))
def test_dense_is_dense_general(self):
x = jax.random.normal(random.PRNGKey(0), (5, 3))
dense_module = nn.Dense(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y1, _ = dense_module.init_with_output(dict(params=random.PRNGKey(1)), x)
dg_module = nn.DenseGeneral(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y2, _ = dg_module.init_with_output(dict(params=random.PRNGKey(1)), x)
np.testing.assert_allclose(y1, y2)
def test_dense_general_batch_dim_raises(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3, 2, 5))
with self.assertRaises(ValueError):
dg_module = nn.DenseGeneral(
features=4,
batch_dims=(0, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
dg_module.init_with_output(rng, x)
def test_dense_general_two_out(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dg_module = nn.DenseGeneral(
features=(2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 2), 4.))
def test_dense_general_two_in(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 2))
dg_module = nn.DenseGeneral(
features=3,
axis=(-2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 3), 5.))
def test_dense_general_batch_dim(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((2, 1, 3, 5))
state = {'counter': 0.}
def _counter_init(rng, shape, dtype, state):
del rng, dtype
state['counter'] += 1.
return jnp.full(shape, state['counter'])
counter_init = functools.partial(_counter_init, state=state)
dg_module = nn.DenseGeneral(
features=7,
axis=(3, -2),
batch_dims=0,
bias_init=initializers.ones,
kernel_init=counter_init,
)
y, _ = dg_module.init_with_output(rng, x)
target = np.concatenate(
[np.full((1, 1, 7), 16.), np.full((1, 1, 7), 31.)], axis=0)
np.testing.assert_allclose(y, target)
@parameterized.parameters([((-2, 3), (), 'bijk,jklm->bilm'),
((3, -2), (), 'bijk,jklm->bilm'),
((-2, 3), (0,), 'bijk,bjklm->bilm')])
def test_dense_general_vs_numpy(self, axis, batch_dims, einsum_expr):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((16, 8, 9, 10))
dg_module = nn.DenseGeneral(
features=(11, 12),
axis=axis,
batch_dims=batch_dims,
bias_init=initializers.ones,
kernel_init=initializers.normal(),
)
y, initial_params = dg_module.init_with_output(rng, x)
target = np.einsum(einsum_expr, x, initial_params['params']['kernel']) + 1.
np.testing.assert_allclose(y, target, atol=1e-6)
@parameterized.parameters([((3,),), (3,)])
def test_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_group_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 4))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
feature_group_count=2,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 2, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 7.))
@parameterized.parameters([((3,),), (3,)])
def test_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]]])
np.testing.assert_allclose(y, correct_ans)
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]])
np.testing.assert_allclose(y, correct_ans)
def test_embed(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.arange(4)[None]
dummy_embedding = jnp.broadcast_to(
jnp.arange(4)[..., None], (4, 3)).astype(jnp.float32)
embed_module = nn.Embed(
num_embeddings=4,
features=3,
embedding_init=lambda rng, shape, dtype: dummy_embedding,
)
y, initial_params = embed_module.init_with_output(rng, x)
np.testing.assert_allclose(y, dummy_embedding[None])
z = embed_module.apply(initial_params, jnp.ones((3,)), method=embed_module.attend)
np.testing.assert_allclose(z, 3. * jnp.arange(4))
def test_non_final_axis(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=1, name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (2, 8, 6))
def test_non_final_axes(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=(0, 1), name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (2, 4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (8, 6))
if __name__ == '__main__':
absltest.main()
| 34.315789 | 86 | 0.592216 | ["Apache-2.0"] | pschuh/flax | tests/linen/linen_linear_test.py | 10,432 | Python |
import os, sys, shutil, glob, math
import regex as re
from doconce import globals
from .doconce import read_file, write_file, doconce2format, handle_index_and_bib, preprocess
from .misc import option, help_print_options, check_command_line_options, system, _abort, \
find_file_with_extensions, folder_checker, doconce_version, _rmdolog, errwarn, debugpr
from .common import INLINE_TAGS, remove_code_and_tex
import json
from .ipynb import img2ipynb
from .html import movie2html
docstring_jupyterbook = ('Usage:\n'
'\033[1mdoconce jupyterbook <file>[.do.txt] [options]\033[0m\n'
'Create directories and files for Jupyter Book version: 0.8\n'
'\n'
'Example:\n'
'doconce jupyterbook filename.do.txt --sep=chapter --sep_section=subsection --show_titles\n')
_registered_cmdline_opts_jupyterbook = [
('-h', 'Show this help page'),
('--help', 'Show this help page'),
('--sep=', 'Specify separator for DocOnce file into jupyter-book chapters. [chapter|section|subsection]'),
('--sep_section=', 'Specify separator for DocOnce file into jupyter-book sections. '
'[chapter|section|subsection], optional'),
('--dest=', 'Destination folder for the content'),
('--dest_toc=', 'Destination folder for the _toc.yml file'),
('--show_titles', 'Print out the titles detected based on the separator headers. '
'This can be helpful for the file passed to the --titles option'),
('--titles=', 'File with page titles, i.e. titles in TOC on the left side of the page. Default is \'auto\': '
'assign titles based on the separator headers')
]
# Get the list of options for doconce jupyterbook
_legal_cmdline_opts_jupyterbook, _ = list(zip(*_registered_cmdline_opts_jupyterbook))
_legal_cmdline_opts_jupyterbook = list(_legal_cmdline_opts_jupyterbook)
# Get the list of opitions for doconce in general
_legal_command_line_options = [opt for opt, help in globals._registered_command_line_options]
def jupyterbook():
"""
Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro
This function is called directly from bin/doconce
"""
# Print help
if len(sys.argv) < 2:
doconce_version()
print(docstring_jupyterbook)
print("Try 'doconce jupyterbook --help' for more information.")
sys.exit(1)
if option('help') or '-h' in sys.argv:
print_help_jupyterbook()
sys.exit(1)
# Check options
# NB: _legal_command_line_options allows options defined in misc.py/global.py
if not check_command_line_options(1, option_list=_legal_cmdline_opts_jupyterbook + _legal_command_line_options):
_abort()
# Destination directories
dest = option('dest=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest = folder_checker(dest)
dest_toc = option('dest_toc=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest_toc = folder_checker(dest_toc)
# Get options
sep = option('sep=', default='section', option_list=_legal_cmdline_opts_jupyterbook)
sep_section = option('sep_section=', default='', option_list=_legal_cmdline_opts_jupyterbook)
globals.encoding = option('encoding=', default='')
titles_opt = option('titles=', default='auto', option_list=_legal_cmdline_opts_jupyterbook)
show_titles_opt = option('show_titles', default=False, option_list=_legal_cmdline_opts_jupyterbook)
# Check if the file exists, then read it in
dirname, basename, ext, filename = find_file_with_extensions(sys.argv[1], allowed_extensions=['.do.txt'])
if not filename:
errwarn('*** error: file %s does not exist' % globals.filename)
_abort()
globals.dirname = dirname
if dirname:
# cd into the DocOnce file's directory, then fix dest and dest_toc
os.chdir(dirname)
errwarn('*** doconce format now works in directory %s' % dirname)
# fix dest, dest_roc, and finally dirname
dest = os.path.relpath(dest or '.', start=dirname) + '/'
if dest.startswith('./'):
dest = dest[2:]
dest_toc = os.path.relpath(dest_toc or '.', start=dirname) + '/'
if dest_toc.startswith('./'):
dest_toc = dest_toc[2:]
#dirname = ''
globals.filename = filename
globals.dofile_basename = basename
# NOTE: The following is a reworking of code from doconce.py > format_driver
_rmdolog() # always start with clean log file with errors
preprocessor_options = [arg for arg in sys.argv[1:]
if not arg.startswith('--')]
format = 'pandoc'
filename_preprocessed = preprocess(globals.filename, format, preprocessor_options)
# Run parts of file2file code in format_driver.
# Cannot use it directly because file2file writes to file. Consider to modularize file2file
filestr = read_file(filename_preprocessed, _encoding=globals.encoding)
# Remove pandoc's title/author/date metadata, which does not get rendered appropriately in
# markdown/jupyter-book. Consider to write this metadata to the _config.yml file
for tag in 'TITLE', 'AUTHOR', 'DATE':
if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
errwarn('*** warning : Removing heading with %s. Consider placing it in _config.yml' % tag.lower())
filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)
# Remove TOC tag
tag = 'TOC'
if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
errwarn('*** warning : Removing the %s tag' % tag.lower())
filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)
# Format citations and add bibliography in DocOnce's html format
pattern_tag = r'[\w _\-]*'
pattern = r'cite(?:(\[' + pattern_tag + '\]))?\{(' + pattern_tag + ')\}'
if re.search(pattern, filestr):
filestr = handle_index_and_bib(filestr, 'html')
# Delete any non-printing characters, commands, and comments
# Using regex:
m = re.search(r'\A\s*^(?:#.*\s*|!split\s*)*', filestr, re.MULTILINE)
if m:
filestr = filestr[m.end():]
# No-regex method. This could be an alternative to the previous regex
'''skip = ''
for line in filestr.splitlines():
if not line.strip():
skip += line + '\n'
elif not line.startswith('#') and not line.startswith('!'):
break
else:
skip += line +'\n'
filestr = filestr[len(skip):]
'''
# Description of relevant variables
# sep : Divide the text in jupyter-book chapters, see --sep
# chapters : ['whole chapter 1', 'whole chapter 2', 'summary']
# chapter_titles : ['Chapter 1', 'Chapter 2', 'Summary']
# chapter_titles_auto : ['Header 1', 'Header 2', 'Last Header in DocOnce file']
# chapter_basenames : ['01_mybook', '02_mybook', '03_mybook']
#
# If sep_section is not empty, these variables become relevant
# sep_section : Subdivide the jupyter-book chapters in sections, see --sep_section
# sec_list : [['subsection1','subsection2], ['subsection1'] , []]
# sec_title_list : [['Subsection 1.1', 'Subsection 1.2'], ['Subsection 2.1'], []]
# sec_title_list_auto : [['Subheader 1.1', 'Subheader 1.2'], ['Subheader 2.1'], ['Last Subheader in DocOnce file']]
# sec_basename_list : [['01_01_mybook', '01_02_mybook'], ['02_01_mybook'], []]
# Split the DocOnce file in jupyter-book chapters
chapters = split_file(filestr, INLINE_TAGS[sep])
sec_list = [[]] * len(chapters)
sec_title_list_auto = None
# Extract all jupyter-book sections based on --sep_section
if sep_section:
for c, chap in enumerate(chapters):
# Any text before the first jupyter-book section is part of a jupyter-book chapter,
# the rest consists in jupyter-book sections
m = re.search(INLINE_TAGS[sep_section], chap, flags=re.MULTILINE)
if m:
pos_sep_section = m.start() if m else 0
# Write text before the first jupyter-book section as chapter
chapters[c] = split_file(chap[:pos_sep_section:], INLINE_TAGS[sep_section])[0]
# The text after the first match of sep_section are jupyter-book sections
sec_list[c] = split_file(chap[pos_sep_section:], INLINE_TAGS[sep_section])
# Get titles from title file in options
chapter_titles, sec_title_list = read_title_file(titles_opt, chapters, sec_list)
# Extract and write titles to each jupyter-book chapter/section.
# Also get the basenames for the files to be created later
def int_formatter(_list):
return '%0' + str(max(2, math.floor(math.log(len(_list) + 0.01, 10)) + 1)) + 'd_'
chapter_formatter = int_formatter(chapters)
chapters, chapter_titles, chapter_titles_auto = titles_to_chunks(chapters, chapter_titles, sep=sep,
chapter_formatter=chapter_formatter, tags=INLINE_TAGS)
chapter_basenames = [chapter_formatter % (i + 1) + basename for i in range(len(chapters))]
sec_basename_list = [[]] * len(chapters)
if sep_section:
# The following contains section titles extracted automatically
sec_title_list_auto = [[]] * len(sec_title_list)
for c, sections in enumerate(sec_list):
section_formatter = chapter_formatter % (c + 1) + int_formatter(sections)
sec_list[c], section_titles, section_titles_auto = titles_to_chunks(sections, sec_title_list[c],
sep=sep_section, sep2=sep,
chapter_formatter=section_formatter, tags=INLINE_TAGS)
sec_title_list[c] = section_titles
sec_title_list_auto[c] = section_titles_auto
sec_basename_list[c] = [section_formatter % (i + 1) + basename for i in range(len(sections))]
# Print out the detected titles if --show_titles was used
if show_titles_opt:
if sep_section == '':
print('\n===== Titles detected using the %s separator:' % sep)
else:
print('\n===== Titles detected using the %s and %s separators:' % (sep, sep_section))
for c in range(len(chapter_titles_auto)):
print(chapter_titles_auto[c])
if sep_section:
for s in range(len(sec_title_list_auto[c])):
print(sec_title_list_auto[c][s])
print('=====')
# Description of relevant variables
# all_texts : ['====== Chapter 1 ======\n Some text', '====== Subsection 1.1 ======\n Some text', ..]
# all_basenames : ['01_mybook','01_01_mybook','01_02_mybook','02_mybook']
# all_suffix : ['.md','.md','.ipynb','.md']
# all_fnames : ['01_mybook.md','01_01_mybook.md','01_02_mybook.ipynb','02_mybook.md']
# all_titles : ['Chapter 1','Subsection 1.1', 'Subsection 1.2','Chapter 2']
# all_nestings : [0, 1, 1, 0] # 0 or 1 for jupyter-book chapters or sections, respectively
#
# filestr_md : DocOnce input formatted to pandoc
# filestr_ipynb : DocOnce input formatted to ipynb
# all_texts_md : list of all chapters and sections from filestr_md
# all_texts_ipynb : list of all chapters and sections from filestr_ipynb
# all_texts_formatted : list of chapters and sections from filestr_ipynb
# Flatten all texts, basenames, titles, etc for jupyter-book chapters and sections
all_texts = []
all_basenames = []
all_titles = []
all_nestings = []
for c in range(len(chapters)):
all_texts.append(chapters[c])
all_basenames.append(chapter_basenames[c])
all_titles.append(chapter_titles[c])
all_nestings.append(0)
for s in range(len(sec_list[c])):
all_texts.append(sec_list[c][s])
all_basenames.append(sec_basename_list[c][s])
all_titles.append(sec_title_list[c][s])
all_nestings.append(1)
# Create markdown or ipynb filenames for each jupyter-book chapter section
all_suffix = identify_format(all_texts)
all_fnames = [b + s for b, s in zip(all_basenames,all_suffix)]
# Mark the beginning of each jupyter-book chapter and section with its filename in a comment
all_markings = list(map(lambda x: '!split\n<!-- jupyter-book %s -->\n' % x, all_fnames))
all_texts = [m + t for m, t in zip(all_markings, all_texts)]
# Merge all jupyter-book chapters and sections back to a single DocOnce text.
# Then convert to pandoc and ipynb
filestr = ''.join(all_texts)
filestr_md, bg_session = doconce2format(filestr, 'pandoc')
filestr_ipynb, bg_session = doconce2format(filestr, 'ipynb')
# Split the texts (formatted to md and ipynb) to individual jupyter-book chapters/sections
all_texts_md = split_file(filestr_md, '<!-- !split -->\n<!-- jupyter-book .* -->\n')
all_texts_ipynb = split_ipynb(filestr_ipynb, all_fnames)
if len(all_texts_md) != len(all_texts_ipynb):
errwarn('*** error : the lengths of .md and .ipynb files should be the same')
_abort()
# Flatten the formatted texts
all_texts_formatted = [[]] * len(all_fnames)
for i in range(len(all_fnames)):
all_texts_formatted[i] = all_texts_md[i]
if all_fnames[i].endswith('.ipynb'):
all_texts_formatted[i] = all_texts_ipynb[i]
# Fix all links whose destination is in a different document
# e.g. <a href="#Langtangen_2012"> to <a href="02_jupyterbook.html#Langtangen_2012">
all_texts_formatted = resolve_links_destinations(all_texts_formatted, all_basenames)
# Fix the path of FIGUREs and MOVIEs.
# NB: at the time of writing (03-2021) movies are not supported by Jupyter Book
all_texts_formatted = [fix_media_src(t, '', dest) for t in all_texts_formatted]
# Write chapters and sections to file
for i in range(len(all_texts_formatted)):
write_file(all_texts_formatted[i], dest + all_fnames[i], _encoding=globals.encoding)
# Create the _toc.yml file
yml_text = create_toc_yml(all_basenames, titles=all_titles, nesting_levels=all_nestings, dest=dest, dest_toc=dest_toc)
write_file(yml_text, dest_toc + '_toc.yml', _encoding=globals.encoding)
print('\nWrote _toc.yml and %d chapter files to these folders:\n %s\n %s' %
(len(all_fnames), os.path.realpath(dest_toc), os.path.realpath(dest)))
def split_file(filestr, separator):
"""Split the text of a doconce file by a regex string.
Split the text of a doconce file by a separator regex (e.g. the values of
the INLINE_TAGS dictionary from common.py) and return the chunks of text.
Note that the first chunk contains any text before the first separator.
:param str filestr: text string
:param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py
:return: list of text chunks
:rtype: list[str]
"""
chunks = []
c = re.compile(separator, flags=re.MULTILINE)
if re.search(c, filestr) is None:
print('pattern of separator not found in file')
chunks.append(filestr)
else:
pos_prev = 0
for m in re.finditer(c, filestr):
if m.start() == 0:
continue
# Skip separators used for illustration of doconce syntax inside !bc and !ec directives
if filestr[:m.start()].rfind('!bc') > filestr[:m.start()].rfind('!ec'):
errwarn('*** warning : skipped a separator, '
'which appeared to be inside the !bc and !ec directives')
continue
chunk = filestr[pos_prev:m.start()]
chunks.append(chunk)
pos_prev = m.start()
chunk = filestr[pos_prev:]
chunks.append(chunk)
return chunks
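# Editorial sketch of split_file behaviour (the DocOnce snippet below is an
# assumption for illustration; INLINE_TAGS['chapter'] matches 9-'=' headings):
#
#   text = ("intro text\n"
#           "========= Chapter A =========\nbody A\n"
#           "========= Chapter B =========\nbody B\n")
#   chunks = split_file(text, INLINE_TAGS['chapter'])
#   # chunks[0] -> "intro text\n"          (text before the first separator)
#   # chunks[1] -> chapter A heading + body
#   # chunks[2] -> chapter B heading + body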
def split_ipynb(ipynb_text, filenames):
"""Split a Jupyter notebook based on filenames present in its blocks
Given the text of a Jupyter notebook marked with the output filename
in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of
Jupyter notebooks separated accordingly.
:param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->
:param list[str] filenames: filenames
:return: ipynb_texts with the ipynb code for each block
:rtype: list[str]
"""
# An ipynb is a python dictionary
ipynb_dict = json.loads(ipynb_text)
cells = ipynb_dict.pop('cells')
# Find the markings with filename in the ipynb blocks
ind_fname = []
block_sources = [''.join(c['source']) for c in cells]
for fname in filenames:
marking = '<!-- jupyter-book %s -->' % fname
for b, block in enumerate(block_sources):
if block.find(marking) > -1:
ind_fname.append(b)
break
if len(ind_fname) != len(filenames):
errwarn('*** error : could not find all markings in ipynb')
_abort()
# For each file create a dictionary with the relevant ipynb blocks, then convert to text
ipynb_texts = [''] * len(filenames)
for i, ind_start in enumerate(ind_fname):
ind_end = None
if i + 1 < len(ind_fname):
ind_end = ind_fname[i + 1]
block_dict = ipynb_dict.copy()
block_dict['cells'] = cells[ind_start:ind_end]
ipynb_texts[i] = json.dumps(block_dict, indent=1, separators=(',', ':'))
return ipynb_texts
def read_title_file(titles_opt, chapters, sec_list):
"""Helper function to read and process a file with titles
Read the file containing titles and process them according to the number of jupyter-book chapters and sections.
len(sec_list) should be the same as len(chapters), and its elements can be empty lists
:param str titles_opt: 'auto' or file containing titles
:param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters
:param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.
:return: tuple with chapter and section titles
:rtype: (list[str], list[list[str]])
"""
chapter_titles = []
sec_title_list = [[]] * len(chapters)
if titles_opt != 'auto':
chapter_titles = [''] * len(chapters)
input_titles = read_to_list(titles_opt)
for c in range(len(chapters)):
chapter_titles[c] = input_titles.pop(0) if len(input_titles) else ''
section = []
for _ in range(len(sec_list[c])):
section.append(input_titles.pop(0) if len(input_titles) else '')
sec_title_list[c] = section
if len(input_titles):
errwarn('*** warning : number of titles is larger than the number of chapters and sections detected. '
'These titles will be ignored')
return chapter_titles, sec_title_list
def titles_to_chunks(chunks, title_list, sep, sep2=None, chapter_formatter='%02d_', tags=INLINE_TAGS):
"""Helper function to extract assign titles to jupyter-book chapters/sections (here called chunks)
Jupyter-book files must have a # header with the title (see doc jupyter-book >
Types of content source files > Rules for all content types). This function
extracts title from the title file or from the headers given by the separator
provided in the options. If no title is found, provide a default title as e.g.
03_mydoconcefile.
:param list[str] chunks: list of text string
:param list[str] title_list: titles for the chunks. Empty if --titles is 'auto'
:param str sep: separator: chapter|section|subsection
:param str sep2: second separator in case the first fails: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:param str chapter_formatter: formatter for default filenames
:return: tuple with the chunks of text having a # header, titles, titles detected
:rtype: (list[str], list[str], list[str])
"""
title_list_out = title_list.copy()
# title list can be empty (when --titles='auto')
if not len(title_list_out):
title_list_out = [''] * len(chunks)
title_list_detected = [''] * len(chunks)
# Process each chunk: detect and write title in the header of a chapter/section
for i, chunk in enumerate(chunks):
title = ''
# Try to find and remove any title from headers in each chunk
if title == '':
chunk, title = create_title(chunk, sep, tags)
# Same, this time using the second optional separator
if title == '' and sep2:
chunk, title = create_title(chunk, sep2, tags)
# Set default title
if title == '':
title = chapter_formatter % (i + 1) + globals.dofile_basename
# Keep any detected title before overriding them with the file indicated in --titles
title_list_detected[i] = title
# Use title from the titles files. This gets skipped if there is no title file
if i < len(title_list):
# Skip any empty line in title file
if title_list[i]:
title = title_list[i]
# Write to title list and chunk
# NB: create_title above removed any detected title from chunk, thus avoiding duplicate titles
title_list_out[i] = title
chunk = '=' * 9 + ' ' + title + ' ' + '=' * 9 + '\n' + chunk
chunks[i] = chunk
return chunks, title_list_out, title_list_detected
def create_title(chunk, sep, tags):
"""Helper function to allow doconce jupyterbook to automatically assign titles in the TOC
If a chunk of text starts with the section specified in sep, lift it up
to a chapter section. This allows doconce jupyterbook to automatically use the
section's text as title in the TOC on the left
:param str chunk: text string
:param str sep: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:return: tuple with the chunk stripped of its section header, and title
:rtype: (str, str)
"""
title = ''
m = re.search(tags[sep], chunk, flags=re.MULTILINE)
if m and m.start() == 0:
name2s = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
s = name2s[sep]
header_old = '=' * s
pattern = r'^ *%s +(.+?) +%s' % (header_old, header_old)
# Get the title
mt = re.match(pattern, chunk)
if mt:
title = mt.group(1)
chunk = re.sub(pattern, '', chunk, flags=re.MULTILINE, count=1)
return chunk, title
def identify_format(text_list):
"""Identify the appropriate formats to convert a list of DocOnce texts.
Given a list of DocOnce texts, check if they contain code. If so, return the suffix
'.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for
the pandoc markdown format).
:param list[str] text_list: list of strings using DocOnce syntax
:return: list of formats
:rtype: list[str]
"""
chunk_formats = [''] * len(text_list)
for i, text in enumerate(text_list):
# Convert each text to pandoc, or to ipynb if the text contains any computation
format = 'pandoc'
_filestr, code_blocks, code_block_types, tex_blocks = \
remove_code_and_tex(text, format)
if len(code_blocks):
format = 'ipynb'
chunk_formats[i] += '.md' if format == 'pandoc' else '.ipynb'
return chunk_formats
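# Editorial sketch (the DocOnce strings are assumptions for illustration):
#   identify_format(['just prose, no code',
#                    'some text\n!bc pycod\nprint(1)\n!ec\nmore text'])
#   # would be expected to return ['.md', '.ipynb']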
def create_toc_yml(basenames, nesting_levels, titles, dest='./', dest_toc='./', section_paths=None, section_titles=None):
"""Create the content of a _toc.yml file
Give the lists of paths, titles, and nesting levels, return the content of a _toc.yml file
:param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.
strings that can be used after the `file:` section in a _toc.yml
:param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used
after the `title:` section in a _toc.yml
:param list[str] nesting_levels: nesting levels for basenames and titles: # 0 or 1 for jupyter-book
chapters or sections, respectively
:param str dest: destination folder for the chapter files
:param str dest_toc: destination folder for the _toc.yml file
:return: content of a _toc.yml file
:rtype: str
"""
def escape_chars(title):
"""Wrap title in quotes if it contains colons, asterisks, bacticks"""
if re.search(':', title) or re.search('\*', title) or re.search('\`', title):
title = title.replace('"', '\\"')
title = '"' + title + '"'
return title
# Get the relative path between the destination folders
relpath = os.path.relpath(dest, start=dest_toc)
if relpath == '.':
relpath = ''
else:
relpath += '/'
# Produce the text for _toc.yml
yml_text = ""
nesting_prev = 0
for i, cfname in enumerate(basenames):
ctitle = escape_chars(titles[i])
if ctitle:
nesting = nesting_levels[i]
if nesting == 0:
yml_text += '\n'
yml_text += yml_titledpage(relpath + cfname, ctitle, numbered=False)
else:
# Write the sections
if nesting_prev == 0:
yml_text += yml_section(nesting_level=nesting)
yml_text += yml_nested_section(relpath + cfname, ctitle, nesting_level=nesting)
nesting_prev = nesting
yml_text = yml_text.strip('\n')
return yml_text
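# Editorial sketch of a call to create_toc_yml (basenames and titles below are
# assumed example values, not from this module):
#
#   toc_text = create_toc_yml(
#       basenames=['01_mybook', '01_01_mybook', '02_mybook'],
#       nesting_levels=[0, 1, 0],
#       titles=['Chapter 1', 'Section 1.1', 'Chapter 2'],
#       dest='./', dest_toc='./')
#   write_file(toc_text, '_toc.yml', _encoding=globals.encoding)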
def print_help_jupyterbook():
"""Pretty print help string and command line options
Help function to print help and formatted command line options for doconce jupyterbook
"""
print(docstring_jupyterbook)
print('Options:')
help_print_options(cmdline_opts=_registered_cmdline_opts_jupyterbook)
def read_to_list(file):
"""Read the content of a file to list
Verify the existence of a file, then read it to a list by
stripping newlines. The function aborts the program if the file does not exist.
:param str file: Path to an existing file
:return: list of strings
:rtype: list[str]
"""
if not os.path.isfile(file):
errwarn('*** error: file "%s" does not exist!' % file)
_abort()
with open(file, 'r') as f:
out = f.read().splitlines()
return out
def get_link_destinations(chunk):
"""Find any target of a link in HTML code
Use regex to find tags with the id or name attribute, which makes them a possible target of a link
:param str chunk: text string
:return: destinations, destination_tags
:rtype: Tuple[list[str], list[str]]
"""
destinations, destination_tags = [], []
# html links. label{} has already been converted
pattern_tag = r'[\w _\-:]'
pattern_backslash = '[\\\]'
pattern = r'<' + pattern_tag + \
'+ (id|name)=' + pattern_backslash + '["\']' + \
'(' + pattern_tag + '+)' + pattern_backslash + '["\'][^>]*>'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(2)
destinations.append(match)
destination_tags.append(tag)
return destinations, destination_tags
def fix_links(chunk, tag2file):
"""Find and fix the the destinations of hyperlinks using HTML or markdown syntax
Fix any link in a string text so that they can target a different html document.
First use regex on a HTML text to find any HTML or markdown hyperlinks
(e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the
filename to the value of a link's href attribute (e.g. <a href="02_jupyterbook.html#sec1">)
:param str chunk: text string
:param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file['sec1']='02_jupyterbook'
:return: chunk with fixed links
:rtype: str
"""
chunk_out = chunk
# html links
pattern_tag = r'[\w _\-:]'
pattern = r'<' + pattern_tag + '+ href=[\\\]{0,2}["\']#(' + pattern_tag + '+)[\\\]{0,2}["\'][^>]*>'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace('#' +tag, tag2file.get(tag, tag) + '.html#' + tag)
chunk_out = chunk_out.replace(match, fixed_tag)
# markdown links
pattern = r'\[' + pattern_tag + '+\]\(#(' + pattern_tag + '+)\)'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
chunk_out = chunk_out.replace(match, fixed_tag)
return chunk_out
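# Editorial sketch (assumed inputs): with tag2file = {'sec1': '02_jupyterbook'},
#   fix_links('<a href="#sec1">link</a> and [Section 1](#sec1)', tag2file)
# would rewrite both links to point at '02_jupyterbook.html#sec1'.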
def resolve_links_destinations(chunks, chunk_basenames):
"""Fix links in jupyter-book chapters/sections so that they can target destinations in other files
Prepend a filename to all links' destinations e.g. <a href="#Langtangen_2012"> becomes
<a href="02_jupyterbook.html#Langtangen_2012">
:param list[str] chunks: DocOnce texts consisting in Jupyter-book chapters/sections
:param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections
:return: chunks with corrected links
:rtype: list[str]
"""
# Flatten the texts and filenames, then get the basenames from filenames
def strip_end(text, suffix):
if suffix and text.endswith(suffix):
return text[:-len(suffix)]
return text
all_sects = chunks #+ flatten(sec_list)
all_basenames = chunk_basenames #+ flatten(sec_basename_list)
all_basenames = list(map(lambda fname: strip_end(fname, '.md'), all_basenames))
all_basenames = list(map(lambda fname: strip_end(fname, '.ipynb'), all_basenames))
# Find all link destinations and create a dictionary tag2file[tag] = destination file
tag2file = {}
for i in range(len(all_sects)):
ch_destinations, ch_destination_tags = get_link_destinations(all_sects[i])
basename_list = [all_basenames[i]] * len(ch_destinations)
tag2file.update(zip(ch_destination_tags, basename_list))
# Fix all href in links by prepending the destination filename
for c in range(len(chunks)):
chunks[c] = fix_links(chunks[c], tag2file)
return chunks
def fix_media_src(filestr, dirname, dest):
"""Fix the (relative) path to any figure and movie in the DocOnce file.
The generated .md and .ipynb files will be created in the path passed to `--dest`.
This method fixes the paths of the image and movie files so that they can be found
in generated .md and .ipynb files.
:param str filestr: text string
:param str dirname: Path to an existing folder
:param str dest: directory name
:return: filestr with new paths
:rtype: str
"""
patterns = [
# movies in .md and .ipynb. NB: jupyterbook does not support movies
movie2html['movie_regex'],
# images in .md
r'\!\[<p><em>(.*)</em></p>\]\((.*)\)',
# images in .ipynb. See ipynb.py
img2ipynb['imgtag_regex'],
# images in MarkDown syntax
img2ipynb['md_regex'],
# commented images and movies in ipynb. See ipynb.py
r'<!-- (?:dom:)(FIGURE|MOVIE): \[(.*)',
# commented images in md
r'<!-- <(\w+) src="(.*)" .*>(?=[<|\\n])',
]
filestr_out = filestr
for i,pattern in enumerate(patterns):
for m in re.finditer(pattern, filestr):
match = m.group()
tag = m.group(1)
src = m.group(2)
# Warn when a MOVIE is found: Jupyter Book does not support movies directly
if pattern == movie2html['movie_regex']:
errwarn('*** warning : To make images work, consider adding these extensions to _config.yml:\n',
('parse:\n'
' myst_enable_extensions:\n'
' - html_image\n'))
if not src.startswith('/'):
if dirname != '' and not dirname.endswith('/'):
dirname += '/'
src_new = os.path.relpath(dirname + src, start=dest)
replacement = match.replace(src, src_new, 1)
filestr_out = filestr_out.replace(match, replacement, 1)
return filestr_out
def yml_file(file):
return "- file: %s\n\n" % file
def yml_untitledpage(file, numbered=False):
return "- file: %s\n numbered: %s\n" % (file, str(numbered).lower())
def yml_titledpage(file, title, numbered=False):
return "- file: %s\n title: %s\n numbered: %s\n" % (file, title, str(numbered).lower())
def yml_section(nesting_level=1):
return "%ssections:\n" % (' ' * nesting_level)
def yml_nested_section(file, title, nesting_level=1):
return '%s - file: %s\n' % (' ' * nesting_level, file) + \
'%s title: %s\n' % (' ' * nesting_level, title)
def yml_part(part, *files):
yml = "- part: %s\n chapters:\n" % part
for file in files:
yml += ' - file: %s\n' % file
return yml + '\n'
def yml_ext_link(url, nesting_level=0, numbered=False):
return "%s- external: %s\n numbered: %s\n" % (url, ' ' * nesting_level, numbered)
def yml_header(header):
return "- header: %s\n" % header
def yml_chapter(file, title, sections, numbered='false'):
return "- title: %s\n file: %s\n numbered: %s\n sections: %s\n" % \
(title, file, numbered, sections)
| 46.156379 | 125 | 0.642118 | ["BSD-3-Clause"] | aless80/doconce | lib/doconce/jupyterbook.py | 33,648 | Python |
from collections import defaultdict
from hsst.utility import search
from hsst.utility.graph import SemanticGraph
class SubgraphEnumeration(object):
def __init__(self, graph, node_set_size_limit=0):
self.full_node_set = graph.nodes
self.full_edge_set = graph.edges
self.current_node_set = set()
self.current_edge_set = set()
self.visited_states = set()
self.subgraphs = []
self.node_set_size_limit = node_set_size_limit
# Create fast lookup structures
self.edges_by_source = defaultdict(set)
self.edges_by_destination = defaultdict(set)
self.edges_by_both = defaultdict(set)
self.labels = defaultdict(list)
for edge in self.full_edge_set:
self.labels[(edge.from_node, edge.to_node)].append(edge)
self.edges_by_source[edge.from_node].add(edge.to_node)
self.edges_by_destination[edge.to_node].add(edge.from_node)
self.edges_by_both[edge.from_node].add(edge.to_node)
self.edges_by_both[edge.to_node].add(edge.from_node)
def generate_moves(self):
# Generate all possible moves
# Each move consists of a single node and the set of edges that connect that node to the nodes
# in the currentNodeSet E.g. ( node, { (label1, node, node1), (label2, node2, node) ... } )
# Moves are temporarily stored as a dictionary so that the full set of edges associated with each move
# can be constructed
moves = []
temporary_moves = {}
# Check if the limit for the currentNodeSet size has been reached
if 0 < self.node_set_size_limit <= len(self.current_node_set):
return moves
# The initial step is handled separately
if not self.current_node_set:
for node in self.full_node_set:
moves.append((node, set()))
return moves
# The set of possible nodes consists of nodes that are not yet in the currentNodeSet
possible_nodes = self.full_node_set - self.current_node_set
# For every possible node, we need to check that it shares an edge with a node in the currentNodeSet
# Otherwise we would violate the 'connected' constraint
for possible_node in possible_nodes:
destination_nodes = self.edges_by_source[possible_node] & self.current_node_set
source_nodes = self.edges_by_destination[possible_node] & self.current_node_set
if len(destination_nodes) > 0 or len(source_nodes) > 0:
# There is at least one node in the current node set that we can connect the possible_node to
# Check if this state has been explored already
if self.id(node=possible_node) in self.visited_states:
continue
# If not, it is an acceptable move and we just need to construct the edge set that connects
# the possible_node to the current node set
edges = set(
edge for source_node in source_nodes for edge in self.labels[(source_node, possible_node)]) | \
set(edge for destination_node in destination_nodes for edge in
self.labels[(possible_node, destination_node)])
temporary_moves[possible_node] = edges
for move in temporary_moves:
moves.append((move, temporary_moves[move]))
return moves
def move(self, move):
# Move is a tuple (node, edge_set)
node, edge_set = move
self.current_node_set.add(node)
self.current_edge_set |= edge_set
self.visited_states.add(self.id())
self.subgraphs.append((self.current_node_set.copy(), self.current_edge_set.copy()))
def undo_move(self, move):
# Move is a tuple (node, edge_set)
node, edge_set = move
self.current_node_set.remove(node)
self.current_edge_set -= edge_set
def solved(self):
return False
def id(self, node=None):
if node:
return " ".join(str(x) for x in sorted(self.current_node_set | {node}, key=lambda x: x.node_id))
else:
return " ".join(str(x) for x in sorted(self.current_node_set, key=lambda x: x.node_id))
def enumerate_dfs_subgraphs(graph, df_limit=100):
enumeration = SubgraphEnumeration(graph, node_set_size_limit=df_limit)
search.df(enumeration, df_limit)
return set(SemanticGraph(nodes, edges, nonterminal_count=0) for nodes, edges in enumeration.subgraphs)
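# Editorial usage sketch (construction of `nodes` and `edges` is assumed; see
# hsst.utility.graph for the actual SemanticGraph/node/edge classes):
#
#   graph = SemanticGraph(nodes, edges, nonterminal_count=0)
#   subgraphs = enumerate_dfs_subgraphs(graph, df_limit=4)
#   # -> set of SemanticGraph objects, one per connected subgraph
#   #    containing at most 4 nodes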
| 38.898305 | 115 | 0.652505 | ["MIT"] | matichorvat/hsst | hsst/utility/dfs_subgraph_enumeration.py | 4,590 | Python |
import os
import h5py
import numpy as np
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \
Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D
from keras import regularizers
from keras.layers import Average as KerasAverage
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.engine.topology import Layer
from .layers import LayerNormalization, CustomSoftmax
from .tf_implementations.loss_functions import loss_factory
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(
x if x != -1 else None
for x in self.target_shape
)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == "tensorflow":
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError("TopKAverage is not implemented for "
" %s backend" % (K.backend(),))
def reducer_factory(reducer, k=3):
# Set the type of the reducer to be used
if reducer == "max":
return Max()
elif reducer == "average":
return Average()
elif reducer == "topK":
return TopKAverage(k)
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(
K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),
K.floatx()
))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization()
])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization()
])
def create_dilated_cnn_receptive_field_25(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_dilated_cnn_receptive_field_25_with_tanh(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([
Conv2D(filters=32, kernel_size=5, input_shape=input_shape),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2))
])
def cnn_factory(name):
cnn_factories = {
"simple_cnn": create_simple_cnn,
"simple_cnn_ln": create_simple_cnn_ln,
"dilated_cnn_receptive_field_25":
create_dilated_cnn_receptive_field_25,
"dilated_cnn_receptive_field_25_with_tanh":
create_dilated_cnn_receptive_field_25_with_tanh,
"hartmann_cnn": create_hartmann_cnn
}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
# Set the type of optimizer to be used
if optimizer == "Adam":
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == "SGD":
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,
clipvalue=clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="mse",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
# TODO: Maybe change this to 3, because we finally need only the
# patch_shape?
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
model = create_cnn(
input_shape=(None, None, C),
kernel_regularizer=weight_decay
)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss)
)
# If there is a weight file specified load the weights
if weight_file:
try:
f = h5py.File(weight_file, "r")
keys = [os.path.join(model.name, w.name)
for l in model.layers for w in l.weights]
weights = [f[os.path.join("model_weights", k)][:] for k in keys]
model.set_weights(weights)
        except Exception:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="emd",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
# Create the two stream inputs
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
# Reshape them for input in the CNN
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
# Create the CNN and extract features from both streams
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
# Compute a kind of similarity between the features of the two streams
x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2])
# Reshape them back into their semantic shape (depth planes, patches, etc)
x = TotalReshape((-1, D, N))(x)
# Compute the final similarity scores for each depth plane
x = reducer_factory(reducer)(x)
# Compute the final output
y = Activation("softmax")(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss),
metrics=["accuracy", mae, mde]
)
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def build_hartmann_network(
input_shape,
create_cnn=create_hartmann_cnn,
optimizer="SGD",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss=None,
reducer=None,
merge_layer=None,
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 3
# Unpack the input shape to make the code more readable
H, W, C = input_shape
# Create the feature extracting CNN
cnn = create_hartmann_cnn(input_shape=(None, None, C))
# Create the similarity CNN
sim = Sequential([
Conv2D(
filters=2048,
kernel_size=5,
input_shape=K.int_shape(cnn.output)[1:]
),
Activation("relu"),
Conv2D(filters=2048, kernel_size=1),
Activation("relu"),
Conv2D(filters=2, kernel_size=1),
Activation("softmax")
])
# Create the joint model for training
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
# Compile all the models
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
cnn.compile("sgd", "mse") # Just so that we can run predict()
sim.compile("sgd", "mse")
# Attach the cnn and sim to the model in case someone wants to use them
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {
"simple_cnn": build_simple_cnn,
"simple_nn_for_training": build_simple_nn_for_training,
"hartmann": build_hartmann_network
}
return models[name]
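# Illustrative usage (a sketch, not part of the original module): the factories
# above are meant to compose as below. The input shape (D=32 depth planes, N=4
# patches, 11x11 patches with 3 channels) and every keyword value here are
# assumptions chosen only for illustration.
#
#   builder = get_nn("simple_nn_for_training")
#   model = builder(
#       input_shape=(32, 4, 11, 11, 3),
#       create_cnn=cnn_factory("simple_cnn"),
#       optimizer="Adam",
#       lr=1e-3,
#       loss="emd",
#       reducer="topK",
#       weight_decay=kernel_regularizer_factory(1e-5)
#   )
#   model.summary()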
| 26.539583 | 79 | 0.605464 | [
"MIT"
] | paschalidoud/raynet | raynet/models.py | 12,739 | Python |
from models import Supervisor
import unittest
class SupervisorTestCase(unittest.TestCase):
def setUp(self):
self.supervisor = Supervisor.login('Mohammad', '1234', '0123456')
self.sample = Supervisor.sample()
def test_all_data(self):
self.assertIsInstance(self.supervisor, Supervisor,
"Sample does not return proper instance")
self.assertTrue(hasattr(self.supervisor, 'username'),
"Instance does not have username")
self.assertTrue(hasattr(self.supervisor, 'password'),
"Instance does not have password")
self.assertTrue(hasattr(self.supervisor, 'phone_number'),
"Instance does not have phone_number")
self.assertFalse(self.sample.logged_in,
"Login is not false by default")
def test_supervisor_protected_method(self):
self.assertIsNone(self.sample.protected(),
"Not raised on protected method")
self.assertListEqual(self.supervisor.protected(), [1, 2, 3],
"Protected data do not match")
| 41.357143 | 73 | 0.609672 | [
"MIT"
] | mhgzadeh/unit-testing-python | main.py | 1,158 | Python |
"""
module init
"""
from flask import Flask
from config import config_options, DevelopmentConfig
from flask_sqlalchemy import SQLAlchemy
from .views import orders_blue_print
import os
def create_app(config_class=DevelopmentConfig):
    """
    Function create_app:
        creates app and gives it the import name
        holds the configuration being used.
        registers the orders blueprint
    :return: app:
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    app.register_blueprint(orders_blue_print)
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # set the configurations
    app.config.from_object(os.environ['APP_SETTINGS'])
    # initialize the database
    db = SQLAlchemy(app)
    db.init_app(app)
    with app.app_context():
        from . import routes
        db.create_all()
    # register your blueprints here
    from app.main import main
    from app.auth import auth
    app.register_blueprint(main)
    app.register_blueprint(auth)
    @app.route('/')
    def hello():
        return "Hello World!"
    return app
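# Illustrative usage (a sketch, not part of the original module): the factory
# would typically be invoked from a run script, e.g.
#
#   from app import create_app
#   from config import DevelopmentConfig
#   app = create_app(DevelopmentConfig)
#   app.run()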
| 21.118644 | 56 | 0.693419 | [
"MIT"
] | codebr3ak/Fast-Food-Fast | app/__init__.py | 1,246 | Python |
# DO NOT EDIT THIS FILE!
#
# All configuration must be done in the `configuration.py` file.
# This file is part of the Peering Manager code and it will be overwritten with
# every code releases.
from __future__ import unicode_literals
import os
import socket
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
try:
from peering_manager import configuration
except ImportError:
raise ImproperlyConfigured(
'Configuration file is not present. Please define peering_manager/configuration.py per the documentation.')
VERSION = '0.99-dev'
SECRET_KEY = getattr(configuration, 'SECRET_KEY', '')
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
DEBUG = getattr(configuration, 'DEBUG', False)
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 20)
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
MY_ASN = getattr(configuration, 'MY_ASN', -1)
if MY_ASN == -1:
raise ImproperlyConfigured(
'The MY_ASN setting must be set to a valid AS number.')
# PeeringDB URLs
PEERINGDB_API = 'https://peeringdb.com/api/'
PEERINGDB = 'https://peeringdb.com/asn/'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
from peering_manager.ldap_config import *
LDAP_CONFIGURED = True
except ImportError:
LDAP_CONFIGURED = False
# If LDAP is configured, load the config
if LDAP_CONFIGURED:
try:
import ldap
import django_auth_ldap
# Prepend LDAPBackend to the default ModelBackend
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
except ImportError:
raise ImproperlyConfigured(
'LDAP authentication has been configured, but django-auth-ldap is not installed. You can remove peering_manager/ldap_config.py to disable LDAP.'
)
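# For reference, a minimal peering_manager/ldap_config.py enabling the block
# above could look like the sketch below; the server URI, bind credentials and
# search base are placeholders and must be adapted to the local directory.
#
#   import ldap
#   from django_auth_ldap.config import LDAPSearch
#
#   AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.com'
#   AUTH_LDAP_BIND_DN = 'cn=peering-manager,ou=services,dc=example,dc=com'
#   AUTH_LDAP_BIND_PASSWORD = 'changeme'
#   AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=users,dc=example,dc=com',
#                                      ldap.SCOPE_SUBTREE,
#                                      '(uid=%(user)s)')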
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_tables2',
'peering',
'peeringdb',
'utils',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'utils.middleware.RequireLoginMiddleware',
]
ROOT_URLCONF = 'peering_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utils.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'peering_manager.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Django logging
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s | %(levelname)s | %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peering-manager.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'peeringdb_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peeringdb.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'napalm_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/napalm.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
},
'loggers': {
'peering.manager.peering': {
'handlers': ['file'],
'level': 'DEBUG',
},
'peering.manager.peeringdb': {
'handlers': ['peeringdb_file'],
'level': 'DEBUG',
},
'peering.manager.napalm': {
'handlers': ['napalm_file'],
'level': 'DEBUG',
},
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication URL
LOGIN_URL = '/{}login/'.format(BASE_PATH)
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static/'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'project-static'),
)
# Django filters
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = '0'
try:
HOSTNAME = socket.gethostname()
except Exception:
HOSTNAME = 'localhost'
| 28.195745 | 156 | 0.641111 | [
"Apache-2.0"
] | amtypaldos/peering-manager | peering_manager/settings.py | 6,626 | Python |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains an enumerated type and helper functions related
to different types of training entry points (Python package, Python
script, bash script, etc.)
"""
import enum
import os
class _EntryPointType(enum.Enum):
"""Enumerated type consisting of valid types of training entry points."""
PYTHON_PACKAGE = "PYTHON_PACKAGE"
PYTHON_PROGRAM = "PYTHON_PROGRAM"
COMMAND = "COMMAND"
PYTHON_PACKAGE = _EntryPointType.PYTHON_PACKAGE
PYTHON_PROGRAM = _EntryPointType.PYTHON_PROGRAM
COMMAND = _EntryPointType.COMMAND
def get(path, name): # type: (str, str) -> _EntryPointType
"""
Args:
path (string): Directory where the entry point is located.
name (string): Name of the entry point file.
Returns:
(_EntryPointType): The type of the entry point.
"""
if name.endswith(".sh"):
return _EntryPointType.COMMAND
elif "setup.py" in os.listdir(path):
return _EntryPointType.PYTHON_PACKAGE
elif name.endswith(".py"):
return _EntryPointType.PYTHON_PROGRAM
else:
return _EntryPointType.COMMAND
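# Illustrative behaviour (a sketch, assuming a hypothetical /opt/ml/code
# directory rather than any particular SageMaker container):
#
#   get("/opt/ml/code", "train.sh")  -> _EntryPointType.COMMAND
#   get("/opt/ml/code", "train.py")  -> _EntryPointType.PYTHON_PROGRAM
#   # ...or PYTHON_PACKAGE when the directory also contains a setup.py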
| 32.627451 | 77 | 0.722957 | [
"Apache-2.0"
] | ChaiBapchya/sagemaker-training-toolk | src/sagemaker_training/_entry_point_type.py | 1,664 | Python |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
# Requires interpolation over 0.5 to 1 domain
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
# Everything should map to 0.5 if classes are ignored.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
| 44.179487 | 80 | 0.667054 | [
"Apache-2.0"
] | zhaowt96/models | research/object_detection/builders/calibration_builder_test.py | 10,338 | Python |
"""
Leetcode 70.
Climbing Stairs.
DP.
类似斐波那契数列:
转移方程: f(n) = f(n-1) + f(n-2).
时间复杂度:O(n)
还是没看明白这跟DP有啥关系,就是递归而已。
"""
class Solution:
def climbStairs(self, n: int) -> int:
res = [-1] * (n)
def dfs(n):
if n == 1:
return 1
if n == 2:
return 2
if res[n-1] == -1:
res[n-1] = dfs(n-1) + dfs(n-2)
return res[n-1]
else:
return res[n-1]
ans = dfs(n)
return ans
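# A bottom-up version of the same recurrence (an added sketch, not part of the
# original submission): it makes the dynamic-programming structure explicit and
# needs only O(1) extra space.
def climb_stairs_bottom_up(n: int) -> int:
    if n <= 2:
        return n
    prev, curr = 1, 2  # f(1), f(2)
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr  # f(k) = f(k-1) + f(k-2)
    return curr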
| 19.148148 | 46 | 0.40619 | [
"MIT"
] | vandesa003/leetcode_algo | dp/climbing_stairs.py | 597 | Python |
import apache
if apache.version == (2, 2):
from apache22.util_script import *
else:
raise RuntimeError('Apache version not supported.')
| 20.714286 | 55 | 0.717241 | [
"Apache-2.0"
] | GrahamDumpleton-abandoned/apswigpy | apache/util_script.py | 145 | Python |
import logging
from collections import Counter
from itertools import chain
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from pysrc.papers.analysis.text import get_frequent_tokens
logger = logging.getLogger(__name__)
def compute_topics_similarity_matrix(papers_vectors, comps):
logger.debug('Computing mean similarity between topics embeddings')
n_comps = len(set(comps))
distances = pairwise_distances(papers_vectors)
similarity_matrix = np.zeros(shape=(n_comps, n_comps))
indx = {i: np.flatnonzero([c == i for c in comps]).tolist() for i in range(n_comps)}
for i in range(n_comps):
for j in range(i, n_comps):
mean_distance = np.mean(distances[indx[i], :][:, indx[j]])
similarity_matrix[i, j] = similarity_matrix[j, i] = 1 / (1 + mean_distance)
return similarity_matrix
def cluster_and_sort(x, max_clusters, min_cluster_size):
"""
:param x: object representations (X x Features)
:param max_clusters:
:param min_cluster_size:
:return: List[cluster], Hierarchical dendrogram of splits.
"""
    logger.debug('Looking for an appropriate number of clusters, '
                 f'min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
if x.shape[1] == 0:
return [0] * x.shape[0], None
r = min(int(x.shape[0] / min_cluster_size), max_clusters) + 1
l = 1
if l >= r - 2:
return [0] * x.shape[0], None
prev_min_size = None
while l < r - 1:
n_clusters = int((l + r) / 2)
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
clusters_counter = Counter(model.labels_)
min_size = clusters_counter.most_common()[-1][1]
logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, '
f'prev_min_size={prev_min_size}, min_size={min_size}')
if min_size < min_cluster_size:
if prev_min_size is not None and min_size <= prev_min_size:
break
r = n_clusters + 1
else:
l = n_clusters
prev_min_size = min_size
logger.debug(f'Number of clusters = {n_clusters}')
logger.debug(f'Min cluster size = {prev_min_size}')
logger.debug('Reorder clusters by size descending')
reorder_map = {c: i for i, (c, _) in enumerate(clusters_counter.most_common())}
return [reorder_map[c] for c in model.labels_], model.children_
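# Illustrative usage (a sketch, not part of the original module): given a
# matrix of paper embeddings, pick the number of clusters by the binary search
# above and compute topic similarities; the shapes and thresholds below are
# arbitrary assumptions.
#
#   x = np.random.rand(200, 50)  # 200 papers, 50-dimensional vectors
#   labels, dendrogram = cluster_and_sort(x, max_clusters=10, min_cluster_size=5)
#   similarity = compute_topics_similarity_matrix(x, labels)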
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"""
Get words from abstracts that describe the components the best way
using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance
"""
logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
    # Since some of the components may be skipped, use this dict for continuous indexes
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
# In cases with less than 2 components, return frequencies
if len(comp_idx) < 2:
comp = list(comp_idx.keys())[0]
if ignore_comp is None:
most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words]}
else:
most_frequent = get_frequent_tokens(
chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))]))
)
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words],
ignore_comp: []}
# Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids
comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for comp, comp_pids in comps.items()}
result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs])) for comp, vs in result.items()]
logger.debug('Description\n' + '\n'.join(f'{comp}: {kwd}' for comp, kwd in kwds))
return result
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"""
Select words with the frequency vector that is the closest to the 'ideal' frequency vector
([0, ..., 0, 1, 0, ..., 0]) in tokens of cosine distance
"""
    logger.debug('Compute average token counts per component')
# Since some of the components may be skipped, use this dict for continuous indexes
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=np.float)
for comp, comp_ids in comps.items():
if comp != ignore_comp: # Not ignored
tokens_freqs_per_comp[comp_idx[comp], :] = \
np.sum(corpus_counts[comp_ids, :], axis=0)
# Calculate total number of occurrences for each word
tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
# Normalize frequency vector for each word to have length of 1
tokens_freqs_norm = np.sqrt(np.diag(tokens_freqs_per_comp.T @ tokens_freqs_per_comp))
tokens_freqs_per_comp = tokens_freqs_per_comp / tokens_freqs_norm
logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
# Calculate cosine distance between the frequency vector and [0, ..., 0, 1, 0, ..., 0] for each cluster
cluster_mask = np.eye(len(comp_idx))
distance = tokens_freqs_per_comp.T @ cluster_mask
# Add some weight for more frequent tokens to get rid of extremely rare ones in the top
adjusted_distance = distance.T * np.log(tokens_freqs_total)
result = {}
for comp in comps.keys():
if comp == ignore_comp:
result[comp] = [] # Ignored component
continue
c = comp_idx[comp] # Get the continuous index
cluster_tokens_idx = np.argsort(-adjusted_distance[c, :])[:n_words].tolist()
result[comp] = [(corpus_tokens[i], adjusted_distance[c, i]) for i in cluster_tokens_idx]
return result
| 46.117647 | 118 | 0.67331 | [
"Apache-2.0"
] | JetBrains-Research/pubtrends | pysrc/papers/analysis/topics.py | 6,272 | Python |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.resnet import ResNetBase
class BasicBlockShallow(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1):
super(BasicBlockShallow, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=1, stride=stride, dilation=dilation, dimension=dimension)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes, planes, kernel_size=1, stride=1, dilation=dilation, dimension=dimension)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, in_channels=3, out_channels=20, bn_momentum=0.1, D=3, **kwargs):
self.bn_momentum=bn_momentum
for name, value in kwargs.items():
if name != "self":
try:
setattr(self, name, value)
except:
print(name, value)
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4], momentum=self.bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5], momentum=self.bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6], momentum=self.bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7], momentum=self.bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7] * self.BLOCK.expansion,
out_channels,
kernel_size=1,
bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, in_dict, return_feats=False):
# print(in_dict['feats'].shape, in_dict['coords'].shape)
if self.quantization_mode == 'average':
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
elif self.quantization_mode == 'random':
quantization_mode=ME.SparseTensorQuantizationMode.RANDOM_SUBSAMPLE
in_field = ME.TensorField(
features=in_dict['feats'],
coordinates=in_dict['coords'],
quantization_mode=quantization_mode,
# minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
device=in_dict['feats'].device,
)
# print(in_field.device)
# x = ME.SparseTensor(in_dict['feats'], in_dict['coords'])
# print(in_field)
# print(in_dict['feats'].shape)
x = in_field.sparse()
out = self.conv0p1s1(x)
# print(out.coordinates.shape)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
# print(out.coordinates.shape)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
# print(out.coordinates.shape)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
# print(out.coordinates.shape)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
# print(out.coordinates.shape)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat(out, out_b3p8)
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat(out, out_b2p4)
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat(out, out_b1p2)
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat(out, out_p1)
out_feats = self.block8(out)
out = self.final(out_feats)
# if in_dict['rand_shift'] is not None:
# coords = []
# for i in range(len(in_dict['rand_shift'])):
# coords.append( out.coordinates_at(i) - in_dict['rand_shift'][i])
# feats = out.decomposed_features
# else:
# coords, feats = out.decomposed_coordinates_and_features
feats = out.slice(in_field).F
# feats = out.F
# feats = torch.cat(feats, axis=0)
if return_feats:
# return feats, out_feats, in_field
return feats, out_feats.slice(in_field).F
return feats
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("MinkUNet")
parser.add_argument("--quantization_mode", type=str, default='average')
# parser.add_argument("--out_channels", type=int, default=32)
return parent_parser
def convert_sync_batchnorm(self):
self = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(self)
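# Illustrative input construction for MinkUNetBase.forward (a sketch, not part
# of the original module): forward() expects dense per-point features plus
# batched integer coordinates, and `quantization_mode` must be supplied because
# forward() reads it. The sizes below are arbitrary assumptions.
#
#   coords = ME.utils.batched_coordinates([torch.randint(0, 100, (1000, 3))])
#   feats = torch.rand(1000, 3)
#   net = MinkUNet34C(in_channels=3, out_channels=20,
#                     quantization_mode='average')
#   logits = net({'feats': feats, 'coords': coords})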
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet34Shallow(MinkUNetBase):
BLOCK = BasicBlockShallow
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class MinkUNet34CShallow(MinkUNet34Shallow):
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
| 35.194286 | 99 | 0.624696 | [
"MIT"
] | zarzarj/MinkowskiEngine | examples/minkunet.py | 12,318 | Python |
#! /usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the build configuration tools of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL21$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see http://www.qt.io/terms-conditions. For further
## information use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## As a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys;
import re;
import os;
import subprocess;
import errno;
instructions = "This script can be used as follows:\n\
a) if run from tests/auto without any arguments it runs unit tests and then integration tests\n\
b) if run from tests/auto/unit, it runs unit tests\n\
c) if run from tests/auto/integration, it runs integration tests\n\
d) if run from tests/auto with \"unit\" it runs unit tests, and correspondingly for \"integration\""
# Colors
red="\033[41;37m";
redfg="\033[31m";
norm="\033[0m";
green="\033[32m";
grey="\033[37m";
yellow="\033[33m";
# Variables
curtest = "";
numpasses = [0];
numfails = [0];
numcrashes = 0;
numx = [0];
runTests = []
notRunTests = []
# Do not run the tests in these directories.
exclusionList = ["qdeclarativevideo", "qmultimedia_common"]
# Helper function for replacing stuffs
def print_color_string(string, color, match, index):
if index > 0:
print string[:match.start(index)] + color + string[match.start(index):match.end(index)] + norm + string[match.end(index):],
else:
print color + string[:-1] + norm
# AWK translation
awkfoo = [
(re.compile("\*\*\*\*\*\*\*\*\* Start testing of (\S+)"), yellow, 1, curtest),
(re.compile("^(PASS) "), green, 1, numpasses),
(re.compile("^(FAIL!) "), red, 0, numfails),
(re.compile("^(XFAIL) "), redfg, 1, numx),
(re.compile("^(XPASS) "), redfg, 1, numx),
(re.compile("^(QFATAL) "), red, 0, numx),
(re.compile("^(QDEBUG) "), grey, 0, None),
(re.compile("^(QWARN) "), yellow, 1, None),
(re.compile("\*\*\*\*\*\*\*\*\* Finished testing of (\S+)"), yellow, 1, curtest),
]
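# Each awkfoo entry is (pattern, color, match group to highlight, counter):
# a group index of 0 colors the whole line, and when the last field is a
# one-element list it is incremented so the totals can be reported at the end.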
#
# This method runs the test cases, color codes the output from the test cases and adds up the passes,
# fails etc.
#
def resultSummary(arg):
try:
pp = subprocess.Popen(arg, shell=False,stderr=subprocess.STDOUT,stdout=subprocess.PIPE);
p = pp.stdout;
try:
while True:
line = p.readline()
if len(line) == 0:
break
for (re, color, index, var) in awkfoo:
m = re.match(line)
if m:
break
if m:
print_color_string(line, color, m, index)
if isinstance(var, list):
var[0] = var[0] + 1;
else:
var = m.groups(index)
else:
print line,
finally:
rc = p.close();
pp.wait();
if pp.returncode < 0:
print red + "Error: '%s' exited with signal %d" % (arg, -pp.returncode) + norm
numcrashes = numcrashes + 1
except OSError, e:
if e.errno == errno.ENOENT:
print red + "Test '%s' not found." % arg + norm;
else:
print red + "Got an exception running '%s': %s " % (arg, e.strerror) + norm
numcrashes = numcrashes + 1
#
# This method finds the test cases that should be run and runs them.
#
def runAllTests(test):
for filename in os.listdir(test):
if(re.search("^q", filename)):
#Skip the dir if it is in the exclusion list.
exclude = False
for dir in exclusionList:
if(re.search(dir, filename)):
exclude = True
if(not(exclude)):
#Set path to this if on Windows
if(os.name=="nt"):
exePath = test+"\\"+filename+"\\debug\\tst_"+filename+".exe"
#Set path on OS X
if(sys.platform=="darwin"):
exePath = test +"/"+filename+"/tst_"+filename
if not (os.path.exists(exePath)):
exePath = test + "/"+filename+"/tst_"+filename+".app/Contents/MacOS/tst_"+filename
#Set path to this if on Unix
else:
exePath = test +"/"+filename+"/tst_"+filename
if(os.path.exists(exePath)):
runTests.append(filename)
resultSummary(exePath);
else:
notRunTests.append(filename)
arguments = sys.argv[1:]
count = len(arguments)
# Find the current working directory.
cwd = os.getcwd()
if(count == 0):
if re.search("auto$", cwd):
x = 0
runAllTests("unit")
runAllTests("integration")
elif re.search("unit$", cwd):
runAllTests(cwd)
elif re.search("integration$", cwd):
runAllTests(cwd)
else:
print "You are running this script from the wrong directory! " + instructions
exit()
elif(count == 1):
if os.path.exists(sys.argv[1]):
runAllTests(sys.argv[1])
else:
print sys.argv[1] + " test cases do not exist! " + instructions
exit()
else:
print "You have passed too many arguments! " + instructions
exit()
print "Total of all tests: %d passes, %d failures, %d unexpected, %d badnesses." % (numpasses[0], numfails[0], numx[0], numcrashes);
if runTests:
print "The following test cases were run: "
for testCase in runTests:
print testCase
else:
print "No test cases were run!"
if notRunTests:
print "The following test cases could not be run: "
for testCase in notRunTests:
print testCase
else:
print "All test cases were run."
| 34.45098 | 132 | 0.581816 | [
"Apache-2.0"
] | wgnet/wds_qt | qtmultimedia/tests/auto/runautotests.py | 7,028 | Python |
import warnings
import numpy as np
from nilearn.plotting import cm
from nilearn.plotting.js_plotting_utils import decode
from nilearn.plotting import html_connectome
from .test_js_plotting_utils import check_html
def test_prepare_line():
e = np.asarray([0, 1, 2, 3], dtype=int)
n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)
pe, pn = html_connectome._prepare_line(e, n)
assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all()
assert(pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all()
def _make_connectome():
adj = np.diag([1.5, .3, 2.5], 2)
adj += adj.T
adj += np.eye(5)
coord = np.arange(5)
coord = np.asarray([coord * 10, -coord, coord[::-1]]).T
return adj, coord
def test_get_connectome():
adj, coord = _make_connectome()
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
expected_x = np.asarray(
[0, 0, 0,
0, 20, 0,
10, 10, 0,
10, 30, 0,
20, 0, 0,
20, 20, 0,
20, 40, 0,
30, 10, 0,
30, 30, 0,
40, 20, 0,
40, 40, 0], dtype='<f4')
assert (con_x == expected_x).all()
assert {'_con_x', '_con_y', '_con_z', '_con_w', 'colorscale'
}.issubset(connectome.keys())
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
adj[adj == 0] = np.nan
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
assert (con_x == expected_x).all()
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
def test_view_connectome():
adj, coord = _make_connectome()
html = html_connectome.view_connectome(adj, coord)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(adj, coord, '85.3%',
title="SOME_TITLE")
check_html(html, False, 'connectome-plot')
assert "SOME_TITLE" in html.html
html = html_connectome.view_connectome(adj, coord, '85.3%',
linewidth=8.5, node_size=4.2)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(
adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord)))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_connectome():
deprecated_params = {'coords': 'node_coords',
'threshold': 'edge_threshold',
'cmap': 'edge_cmap',
'marker_size': 'node_size',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
adj, coord = _make_connectome()
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_connectome(adjacency_matrix=adj,
coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5, node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
marker_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adj,
coord,
'85.3%',
cm.cyan_orange,
8.5,
4.2,
)
old_params = ['coords', 'threshold', 'cmap', 'marker_size']
raised_warning_messages = ''.join(
str(warning.message) for warning in raised_warnings)
print(raised_warning_messages)
for old_param_ in old_params:
assert warning_msgs[old_param_] in raised_warning_messages
def test_get_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
markers = html_connectome._get_markers(coords, colors)
assert markers["marker_color"] == [
'#ff0000', '#007f00', '#000000', '#ffffff']
assert markers['markers_only']
con_x = decode(markers['_con_x'], '<f4')
assert np.allclose(con_x, coords[:, 0])
def test_view_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
html = html_connectome.view_markers(coords, colors)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords, marker_size=15)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=np.arange(len(coords)))
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=list(range(len(coords))))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_markers():
""" Tests whether use of deprecated keyword parameters of view_markers
    raises correct warnings.
"""
deprecated_params = {'coords': 'marker_coords',
'colors': 'marker_color',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_markers(coords=coords,
marker_color=colors,
)
html_connectome.view_markers(marker_coords=coords,
colors=colors,
)
html_connectome.view_markers(marker_coords=coords,
marker_color=colors,
)
html_connectome.view_markers(coords,
colors,
)
old_params = ['coords', 'colors']
assert len(raised_warnings) == 2
for old_param_, raised_warning_ in zip(old_params, raised_warnings):
assert warning_msgs[old_param_] == str(raised_warning_.message)
assert raised_warning_.category is DeprecationWarning
| 40.436893 | 78 | 0.508283 | [
"BSD-2-Clause"
] | JohannesWiesner/nilearn | nilearn/plotting/tests/test_html_connectome.py | 8,330 | Python |
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
dataset = load_boston()
X = dataset.data
y = dataset.target
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X-mean)/std
# print(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n_train = X_train.shape[0]
n_features = X_train.shape[1]
# Weight initialization
w = np.random.rand(n_features)
b = 1.1
lr = 0.001
epoches = 3000
def model(x):
y_hat = w.dot(x)+b
return y_hat
def loss_funtion(X, y):
total_loss = 0
n_samples = len(X)
for i in range(n_samples):
xi = X[i]
yi = y[i]
yi_hat = model(xi)
total_loss += abs(yi_hat-yi)**2
avg_loss = (1/n_samples)*total_loss
return avg_loss
reg = 0.5
for epoch in range(epoches):
sum_w = 0.0
sum_b = 0.0
for i in range(n_train):
xi = X_train[i]
yi = y_train[i]
yi_hat = model(xi)
sum_w += (yi_hat-yi)*xi
sum_b += (yi_hat - yi)
grad_w = (2/n_train)*sum_w+(2.0*reg*w)
    grad_b = (2/n_train)*sum_b  # the bias term is not regularized
w = w-lr*grad_w
b = b-lr*grad_b
train_loss = loss_funtion(X_train, y_train)
test_loss = loss_funtion(X_test, y_test)
print(train_loss)
print(test_loss)
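# Added for illustration: the same training loss in vectorized form. Evaluating
# X_train.dot(w) + b scores every sample at once, so the loop-based
# loss_funtion above can be cross-checked with a single matrix expression.
y_hat_train = X_train.dot(w) + b
print(np.mean((y_hat_train - y_train) ** 2))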
| 21.271186 | 72 | 0.636653 | [
"Apache-2.0"
] | ZXTFINAL/deeplearning | 1_boston.py | 1,285 | Python |
# Name:
# Date:
# proj02: sum
# Write a program that prompts the user to enter numbers, one per line,
# ending with a line containing 0, and keep a running sum of the numbers.
# Only print out the sum after all the numbers are entered
# (at least in your final version). Each time you read in a number,
# you can immediately use it for your sum,
# and then be done with the number just entered.
# Example:
# Enter a number to sum, or 0 to indicate you are finished: 4
# Enter a number to sum, or 0 to indicate you are finished: 5
# Enter a number to sum, or 0 to indicate you are finished: 2
# Enter a number to sum, or 0 to indicate you are finished: 10
# Enter a number to sum, or 0 to indicate you are finished: 0
# The sum of your numbers is: 21
input_sum = 0
var = 1
while var != 0:
input1 = raw_input("Enter a number to sum, or 0 to indicate you are finished: ")
input_sum = int(input1) + input_sum
if int(input1) == 0:
var = 0
print"The sum of your numbers is: " + str(input_sum)
| 35.137931 | 84 | 0.697743 | [
"MIT"
] | ryanaspears/VSA | proj02_loops/proj02_01.py | 1,019 | Python |
#!/usr/bin/env python
"""
convert corpus to annotated corpus
This script uses nltk for dependency parsing, which is based on stanford corenlp.
"""
import os
from nltk.parse.stanford import *
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('corenlp_path',
help='Directory to stanford corenlp') # /home/lbf/Documents/stanford-corenlp-full-2017-06-09/
parser.add_argument('--max_block_size', '-mbs', default=1000000, type=int,
                    help='how many characters the parser handles at one time; a bigger max_block_size will consume more memory, but should be faster.')
parser.add_argument('--corpus_path', default='./news.toy.txt',
help='Directory to corpus')
parser.add_argument('--annotated_corpus_path', default='./news.toy.annotated.txt',
help='Directory to annotated corpus')
parser.add_argument('--parser_model', '-o', choices=['edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz', 'edu/stanford/nlp/models/parser/nndep/english_UD.gz'],
default='edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz',
help='stanford parser model')
args = parser.parse_args()
class dependency_parser():
def __init__(self, path_to_jar, path_to_models_jar, model_path):
if 'nndep/' in model_path:
self.parser = StanfordNeuralDependencyParser( #StanfordNeuralDependencyParser
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx5g') # , corenlp_options='-model modelOutputFile.txt.gz'
if 'lexparser/' in model_path:
self.parser = StanfordDependencyParser(
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx10g')
def preprocess_text(self, text):
# hack for nltk
text = text.replace('/', '-')
# hack for output format
text = text.replace('{', '-')
text = text.replace('}', '-')
text = text.replace('[', '-')
text = text.replace(']', '-')
return text
def parse(self, text):
text = self.preprocess_text(text)
out = ''
# print(text)
try:
parse_results = self.parser.raw_parse(text) #, properties={'annotators' : 'depparse'}
for dependency_tree in parse_results:
for index, node in dependency_tree.nodes.items():
if node['word'] is None: # skip root node
continue
dependency_str = ''
for dep, index in node['deps'].items():
dependency_str += ',{}/{}'.format(str(index[0] - 1), dep)
dependency_str = dependency_str[1:]
dependency_str = '{}/{}'.format(node['rel'], node['head'])
out += '{}/{}[{}] '.format(node['word'], node['tag'], dependency_str)
out += "\n"
return out
except AssertionError as e:
            print('error when parsing "{}"'.format(text))
return ''
dependency_parser = dependency_parser(
path_to_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0.jar"),
path_to_models_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0-models.jar"),
model_path=args.parser_model)
# edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz
# edu/stanford/nlp/models/parser/nndep/english_UD.gz
start_time = time.time()
print(dependency_parser.parse("Alice's dog also likes eating sausage from Russia"))
# dependency_parser.parse('Information about the stages 50km to 80km), booking for food and accommodation (R450-38 per night) and downloadable maps are on the Freedom Challenge website call 00 27 84 567 4152 ')
block_size = 0
text = ''
with open(args.corpus_path, "r") as corpus_file, open(args.annotated_corpus_path, "w") as annotated_corpus_file:
for line in corpus_file:
text += line + "\n"
block_size += len(line)
if block_size > args.max_block_size:
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
block_size = 0
text = ''
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
end_time = time.time()
print('spend {} minutes'.format((end_time - start_time) / 60))
| 43.784314 | 211 | 0.631438 | [
"Apache-2.0"
] | berntham/vsmlib | vsmlib/embeddings/bofang/annotate_corpus_nltk.py | 4,466 | Python |
#!/usr/bin/python
#
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import library functions we need
import RPi.GPIO as GPIO
import struct
import sys
import os
import subprocess
from time import sleep
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
bounce = 25
if len(sys.argv) > 2:
cmd = sys.argv[1].lower()
pin = int(sys.argv[2])
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
if cmd == "pwm":
#print("Initialised pin "+str(pin)+" to PWM")
try:
freq = int(sys.argv[3])
except:
freq = 100
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, freq)
p.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
p.ChangeDutyCycle(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "buzz":
#print("Initialised pin "+str(pin)+" to Buzz")
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.stop()
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
elif float(data) == 0:
p.stop()
else:
p.start(50)
p.ChangeFrequency(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "out":
#print("Initialised pin "+str(pin)+" to OUT")
GPIO.setup(pin,GPIO.OUT)
if len(sys.argv) == 4:
GPIO.output(pin,int(sys.argv[3]))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except:
if len(sys.argv) == 4:
data = int(sys.argv[3])
else:
data = 0
if data != 0:
data = 1
GPIO.output(pin,data)
elif cmd == "in":
#print("Initialised pin "+str(pin)+" to IN")
bounce = float(sys.argv[4])
def handle_callback(chan):
sleep(bounce/1000.0)
print(GPIO.input(chan))
if sys.argv[3].lower() == "up":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
elif sys.argv[3].lower() == "down":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
else:
GPIO.setup(pin,GPIO.IN)
print(GPIO.input(pin))
GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=int(bounce))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
elif cmd == "byte":
#print("Initialised BYTE mode - "+str(pin)+)
list = [7,11,13,12,15,16,18,22]
GPIO.setup(list,GPIO.OUT)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
for bit in range(8):
if pin == 1:
mask = 1 << (7 - bit)
else:
mask = 1 << bit
GPIO.output(list[bit], data & mask)
elif cmd == "borg":
#print("Initialised BORG mode - "+str(pin)+)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
r = GPIO.PWM(11, 100)
g = GPIO.PWM(13, 100)
b = GPIO.PWM(15, 100)
r.start(0)
g.start(0)
b.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
c = data.split(",")
r.ChangeDutyCycle(float(c[0]))
g.ChangeDutyCycle(float(c[1]))
b.ChangeDutyCycle(float(c[2]))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
elif cmd == "mouse": # catch mice button events
file = open( "/dev/input/mice", "rb" )
oldbutt = 0
def getMouseEvent():
global oldbutt
global pin
buf = file.read(3)
pin = pin & 0x07
button = ord( buf[0] ) & pin # mask out just the required button(s)
if button != oldbutt: # only send if changed
oldbutt = button
print(button)
while True:
try:
getMouseEvent()
except:
file.close()
sys.exit(0)
elif cmd == "kbd": # catch keyboard button events
try:
while not os.path.isdir("/dev/input/by-path"):
sleep(10)
infile = subprocess.check_output("ls /dev/input/by-path/ | grep -m 1 'kbd'", shell=True).strip()
infile_path = "/dev/input/by-path/" + infile
EVENT_SIZE = struct.calcsize('llHHI')
file = open(infile_path, "rb")
event = file.read(EVENT_SIZE)
while event:
(tv_sec, tv_usec, type, code, value) = struct.unpack('llHHI', event)
#if type != 0 or code != 0 or value != 0:
if type == 1:
# type,code,value
print("%u,%u" % (code, value))
event = file.read(EVENT_SIZE)
print("0,0")
file.close()
sys.exit(0)
except:
file.close()
sys.exit(0)
elif len(sys.argv) > 1:
cmd = sys.argv[1].lower()
if cmd == "rev":
print(GPIO.RPI_REVISION)
elif cmd == "ver":
print(GPIO.VERSION)
elif cmd == "info":
print(GPIO.RPI_INFO)
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
print(" only ver (gpio version) and info (board information) accept no pin parameter.")
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
| 32.091667 | 108 | 0.497923 | [
"Apache-2.0"
] | 3anology/node-red | packages/node_modules/@node-red/nodes/core/hardware/nrgpio.py | 7,702 | Python |
# Import Basic modules
import numpy as np
import os
# Import everything needed to edit video clips
from moviepy.editor import *
from moviepy.Clip import *
from moviepy.video.VideoClip import *
from moviepy.config import get_setting # ffmpeg, ffmpeg.exe, etc...
class AudioProcessing:
    # documentation string, which can be accessed via ClassName.__doc__ (AudioProcessing.__doc__)
    """ This class includes the attributes and methods required for audio processing,
    such as equalization, signal improvement, audio coding and audio clipping.
    The input of the methods is the input audio file and the outputs are the processed results.
    Built-In Class Attributes:
    Every Python class keeps the following built-in attributes, which can be accessed using
    the dot operator like any other attribute:
    __dict__ : Dictionary containing the class's namespace.
    __doc__ : Class documentation string or None if undefined.
    __name__: Class name.
    __module__: Module name in which the class is defined. This attribute is "__main__" in interactive mode.
    __bases__ : A possibly empty tuple containing the base classes, in the order of their occurrence
    in the base class list."""
def __init__(self, inputFile):
self.inputFile = inputFile
#def template_matching(self):
def equalizer(self):
'''
        Placeholder for audio equalization.
Inputs:
Outputs:
Example:
'''
def signal_improvement(self):
'''
        Placeholder for audio signal improvement / enhancement.
Inputs:
Outputs:
Example:
'''
def audio_coding(self, bitrate, codecformat):
'''
        Placeholder for audio encoding with the given bitrate and codec format.
Inputs:
Outputs:
Example:
'''
def audio_clip(self):
'''
        Placeholder for extracting an audio clip.
Inputs:
Outputs:
Example:
'''
if __name__ == '__main__':
print "done"
| 20.57265 | 108 | 0.617366 | [
"Unlicense"
] | papar22/livius | livius/audio/audioProcessing.py | 2,407 | Python |
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from collections import defaultdict
from collections import OrderedDict
import itertools
import os
from pathlib import Path
from colcon_core.package_selection import add_arguments \
as add_packages_arguments
from colcon_core.package_selection import get_package_descriptors
from colcon_core.package_selection import select_package_decorators
from colcon_core.plugin_system import satisfies_version
from colcon_core.topological_order import topological_order_packages
from colcon_core.verb import VerbExtensionPoint
class GraphVerb(VerbExtensionPoint):
"""Generate a visual representation of the dependency graph."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(VerbExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def add_arguments(self, *, parser): # noqa: D102
# only added so that package selection arguments can be used
# which use the build directory to store state information
parser.add_argument(
'--build-base',
default='build',
help='The base path for all build directories (default: build)')
add_packages_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--dot',
action='store_true',
default=False,
help='Output topological graph in DOT '
'(e.g. pass the output to dot: ` | dot -Tpng -o graph.png`), '
'legend: blue=build, red=run, tan=test, dashed=indirect')
group.add_argument(
'--density',
action='store_true',
default=False,
help='Output density of the graph (only without --dot)')
parser.add_argument(
'--legend',
action='store_true',
default=False,
help='Output legend for the graph')
parser.add_argument(
'--dot-cluster',
action='store_true',
default=False,
help='Cluster packages by their filesystem path (only affects '
'--dot)')
parser.add_argument(
'--dot-include-skipped',
action='store_true',
default=False,
help='Also output skipped packages (only affects --dot)')
def main(self, *, context): # noqa: D102
args = context.args
descriptors = get_package_descriptors(args)
decorators = topological_order_packages(
descriptors, recursive_categories=('run', ))
select_package_decorators(args, decorators)
if not args.dot:
if args.legend:
print('+ marks when the package in this row can be processed')
print('* marks a direct dependency '
'from the package indicated by the + in the same column '
'to the package in this row')
print('. marks a transitive dependency')
print()
# draw dependency graph in ASCII
shown_decorators = list(filter(lambda d: d.selected, decorators))
max_length = max([
len(m.descriptor.name) for m in shown_decorators] + [0])
lines = [
m.descriptor.name.ljust(max_length + 2)
for m in shown_decorators]
depends = [
m.descriptor.get_dependencies() for m in shown_decorators]
rec_depends = [
m.descriptor.get_recursive_dependencies(
[d.descriptor for d in decorators],
recursive_categories=('run', ))
for m in shown_decorators]
empty_cells = 0
for i, decorator in enumerate(shown_decorators):
for j in range(len(lines)):
if j == i:
# package i is being processed
lines[j] += '+'
elif shown_decorators[j].descriptor.name in depends[i]:
# package i directly depends on package j
lines[j] += '*'
elif shown_decorators[j].descriptor.name in rec_depends[i]:
# package i recursively depends on package j
lines[j] += '.'
else:
# package i doesn't depend on package j
lines[j] += ' '
empty_cells += 1
if args.density:
empty_fraction = \
empty_cells / (len(lines) * (len(lines) - 1)) \
if len(lines) > 1 else 1.0
# normalize to 200% since half of the matrix should be empty
density_percentage = 200.0 * (1.0 - empty_fraction)
print('dependency density %.2f %%' % density_percentage)
print()
else: # --dot
lines = ['digraph graphname {']
decorators_by_name = defaultdict(set)
for deco in decorators:
decorators_by_name[deco.descriptor.name].add(deco)
selected_pkg_names = [
m.descriptor.name for m in decorators
if m.selected or args.dot_include_skipped]
has_duplicate_names = \
len(selected_pkg_names) != len(set(selected_pkg_names))
selected_pkg_names = set(selected_pkg_names)
# collect selected package decorators and their parent path
nodes = OrderedDict()
for deco in reversed(decorators):
if deco.selected or args.dot_include_skipped:
nodes[deco] = Path(deco.descriptor.path).parent
# collect direct dependencies
direct_edges = defaultdict(set)
for deco in reversed(decorators):
if (
not deco.selected and
not args.dot_include_skipped
):
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
if dep not in selected_pkg_names:
continue
# store the category of each dependency
# use the decorator
# since there might be packages with the same name
direct_edges[(deco, dep)].add(category)
# collect indirect dependencies
indirect_edges = defaultdict(set)
for deco in reversed(decorators):
if not deco.selected:
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
# ignore direct dependencies
if dep in selected_pkg_names:
continue
# ignore unknown dependencies
if dep not in decorators_by_name.keys():
continue
# iterate over recursive dependencies
for rdep in itertools.chain.from_iterable(
d.recursive_dependencies
for d in decorators_by_name[dep]
):
if rdep not in selected_pkg_names:
continue
# skip edges which are redundant to direct edges
if (deco, rdep) in direct_edges:
continue
indirect_edges[(deco, rdep)].add(category)
try:
# HACK Python 3.5 can't handle Path objects
common_path = os.path.commonpath(
[str(p) for p in nodes.values()])
except ValueError:
common_path = None
def get_node_data(decorator):
nonlocal args
nonlocal has_duplicate_names
if not has_duplicate_names:
# use name where possible so the dot code is easy to read
return decorator.descriptor.name, \
'' if (
decorator.selected or
not args.dot_include_skipped
) else '[color = "gray" fontcolor = "gray"]'
# otherwise append the descriptor id to make each node unique
descriptor_id = id(decorator.descriptor)
return (
'{decorator.descriptor.name}_{descriptor_id}'
.format_map(locals()),
' [label = "{decorator.descriptor.name}"]'
.format_map(locals()),
)
if not args.dot_cluster or common_path is None:
# output nodes
for deco in nodes.keys():
if (
not deco.selected and
not args.dot_include_skipped
):
continue
node_name, attributes = get_node_data(deco)
lines.append(
' "{node_name}"{attributes};'.format_map(locals()))
else:
# output clusters
clusters = defaultdict(set)
for deco, path in nodes.items():
clusters[path.relative_to(common_path)].add(deco)
for i, cluster in zip(range(len(clusters)), clusters.items()):
path, decos = cluster
if path.name:
# wrap cluster in subgraph
lines.append(
' subgraph cluster_{i} {{'.format_map(locals()))
lines.append(
' label = "{path}";'.format_map(locals()))
indent = ' '
else:
indent = ' '
for deco in decos:
node_name, attributes = get_node_data(deco)
lines.append(
'{indent}"{node_name}"{attributes};'
.format_map(locals()))
if path.name:
lines.append(' }')
# output edges
color_mapping = OrderedDict((
('build', '#0000ff'), # blue
('run', '#ff0000'), # red
('test', '#d2b48c'), # tan
))
for style, edges in zip(
('', ', style="dashed"'),
(direct_edges, indirect_edges),
):
for (deco_start, node_end), categories in edges.items():
start_name, _ = get_node_data(deco_start)
for deco in decorators_by_name[node_end]:
end_name, _ = get_node_data(deco)
edge_alpha = '' \
if deco_start.selected and deco.selected else '77'
colors = ':'.join([
color + edge_alpha
for category, color in color_mapping.items()
if category in categories])
lines.append(
' "{start_name}" -> "{end_name}" '
'[color="{colors}"{style}];'.format_map(locals()))
if args.legend:
lines.append(' subgraph cluster_legend {')
lines.append(' color=gray')
lines.append(' label="Legend";')
lines.append(' margin=0;')
# invisible nodes between the dependency edges
lines.append(' node [label="", shape=none];')
previous_node = '_legend_first'
# an edge for each dependency type
for dependency_type, color in color_mapping.items():
next_node = '_legend_' + dependency_type
lines.append(
' {previous_node} -> {next_node} '
'[label="{dependency_type} dep.", color="{color}"];'
.format_map(locals()))
previous_node = next_node
lines.append(
' {previous_node} -> _legend_last '
'[label="indirect dep.", style="dashed"];'
.format_map(locals()))
# layout all legend nodes on the same rank
lines.append(' {')
lines.append(' rank=same;')
lines.append(' _legend_first;')
for dependency_type in color_mapping.keys():
lines.append(
' _legend_{dependency_type};'
.format_map(locals()))
lines.append(' _legend_last;')
lines.append(' }')
lines.append(' }')
lines.append('}')
for line in lines:
print(line)
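# Illustrative usage (mirrors the --dot help text above):
#   colcon graph --dot | dot -Tpng -o graph.png   # render the dependency graph as an image
#   colcon graph --density                        # print the ASCII matrix plus its density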
| 42.496855 | 79 | 0.492896 | [
"Apache-2.0"
] | chapulina/colcon-package-information | colcon_package_information/verb/graph.py | 13,514 | Python |
#!/usr/bin/env python
# encoding: utf-8
'''
@project : MSRGCN
@file : cmu_runner.py
@author : Droliven
@contact : [email protected]
@ide : PyCharm
@time : 2021-07-28 13:29
'''
from datas import CMUMotionDataset, get_dct_matrix, reverse_dct_torch, define_actions_cmu, draw_pic_gt_pred
from nets import MSRGCN, MSRGCNShortTerm
from configs.config import Config
from torch.utils.data import DataLoader
import torch.optim as optim
import torch
import os
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from pprint import pprint
def L2NormLoss_test(gt, out, frame_ids): # (batch size,feature dim, seq len)
'''
gt: B, 66, 25
'''
t_3d = np.zeros(len(frame_ids))
batch_size, features, seq_len = gt.shape
gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3
out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3
for k in np.arange(0, len(frame_ids)):
j = frame_ids[k]
t_3d[k] = torch.mean(torch.norm(gt[:, j, :, :].contiguous().view(-1, 3) - out[:, j, :, :].contiguous().view(-1, 3), 2, 1)).cpu().data.numpy() * batch_size
return t_3d
def L2NormLoss_train(gt, out):
'''
# (batch size,feature dim, seq len)
    Equivalent to mpjpe_error_p3d().
'''
batch_size, _, seq_len = gt.shape
gt = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
out = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
loss = torch.mean(torch.norm(gt - out, 2, dim=-1))
return loss
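# Illustrative shape check (not part of the original file): both tensors are
# (batch, joints*3, seq_len) and the loss is the mean per-joint Euclidean error in the
# dataset's coordinate units, so zero-filled inputs give a zero loss.
def _l2norm_train_shape_example():
    gt = torch.zeros(8, 66, 25)   # 8 samples, 22 joints x 3 coordinates, 25 frames
    out = torch.zeros(8, 66, 25)
    assert L2NormLoss_train(gt, out).item() == 0.0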
def lr_decay(optimizer, lr_now, gamma):
lr = lr_now * gamma
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
class CMURunner():
def __init__(self, exp_name="cmu", input_n=10, output_n=10, dct_n=15, device="cuda:0", num_works=0, test_manner="all", debug_step=1):
super(CMURunner, self).__init__()
        # parameters
self.start_epoch = 1
self.best_accuracy = 1e15
self.cfg = Config(exp_name=exp_name, input_n=input_n, output_n=output_n, dct_n=dct_n, device=device, num_works=num_works, test_manner=test_manner)
print("\n================== Configs =================")
pprint(vars(self.cfg), indent=4)
print("==========================================\n")
with open(os.path.join(self.cfg.ckpt_dir, "config.txt"), 'w', encoding='utf-8') as f:
f.write(str(self.cfg.__dict__))
        # model
if self.cfg.output_n == 25:
self.model = MSRGCN(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
elif self.cfg.output_n == 10:
self.model = MSRGCNShortTerm(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
if self.cfg.device != "cpu":
self.model.cuda(self.cfg.device)
print(">>> total params: {:.2f}M\n".format(
sum(p.numel() for p in self.model.parameters()) / 1000000.0))
self.lr = self.cfg.lr
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        # data
dct_m, i_dct_m = get_dct_matrix(self.cfg.seq_len)
self.dct_m = torch.from_numpy(dct_m).float()
self.i_dct_m = torch.from_numpy(i_dct_m).float()
if self.cfg.device != "cpu":
self.dct_m = self.dct_m.cuda(self.cfg.device, non_blocking=True)
self.i_dct_m = self.i_dct_m.cuda(self.cfg.device, non_blocking=True)
train_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions="all", mode_name="train", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=0, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=0, global_min=0, device=self.cfg.device, debug_step=debug_step)
print("train data shape {}".format(train_dataset.gt_all_scales['p32'].shape[0]))
self.train_loader = DataLoader(
dataset=train_dataset,
batch_size=self.cfg.train_batch_size,
shuffle=True,
num_workers=self.cfg.num_works,
pin_memory=True)
self.global_max = train_dataset.global_max
self.global_min = train_dataset.global_min
self.test_loader = dict()
for act in define_actions_cmu("all"):
test_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=act, mode_name="test", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=1, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=self.global_max, global_min=self.global_min, device=self.cfg.device, debug_step=debug_step)
self.test_loader[act] = DataLoader(
dataset=test_dataset,
batch_size=self.cfg.test_batch_size,
shuffle=False,
num_workers=self.cfg.num_works,
pin_memory=True)
print(">>> test {} data {}".format(act, test_dataset.gt_all_scales['p32'].shape[0]))
self.summary = SummaryWriter(self.cfg.ckpt_dir)
def save(self, checkpoint_path, best_err, curr_err):
state = {
"lr": self.lr,
"best_err": best_err,
"curr_err": curr_err,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
torch.save(state, checkpoint_path)
def restore(self, checkpoint_path):
state = torch.load(checkpoint_path, map_location=self.cfg.device)
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.lr = state["lr"]
best_err = state['best_err']
curr_err = state["curr_err"]
print("load from lr {}, curr_avg {}, best_avg {}.".format(state["lr"], curr_err, best_err))
def train(self, epoch):
self.model.train()
average_loss = 0
for i, (inputs, gts) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
            # skip a batch that only has one sample, which the batch_norm layers cannot handle
if b == 1:
continue
self.global_step = (epoch - 1) * len(self.train_loader) + i + 1
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
outputs = self.model(inputs)
losses = None
for k in outputs:
                # undo the normalization
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
                # map back from the DCT domain to the time domain
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
# loss
loss_curr = L2NormLoss_train(gts[k], outputs[k])
if losses is None:
losses = loss_curr
else:
losses = losses + loss_curr
self.summary.add_scalar(f"Loss/{k}", loss_curr, self.global_step)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
average_loss += losses.cpu().data.numpy()
average_loss /= (i + 1)
return average_loss
def test(self, epoch=0):
self.model.eval()
frame_ids = self.cfg.frame_ids
total_loss = np.zeros((len(define_actions_cmu("all")), len(frame_ids)))
for act_idx, act in enumerate(define_actions_cmu("all")):
count = 0
for i, (inputs, gts) in enumerate(self.test_loader[act]):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
with torch.no_grad():
outputs = self.model(inputs)
                # undo the normalization
for k in outputs:
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
                    # map back from the DCT domain to the time domain
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
                # compute the evaluation error
mygt = gts['p32'].view(-1, self.cfg.origin_noden, 3, self.cfg.seq_len).clone()
myout = outputs['p22'].view(-1, self.cfg.final_out_noden, 3, self.cfg.seq_len)
mygt[:, self.cfg.dim_used_3d, :, :] = myout
mygt[:, self.cfg.dim_repeat_32, :, :] = myout[:, self.cfg.dim_repeat_22, :, :]
mygt = mygt.view(-1, self.cfg.origin_noden*3, self.cfg.seq_len)
loss = L2NormLoss_test(gts['p32'][:, :, self.cfg.input_n:], mygt[:, :, self.cfg.input_n:], self.cfg.frame_ids)
total_loss[act_idx] += loss
# count += 1
count += mygt.shape[0]
                # ************ draw qualitative results
if act_idx == 0 and i == 0:
pred_seq = outputs['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
gt_seq = gts['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
for t in range(self.cfg.seq_len):
draw_pic_gt_pred(gt_seq[:, :, t], pred_seq[:, :, t], self.cfg.I22_plot, self.cfg.J22_plot, self.cfg.LR22_plot, os.path.join(self.cfg.ckpt_dir, "images", f"{epoch}_{act}_{t}.png"))
total_loss[act_idx] /= count
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/{act}/{frame}", total_loss[act_idx][fidx], epoch)
self.summary.add_scalar("Test/average", np.mean(total_loss), epoch)
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/avg{frame}", np.mean(total_loss[:, fidx]), epoch)
return total_loss
def run(self):
for epoch in range(self.start_epoch, self.cfg.n_epoch + 1):
if epoch % 2 == 0:
self.lr = lr_decay(self.optimizer, self.lr, self.cfg.lr_decay)
self.summary.add_scalar("LR", self.lr, epoch)
average_train_loss = self.train(epoch)
if average_train_loss < self.best_accuracy:
self.best_accuracy = average_train_loss
self.save(
os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_best_epoch{}_err{:.4f}.pth'.format(self.cfg.exp_name,
self.cfg.input_n,
self.cfg.output_n,
self.cfg.dct_n, epoch,
average_train_loss)), self.best_accuracy, average_train_loss)
self.save(os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_last.pth'.format(self.cfg.exp_name, self.cfg.input_n,
self.cfg.output_n, self.cfg.dct_n)),
self.best_accuracy, average_train_loss)
if epoch % 1 == 0:
loss_l2_test = self.test(epoch)
print('Epoch: {}, LR: {}, Current err test avg: {}'.format(epoch, self.lr, np.mean(loss_l2_test)))
if __name__ == '__main__':
pass | 45.378182 | 215 | 0.550845 | [
"MIT"
] | Droliven/MSRGCN | run/cmu_runner.py | 12,529 | Python |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
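# Illustrative sanity checks (not part of the original script) for the conversions above:
def _name_to_ipv6_examples():
    # an IPv4 address becomes an IPv4-mapped IPv6 address
    assert name_to_ipv6('1.2.3.4') == pchIPv4 + bytearray([1, 2, 3, 4])
    # the legacy little-endian hex form encodes the same address
    assert name_to_ipv6('0x04030201') == name_to_ipv6('1.2.3.4')
    # '::' expands to the required number of zero bytes
    assert name_to_ipv6('::1') == bytearray(15 * [0] + [1])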
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef WAZ_CHAINPARAMSSEEDS_H\n')
g.write('#define WAZ_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the wuazi network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // WAZ_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.395683 | 98 | 0.581118 | [
"MIT"
] | 26rahulsingh/new | contrib/seeds/generate-seeds.py | 4,364 | Python |
class Type:
def __init__(self):
pass
def get_repr(self):
return self
def __repr__(self):
return self.get_repr().stringify()
def stringify(self):
return ""
def put_on_stack(self, stack):
stack.put(self.get_repr())
def take_from_stack(self, stack):
stack.take(self.get_repr())
def get_as_single_constant(self):
repr = self.get_repr()
if isinstance(repr, TypeConstant):
return repr
return None
class TypeConstant(Type):
def __init__(self, name):
self.name = name
def stringify(self):
return self.name
class TypeArrow(Type):
def __init__(self, left, right, name = None):
self.left = left
self.right = right
self.name = name
def stringify(self):
return "(" + str(self.left) + ")->" + str(self.right)
def put_on_stack(self, stack):
self.left.take_from_stack(stack)
self.right.put_on_stack(stack)
def take_from_stack(self, stack):
raise ArrowOnTheLeftOfArrowError("Arrow type on the left hand side of the arrow type", self)
class TypeTuple(Type):
def __init__(self, args):
self.args = args
def stringify(self):
return "(" + str.join(", ", map(str, self.args)) + ")"
def put_on_stack(self, stack):
for arg in self.args:
arg.put_on_stack(stack)
def take_from_stack(self, stack):
for arg in self.args:
arg.take_from_stack(stack)
class TypeVar(Type):
def __init__(self, name):
self.name = name
self.rank = 0
self.parent = self
def union(self, other):
self_repr = self.get_repr()
other_repr = other.get_repr()
if self_repr == other_repr:
return
if isinstance(other, TypeVar):
other_rank = other.rank
self_rank = self.rank
if self_rank < other_rank:
self.parent = other_repr
elif self_rank > other_rank:
other.parent = self_repr
else:
other.parent = self_repr
self.rank = self.rank + 1
else:
self.parent = other_repr
def get_repr(self):
if self.parent != self:
self.parent = self.parent.get_repr()
return self.parent
def stringify(self):
return "@" + self.name
class ArrowOnTheLeftOfArrowError(RuntimeError):
def __init__(self, message, type):
RuntimeError.__init__(self, message)
self.message = message
self.type = type
def __str__(self):
return self.message + " " + str(self.type)
class UnifiactionError(RuntimeError):
def __init__(self, message):
RuntimeError.__init__(self, message)
self.message = message
self.unify_stack = []
def add(self, type_a, type_b):
self.unify_stack.append((type_a, type_b))
def __str__(self):
return "Unification error: " + self.message + "\n" + str.join("\n", map(lambda p : "In unification of '%s' and '%s'" % p, self.unify_stack))
def types_equal(a, b):
a = a.get_repr()
b = b.get_repr()
if a == b:
return True
if isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
return False
return all(map(types_equal, zip(a.args, b.args)))
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
return types_equal(a.left, b.left) and types_equal(a.right, b.right)
return False
def types_unify(a, b):
try:
a = a.get_repr()
b = b.get_repr()
if isinstance(a, TypeVar):
a.union(b)
elif isinstance(b, TypeVar):
b.union(a)
elif isinstance(a, TypeConstant) and isinstance(b, TypeConstant):
if a != b:
raise UnifiactionError("Different basic types")
elif isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
raise UnifiactionError("Tuples size mismatch")
for (a,b) in zip(a.args, b.args):
types_unify(a, b)
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
types_unify(a.left, b.left)
types_unify(a.right, b.right)
else:
raise UnifiactionError("Different kinds")
except UnifiactionError as e:
e.add(a, b)
raise
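# Illustrative usage sketch (not part of the original module): unifying "@a -> int" with
# "bool -> @b" resolves @a to bool and @b to int via the union-find machinery above.
def _unify_example():
    int_t = TypeConstant("int")
    bool_t = TypeConstant("bool")
    a = TypeVar("a")
    b = TypeVar("b")
    types_unify(TypeArrow(a, int_t), TypeArrow(bool_t, b))
    # after unification each variable's representative is the constant it was merged with
    assert a.get_repr() is bool_t and b.get_repr() is int_t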
def is_simple_arrow(a):
a = a.get_repr()
if isinstance(a, TypeArrow):
lhs = a.left
rhs = a.right
if lhs.get_repr() == rhs.get_repr():
return True
return False
def is_type_empty(type):
type = type.get_repr()
return isinstance(type, TypeTuple) and len(type.args) == 0
def split_arrow(type):
type = type.get_repr()
lhs = []
while isinstance(type, TypeArrow):
lhs.append(type.left)
type = type.right
return (lhs, type)
class TypeStack:
def __init__(self):
self.given = []
self.taken = []
def take(self, type):
if not isinstance(type, TypeConstant):
raise RuntimeError("Non-constant type placed into typestack: %s" % type)
if len(self.given) > 0:
last = self.given.pop()
types_unify(type, last)
else:
self.taken.append(type)
def put(self, type):
self.given.append(type)
def form_type(self):
if len(self.given) == 1:
rhs = self.given[0]
else:
rhs = TypeTuple(self.given)
t = rhs
for type in reversed(self.taken):
t = TypeArrow(type, t)
return t
#Takes a sequence of types, produces a signle type matching the sequence
def infer_type_from_sequence(seq):
stack = TypeStack()
for type in seq:
type.put_on_stack(stack)
return stack.form_type()
if __name__ == "__main__":
pass | 29.182266 | 148 | 0.58339 | [
"MIT"
] | dstep/old_jf_compiler | utils/parsxv2/typesystem.py | 5,924 | Python |
"""
logan.runner
~~~~~~~~~~~~
:copyright: (c) 2012 David Cramer.
:license: Apache License 2.0, see NOTICE for more details.
"""
import argparse
import os
import re
import sys
from django.core import management
from nautobot import __version__
from . import importer
from .settings import create_default_settings
__configured = False
def sanitize_name(project):
project = project.replace(" ", "-")
return re.sub("[^A-Z0-9a-z_-]", "-", project)
def parse_command_args(args):
"""
This parses the arguments and returns a tuple containing:
(args, command, command_args)
For example, "--config=bar start --with=baz" would return:
(['--config=bar'], 'start', ['--with=baz'])
"""
index = None
for arg_i, arg in enumerate(args):
if not arg.startswith("-"):
index = arg_i
break
# Unable to parse any arguments
if index is None:
return (args, None, [])
return (args[:index], args[index], args[(index + 1) :])
def is_configured():
global __configured
return __configured
def configure_app(
config_path=None,
project=None,
default_config_path=None,
default_settings=None,
settings_initializer=None,
settings_envvar=None,
initializer=None,
allow_extras=True,
config_module_name=None,
runner_name=None,
on_configure=None,
):
"""
:param project: should represent the canonical name for the project, generally
        the same name it is assigned in distutils.
:param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
"""
global __configured
project_filename = sanitize_name(project)
if default_config_path is None:
default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename)
if settings_envvar is None:
settings_envvar = project_filename.upper() + "_CONF"
if config_module_name is None:
config_module_name = project_filename + "_config"
# normalize path
if settings_envvar in os.environ:
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if not config_path:
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if not os.path.exists(config_path):
if runner_name:
raise ValueError(
"Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)
)
raise ValueError("Configuration file does not exist at %r" % (config_path,))
os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name
def settings_callback(settings):
if initializer is None:
return
try:
initializer(
{
"project": project,
"config_path": config_path,
"settings": settings,
}
)
except Exception:
# XXX: Django doesn't like various errors in this path
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(
config_module_name,
config_path,
default_settings,
allow_extras=allow_extras,
callback=settings_callback,
)
__configured = True
# HACK(dcramer): we need to force access of django.conf.settings to
# ensure we don't hit any import-driven recursive behavior
from django.conf import settings
hasattr(settings, "INSTALLED_APPS")
if on_configure:
on_configure(
{
"project": project,
"config_path": config_path,
"settings": settings,
}
)
class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
"""Argparse Formatter that includes newlines and shows argument defaults."""
def run_app(**kwargs):
sys_args = sys.argv
# The established command for running this program
runner_name = os.path.basename(sys_args[0])
default_config_path = kwargs.get("default_config_path")
# Primary parser
parser = management.CommandParser(
description=kwargs.pop("description"),
formatter_class=VerboseHelpFormatter,
add_help=False,
)
parser.add_argument(
"-c",
"--config",
metavar="CONFIG",
help="Path to the configuration file",
)
parser.add_argument(
"--version",
action="version",
version=__version__,
)
# This block of code here is done in this way because of the built in Django
# management command parsing not playing well unless you have a Django
# config with SECRET_KEY defined.
# Parse out `--config` here first capturing any unparsed args for passing to
# Django parser.
args, unparsed_args = parser.parse_known_args()
# Now add the sub-parser for "init" command
subparsers = parser.add_subparsers(help=False, dest="command", metavar="")
init_parser = subparsers.add_parser(
"init",
help="Initialize a new configuration",
)
init_parser.add_argument(
"config_path",
default=default_config_path,
nargs="?",
help="Path to output generated configuration file",
)
# Try to use our parser first, to process custom arguments
try:
args = parser.parse_args()
command = args.command
command_args = sys.argv[1:]
# Fallback to passing through to Django management commands
# except RuntimeError as err:
except management.CommandError as err:
if "invalid choice" not in str(err):
raise
# Rewrite sys_args to have the unparsed args (if any)
sys_args = sys_args[:1] + unparsed_args
_, command, command_args = parse_command_args(sys_args[1:])
# If we don't get a command of some sort, print help and exit dirty
if not command:
parser.print_help()
parser.exit(1)
# The `init` command is reserved for initializing configuration
if command == "init":
settings_initializer = kwargs.get("settings_initializer")
config_path = os.path.expanduser(args.config_path)
# Check if the config already exists; alert user and exit if exists.
if os.path.exists(config_path):
print(
f"A configuration already exists at {config_path}. Please backup and remove it or choose another path."
)
return
# Create the config
try:
create_default_settings(config_path, settings_initializer)
except OSError as e:
raise e.__class__("Unable to write default settings file to %r" % config_path)
print("Configuration file created at %r" % config_path)
return
# Fetch config path from `--config` if provided, otherwise we want it to
# default to None so that the underlying machinery in `configure_app` will
# process default path or environment variable.
config_path = args.config
    # Overlay our config with defaults
try:
configure_app(config_path=config_path, **kwargs)
except ValueError as err:
parser.exit(status=2, message=str(err) + "\n")
# Call Django management command
management.execute_from_command_line([runner_name, command] + command_args)
# Exit cleanly
sys.exit(0)
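# Illustrative wiring sketch (hypothetical project values; the real entry point lives
# elsewhere in the code base): a project would typically expose a console script that
# calls run_app() with its own defaults, e.g.
def _example_entry_point():
    run_app(
        project="example",
        description="Example management utility",
        default_config_path="~/.example/example_config.py",
        default_settings="example.core.settings",
        settings_initializer=lambda: "SECRET_KEY = 'change-me'\n",  # per the docstring above, a callback returning the settings template
        settings_envvar="EXAMPLE_CONFIG",
    )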
if __name__ == "__main__":
run_app()
| 29.083942 | 119 | 0.651148 | [
"Apache-2.0"
] | Joezeppe/nautobot | nautobot/core/runner/runner.py | 7,969 | Python |
from __future__ import annotations
from typing import Optional, Type, TYPE_CHECKING
import actor
from actions.ai import BasicMonster
import graphic
from inventory import Inventory
if TYPE_CHECKING:
from actions import Action
from location import Location
class Fighter(graphic.Graphic):
render_order = 0
hp: int = 0
power: int = 0
defense: int = 0
DEFAULT_AI: Type[Action] = BasicMonster
def __init__(self, inventory: Optional[Inventory] = None) -> None:
self.alive = True
self.max_hp = self.hp
self.inventory = inventory or Inventory()
@classmethod
def spawn(
cls, location: Location, ai_cls: Optional[Type[Action]] = None
) -> actor.Actor:
self = cls()
return actor.Actor(location, self, ai_cls or cls.DEFAULT_AI)
| 22.722222 | 70 | 0.683374 | [
"MIT"
] | Belvarm/roguelike-tutorial | races/__init__.py | 818 | Python |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
"""
insts, w_words, s_words, classes = [], [], [], []
# Read input file
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
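# Illustrative sketch of the expected input (hypothetical content; fields are tab-separated
# as <class>\t<written>\t<spoken>, with a line whose first field is '<eos>' closing each sentence):
#   PLAIN <TAB> Hello <TAB> <self>
#   DATE <TAB> Jan. 1 <TAB> january first
#   <eos> <TAB> <eos>
# read_data_file() would then return
#   [(['PLAIN', 'DATE'], ['Hello', 'Jan. 1'], ['<self>', 'january first'])]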
def normalize_str(input_str, lang):
""" Normalize an input string """
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
input_str = input_str.replace(' ', ' ')
return input_str
def remove_puncts(input_str):
""" Remove punctuations from an input string """
return input_str.translate(str.maketrans('', '', string.punctuation))
def basic_tokenize(input_str, lang):
"""
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
"""
if lang == constants.ENGLISH:
return word_tokenize(input_str)
return input_str.strip().split(' ')
| 34.069444 | 111 | 0.6596 | [
"Apache-2.0"
] | JMichaelStringer/NeMo | nemo/collections/nlp/data/text_normalization/utils.py | 2,453 | Python |
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunksize: At most this many octets will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunksize: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
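

# Hedged usage sketch (added example, not part of dnspython): build an A record
# rdata from its text form and print it back out. Guarded so it only runs when
# this module is executed directly; the address is a documentation-range value.
if __name__ == '__main__':
    _example_rdata = from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
    print(_example_rdata.to_text())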
| 32.675381 | 78 | 0.62295 | [
"MIT"
] | bopopescu/JobSniperRails | gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py | 14,998 | Python |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:54:01 2018
@author: shinyonsei2
"""
import numpy as np
import imageio
def read_pfm(fpath, expected_identifier="Pf"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
def _get_next_line(f):
next_line = f.readline().decode('utf-8').rstrip()
# ignore comments
while next_line.startswith('#'):
next_line = f.readline().rstrip()
return next_line
with open(fpath, 'rb') as f:
# header
identifier = _get_next_line(f)
if identifier != expected_identifier:
raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))
try:
line_dimensions = _get_next_line(f)
dimensions = line_dimensions.split(' ')
width = int(dimensions[0].strip())
height = int(dimensions[1].strip())
except:
raise Exception('Could not parse dimensions: "%s". '
'Expected "width height", e.g. "512 512".' % line_dimensions)
try:
line_scale = _get_next_line(f)
scale = float(line_scale)
assert scale != 0
if scale < 0:
endianness = "<"
else:
endianness = ">"
except:
raise Exception('Could not parse max value / endianess information: "%s". '
'Should be a non-zero number.' % line_scale)
try:
data = np.fromfile(f, "%sf" % endianness)
data = np.reshape(data, (height, width))
data = np.flipud(data)
with np.errstate(invalid="ignore"):
data *= abs(scale)
except:
raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))
return data
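

# Hedged usage sketch (added example): read a single PFM disparity map and report
# its shape and value range. The file path below is hypothetical and only
# illustrates the expected HCI-style layout.
if __name__ == '__main__':
    _example_path = 'hci_dataset/training/boxes/gt_disp_lowres.pfm'  # hypothetical path
    _disp = read_pfm(_example_path)
    print(_disp.shape, _disp.min(), _disp.max())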
def load_LFdata(dir_LFimages,hci_root):
traindata_all=np.zeros((len(dir_LFimages), 512, 512, 9, 9, 3),np.uint8)
traindata_label=np.zeros((len(dir_LFimages), 512, 512),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
print(dir_LFimage)
for i in range(81):
try:
tmp = np.float32(imageio.imread(hci_root + dir_LFimage+'/input_Cam0%.2d.png' % i)) # load LF images(9x9)
except:
print(hci_root + dir_LFimage+'/input_Cam0%.2d.png..does not exist' % i )
traindata_all[image_id,:,:,i//9,i-9*(i//9),:]=tmp
del tmp
try:
tmp = np.float32(read_pfm(hci_root +dir_LFimage+'/gt_disp_lowres.pfm')) # load LF disparity map
except:
            print(hci_root + dir_LFimage + '/gt_disp_lowres.pfm..does not exist')
traindata_label[image_id,:,:]=tmp
del tmp
image_id=image_id+1
return traindata_all, traindata_label
def load_depth_gts(gt_dir,dir_LFimages):
w_views = 9
n_views = w_views**2
traindata_label=np.zeros((len(dir_LFimages), 512, 512, n_views),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
sample_name = dir_LFimage.split('/')[-1]
print("loading additional gt.. " + sample_name)
for i in range(n_views):
# try: 0%.2d.png
tmp = np.float32(read_pfm(gt_dir +sample_name+'/gt_disp_lowres_Cam0%.2d.pfm' %i)) # load LF disparity map
# except:
# print(hci_root + dir_LFimage+'\gt_disp_lowres.pfm..does not exist' % i )
traindata_label[image_id,:,:,i]=tmp
del tmp
image_id=image_id+1
return traindata_label
| 34.485981 | 122 | 0.566938 | [
"MIT"
] | marmus12/CornerView | epinet_fun/util.py | 3,690 | Python |
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
if type(itr) in (str, bytes, sitk.Image):
yield itr
else:
for x in itr:
try:
yield from flatten(x)
except TypeError:
yield x
def get_suv_bw_scale_factor(ds):
# Modified from
# https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf
if ds.Units == "CNTS":
# Try to find the Philips private scale factor")
return float(ds[0x7053, 0x1000].value)
assert ds.Modality == "PT"
assert "DECY" in ds.CorrectedImage
assert "ATTN" in ds.CorrectedImage
assert "START" in ds.DecayCorrection
assert ds.Units == "BQML"
half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
if "SeriesTime" in ds:
series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
if "." in series_date_time:
series_date_time = series_date_time[
: -(len(series_date_time) - series_date_time.index("."))
]
series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
if "SeriesTime" in ds:
start_time = (
ds.SeriesDate
+ "_"
+ ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
)
if "." in start_time:
start_time = start_time[: -(len(start_time) - start_time.index("."))]
start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
decay_time = (series_date_time - start_time).seconds
injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
decayed_dose = injected_dose * pow(2, -decay_time / half_life)
patient_weight = float(ds.PatientWeight)
suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
return suv_bw_scale_factor
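

# Hedged usage sketch (added example, not part of platipy): build a minimal PET
# header in memory and compute its body-weight SUV scale factor. All values below
# are made up for illustration (roughly F-18 half-life, 200 MBq injected, 70 kg patient).
if __name__ == "__main__":
    from pydicom.dataset import Dataset

    _rp = Dataset()
    _rp.RadionuclideHalfLife = 6586.2
    _rp.RadiopharmaceuticalStartTime = "110000"
    _rp.RadionuclideTotalDose = 200e6

    _ds = Dataset()
    _ds.Modality = "PT"
    _ds.Units = "BQML"
    _ds.CorrectedImage = ["DECY", "ATTN"]
    _ds.DecayCorrection = "START"
    _ds.SeriesDate = "20200101"
    _ds.SeriesTime = "120000"
    _ds.PatientWeight = 70
    _ds.RadiopharmaceuticalInformationSequence = [_rp]

    print(get_suv_bw_scale_factor(_ds))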
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
"""
Attempts to return some information from a DICOM
This is typically used for naming converted NIFTI files
Args:
dicom_object (pydicom.dataset.FileDataset): The DICOM object
return_extra (bool, optional): return information that is usually not required
Returns:
info (str): Some extracted information
"""
try:
dicom_sop_class_name = dicom_object.SOPClassUID.name
except AttributeError:
logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
dicom_sop_class_name = sop_class_name
if "Image" in dicom_sop_class_name:
# Get the modality
image_modality = dicom_object.Modality
logger.info(f" Image modality: {image_modality}")
if image_modality == "CT":
# There is typically not much extra information
# At the moment, we do not return anything for CT imaging
if return_extra:
try:
protocol_name = dicom_object.ProtocolName
if protocol_name != "":
return re.sub(r"[^\w]", "_", protocol_name).upper()
except AttributeError:
logger.warning(" Could not find ProtocolName")
return ""
elif image_modality == "MR":
# Not much consistency, but we can get the protocol name
try:
protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
except AttributeError:
logger.warning(" Could not find ProtocolName")
protocol_name = ""
try:
sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
except AttributeError:
logger.warning(" Could not find SequenceName")
sequence_name = ""
try:
series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
except AttributeError:
logger.warning(" Could not find SequenceName")
series_description = ""
combined_name = "_".join([protocol_name, sequence_name, series_description])
while "__" in combined_name:
combined_name = combined_name.replace("__", "_")
if protocol_name != "" and not return_extra:
return protocol_name
else:
return combined_name
elif image_modality == "PT":
# Not much experience with this
# We can search through the corrections applied
# Return whether or not attentuation is applied
try:
corrections = dicom_object.CorrectedImage
except AttributeError:
corrections = "NONE"
if "ATTN" in corrections:
return "AC"
else:
return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
"""
Sorts a list of DICOM image files based on a DICOM tag value.
This is a much safer method than reading SliceLocation.
It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
The list of DICOM files is sorted by projecting the image position onto the axis normal to the
place defined by the image orientation.
This accounts for differences in patient position (e.g. HFS/FFS).
Args:
dicom_image_list (list): [description]
"""
sorted_dict = {}
for dicom_file in dicom_image_list:
dcm = pydicom.read_file(dicom_file, force=True)
image_position = np.array(dcm.ImagePositionPatient, dtype=float)
image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
slice_location = (image_position * image_plane_normal)[2]
sorted_dict[dicom_file] = slice_location
sorter_safe = lambda dcm_file: sorted_dict[dcm_file]
return sorted(dicom_image_list, key=sorter_safe)
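

# Hedged illustration (added example): the ordering key used above, computed for two
# synthetic axial slices. Values are made up; only the geometry of the projection matters.
if __name__ == "__main__":
    _orientation = np.array([1, 0, 0, 0, 1, 0], dtype=float)  # axial plane
    _normal = np.cross(_orientation[:3], _orientation[3:])  # -> (0, 0, 1)
    for _position in (np.array([0.0, 0.0, -12.5]), np.array([0.0, 0.0, -10.0])):
        print((_position * _normal)[2])  # slices are sorted by this projected location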
def fix_missing_data(contour_data_list):
"""
Fixes missing points in contouring using simple linear interpolation
Args:
contour_data_list (list): The contour data for each slice
Returns:
contour_data (numpy array): Interpolated contour data
"""
contour_data = np.array(contour_data_list)
if contour_data.any() == "":
logger.warning(" Missing values detected.")
missing_values = np.where(contour_data == "")[0]
if missing_values.shape[0] > 1:
logger.warning(" More than one value missing, fixing this isn't implemented yet...")
else:
logger.warning(" Only one value missing.")
missing_index = missing_values[0]
missing_axis = missing_index % 3
if missing_axis == 0:
logger.warning(" Missing value in x axis: interpolating.")
if missing_index > len(contour_data) - 3:
lower_val = contour_data[missing_index - 3]
upper_val = contour_data[0]
elif missing_index == 0:
lower_val = contour_data[-3]
upper_val = contour_data[3]
else:
lower_val = contour_data[missing_index - 3]
upper_val = contour_data[missing_index + 3]
contour_data[missing_index] = 0.5 * (lower_val + upper_val)
elif missing_axis == 1:
logger.warning(" Missing value in y axis: interpolating.")
if missing_index > len(contour_data) - 2:
lower_val = contour_data[missing_index - 3]
upper_val = contour_data[1]
elif missing_index == 0:
lower_val = contour_data[-2]
upper_val = contour_data[4]
else:
lower_val = contour_data[missing_index - 3]
upper_val = contour_data[missing_index + 3]
contour_data[missing_index] = 0.5 * (lower_val + upper_val)
else:
logger.warning(" Missing value in z axis: taking slice value")
temp = contour_data[2::3].tolist()
temp.remove("")
contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
"""
This function is used to generate a binary mask from a set of vertices.
This allows us to convert from DICOM-RTStruct format to any imaging format.
Args:
image ([SimpleITK.Image]): The image, used to copy imaging information
(e.g. resolution, spacing)
dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
spacing_override (bool | tuple, optional): Overwrite the spacing.
Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
Returns:
list, list : final_struct_name_sequence, structure_list
"""
if spacing_override:
current_spacing = list(image.GetSpacing())
new_spacing = tuple(
[
current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
for k in range(3)
]
)
image.SetSpacing(new_spacing)
struct_point_sequence = dicom_struct.ROIContourSequence
struct_name_sequence = [
"_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
]
structure_list = []
final_struct_name_sequence = []
for structIndex, structure_name in enumerate(struct_name_sequence):
image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
logger.info(
" Converting structure {0} with name: {1}".format(structIndex, structure_name)
)
if structIndex >= len(struct_point_sequence):
logger.warning(" Contour sequence is missing, skipping.")
continue
if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
logger.warning(" No contour sequence found for this structure, skipping.")
continue
if len(struct_point_sequence[structIndex].ContourSequence) == 0:
logger.warning(" Contour sequence is empty, skipping.")
continue
if (
not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
== "CLOSED_PLANAR"
):
logger.warning(" This is not a closed planar structure, skipping.")
continue
for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
contour_data = fix_missing_data(
struct_point_sequence[structIndex].ContourSequence[sl].ContourData
)
struct_slice_contour_data = np.array(contour_data, dtype=np.double)
vertexArr_physical = struct_slice_contour_data.reshape(
struct_slice_contour_data.shape[0] // 3, 3
)
point_arr = np.array(
[image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
).T
[xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
zIndex = point_arr[2][0]
if np.any(point_arr[2] != zIndex):
logger.error(" Axial slice index varies in contour. Quitting now.")
logger.error(" Structure: {0}".format(structure_name))
logger.error(" Slice index: {0}".format(zIndex))
quit()
if zIndex >= image.GetSize()[2]:
logger.warning(" Slice index greater than image size. Skipping slice.")
logger.warning(" Structure: {0}".format(structure_name))
logger.warning(" Slice index: {0}".format(zIndex))
continue
sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
filledIndicesX, filledIndicesY = polygon(
xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
)
sliceArr[filledIndicesX, filledIndicesY] = 1
image_blank[zIndex] += sliceArr.T
struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
struct_image.CopyInformation(image)
structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
while "__" in structure_name_clean:
structure_name_clean = structure_name_clean.replace("__", "_")
final_struct_name_sequence.append(structure_name_clean)
return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
"""
Organise the DICOM files by the series UID
"""
dicom_series_dict_parent = {}
for i, dicom_file in enumerate(sorted(dicom_file_list)):
if verbose is True:
logger.debug(f" Sorting file {i}")
dicom_file = dicom_file.as_posix()
if "dicomdir" in dicom_file.lower():
logger.warning(
"DICOMDIR is not supported in this tool, images are read directly. Skipping."
)
continue
dicom_object = pydicom.read_file(dicom_file, force=True)
parent_sorting_field_data = dicom_object[parent_sorting_field].value
if parent_sorting_field_data not in dicom_series_dict_parent.keys():
dicom_series_dict_parent[parent_sorting_field_data] = {}
series_uid = dicom_object.SeriesInstanceUID
if series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys():
dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]
else:
dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)
return dicom_series_dict_parent
def process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field="PatientName",
return_extra=True,
individual_file=False,
initial_sop_class_name_default="UNKNOWN",
):
if not individual_file:
logger.info(f" Processing series UID: {series_uid}")
dicom_file_list = dicom_series_dict[series_uid]
else:
logger.info(f" Processing individual file: {individual_file}")
dicom_file_list = [individual_file]
logger.info(f" Number of DICOM files: {len(dicom_file_list)}")
initial_dicom = pydicom.read_file(dicom_file_list[0])
# Get the data in the parent sorting field, clean with RegEx
parent_sorting_data = re.sub(
r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
).upper()
if parent_sorting_data == "":
logger.error(
f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
)
"""
! TO DO
Implement a routine to let a user correlate a root directory with a name
"""
parent_sorting_data = "TEMP"
try:
initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
except AttributeError:
logger.warning(
f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
)
initial_dicom_sop_class_name = initial_sop_class_name_default
try:
study_uid = initial_dicom.StudyInstanceUID
except AttributeError:
study_uid = "00001"
"""
! TO DO
Need to check for secondary capture image storage
This can include JPEGs with written information on them
This is typically not very useful
We can dump it to file
Or just save the DICOM file in the folder of interest
Not a big problem, sort out another day
"""
# Check the potential types of DICOM files
if (
"Image" in initial_dicom_sop_class_name
and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
):
# Load as an primary image
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
try:
image = sitk.ReadImage(sorted_file_list)
except RuntimeError:
logger.warning(" Could not read image into SimpleITK.")
logger.info(" Processing files individually.")
for dicom_file in dicom_file_list:
return process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field=parent_sorting_field,
return_extra=return_extra,
individual_file=dicom_file,
initial_sop_class_name_default=initial_sop_class_name_default,
)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
"""
! TO DO - integrity check
Read in all the files here, check the slice location and determine if any are missing
"""
if initial_dicom.Modality == "PT":
# scaling_factor = get_suv_bw_scale_factor(initial_dicom)
# image *= scaling_factor
# !TO DO
# Work on PET SUV conversion
None
"""
! CHECKPOINT
Some DCE MRI sequences have the same series UID
Here we check the sequence name, and split if necessary
"""
if initial_dicom.Modality == "MR":
try:
sequence_names = np.unique(
[pydicom.read_file(x).SequenceName for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SequenceName
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
try:
logger.warning(
" MRI sequence name not found. The SeriesDescription will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SeriesDescription
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
logger.warning(
" MRI SeriesDescription not found. The AcquisitionComments will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.AcquisitionComments
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
# GE use the DICOM tag (0019, 10a2) [Raw data run number]
# in Diffusion weighted MRI sequences
# We need to separate this out to get the difference sequences
if initial_dicom.SeriesDescription == "Diffusion Weighted":
# num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
# number_of_images / images_per_seq
num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
sequence_names = np.unique(
[
f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
for x in dicom_file_list
]
)
sequence_name_index_dict = {
name: index for index, name in enumerate(sequence_names)
}
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
var_to_index = sequence_name_index_dict[var]
if var_to_index not in sequence_dict.keys():
sequence_dict[var_to_index] = [dcm_name]
else:
sequence_dict[var_to_index].append(dcm_name)
sequence_names = sorted(sequence_dict.keys())
            if len(sequence_names) > 1:
                logger.warning("  Multiple MR sequences were found under a single series UID.")
logger.warning(" These will be split into separate images.")
# Split up the DICOM file list by sequence name
for sequence_name in sequence_names:
dicom_file_list_by_sequence = sequence_dict[sequence_name]
logger.info(sequence_name)
logger.info(len(dicom_file_list_by_sequence))
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
image_by_sequence = sitk.ReadImage(sorted_file_list)
dicom_file_metadata_by_sequence = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
return # Stop iteration
yield "IMAGES", dicom_file_metadata, initial_dicom, image
if "Structure" in initial_dicom_sop_class_name:
# Load as an RT structure set
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
# We must also read in the corresponding DICOM image
# This can be found by matching the references series UID to the series UID
"""
! TO DO
What happens if there is an RT structure set with different referenced sequences?
"""
# Get the "ReferencedFrameOfReferenceSequence", first item
referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
# Get the "RTReferencedStudySequence", first item
# This retrieves the study UID
# This might be useful, but would typically match the actual StudyInstanceUID in the
# DICOM object
rt_referenced_series_item = (
referenced_frame_of_reference_item.RTReferencedStudySequence[0]
)
# Get the "RTReferencedSeriesSequence", first item
            # This retrieves the actual referenced series UID, which we need to match imaging
# parameters
rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
0
]
# Get the appropriate series instance UID
image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
# Read in the corresponding image
sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
image = sitk.ReadImage(sorted_file_list)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
(
structure_name_list,
structure_image_list,
) = transform_point_set_from_dicom_struct(image, dicom_object)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
"structure_name_list": structure_name_list,
}
yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list
if "Dose" in initial_dicom_sop_class_name:
# Load as an RT Dose distribution
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
"""
! CHECKPOINT
There should only be a single RT dose file (with each series UID)
If there are more, yield each
"""
initial_dicom = pydicom.read_file(dicom_file, force=True)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
# We must read in as a float otherwise when we multiply by one later it will not work!
raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
dose_grid_scaling = dicom_object.DoseGridScaling
logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")
scaled_dose_image = raw_dose_image * dose_grid_scaling
yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image
"""
! TO DO
1. (DONE) Implement conversion of dose files (to NIFTI images)
2. Implement conversion of RT plan files to text dump
3. Do something with other files (e.g. Deformable Image Registration stuff)
"""
return
def write_output_data_to_disk(
output_data_dict,
output_directory="./",
output_file_suffix=".nii.gz",
overwrite_existing_files=False,
):
"""
Write output to disk
"""
if output_data_dict is None:
return
filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
parent_sorting_data = output_data_dict["parent_sorting_data"]
files_written = {}
"""
Write the the converted images to disk
! CONSIDER
We could simply write as we go?
Pro: save memory, important if processing very large files
Con: Reading as we go allows proper indexing
"""
for field in filename_fields:
logger.info(f" Writing files for field: {field}")
p = pathlib.Path(output_directory) / parent_sorting_data / field
p.mkdir(parents=True, exist_ok=True)
files_written[field] = []
for field_filename_base, field_list in output_data_dict[field].items():
# Check if there is a list of images with matching names
# This will depend on the name format chosen
# If there is a list, we append an index as we write to disk
if isinstance(field_list, (tuple, list)):
# Flatten
field_list_flat = list(flatten(field_list))
# Iterate
for suffix, file_to_write in enumerate(field_list_flat):
field_filename = field_filename_base + f"_{suffix}"
# Some cleaning
while "__" in field_filename:
field_filename = field_filename.replace("__", "_")
while field_filename[-1] == "_":
field_filename = field_filename[:-1]
# Save image!
output_name = (
pathlib.Path(output_directory)
/ parent_sorting_data
/ field
/ (field_filename + output_file_suffix)
)
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f" File exists: {output_name}")
if overwrite_existing_files:
logger.warning(" You have selected to overwrite existing files.")
else:
logger.info(
" You have selected to NOT overwrite existing files. Continuing."
)
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
else:
field_filename = field_filename_base
file_to_write = field_list
# Some cleaning
while "__" in field_filename:
field_filename = field_filename.replace("__", "_")
while field_filename[-1] == "_":
field_filename = field_filename[:-1]
# Save image!
"""
! TO DO
                Use pathlib, and perform some checks so we don't overwrite anything!
"""
output_name = (
pathlib.Path(output_directory)
/ parent_sorting_data
/ field
/ (field_filename + output_file_suffix)
)
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f" File exists: {output_name}")
if overwrite_existing_files:
logger.warning(" You have selected to overwrite existing files.")
else:
logger.info(
" You have selected to NOT overwrite existing files. Continuing."
)
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
return files_written
def process_dicom_directory(
dicom_directory,
parent_sorting_field="PatientName",
output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
return_extra=True,
output_directory="./",
output_file_suffix=".nii.gz",
overwrite_existing_files=False,
write_to_disk=True,
verbose=False,
initial_sop_class_name_default="UNKNOWN",
):
# Check dicom_directory type
if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):
# Get all the DICOM files in the given directory
root_path = pathlib.Path(dicom_directory)
# Find files ending with .dcm, .dc3
dicom_file_list = [
p
for p in root_path.glob("**/*")
if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
]
elif hasattr(dicom_directory, "__iter__"):
dicom_file_list = []
for dicom_dir in dicom_directory:
# Get all the DICOM files in each directory
root_path = pathlib.Path(dicom_dir)
# Find files ending with .dcm, .dc3
dicom_file_list += [
p
for p in root_path.glob("**/*")
if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
]
if len(dicom_file_list) == 0:
logger.info("No DICOM files found in input directory. Exiting now.")
return
# Process the DICOM files
# This returns a dictionary (of dictionaries):
# {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
# {series_UID_2: [list_of_DICOM_files], ...
# parent_data_2 : {series_UID_1: [list_of_DICOM_files],
# {series_UID_2: [list_of_DICOM_files], ...
# ... }
dicom_series_dict_parent = process_dicom_file_list(
dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
)
if dicom_series_dict_parent is None:
logger.info("No valid DICOM files found. Ending.")
return None
output = {}
for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")
# Set up the output data
# This stores the SimpleITK images and file names
output_data_dict = {}
# Set up the study UID dict
# This helps match structure sets to relevant images
# And paired images to each other (e.g. PET/CT)
study_uid_dict = {}
# Give some user feedback
logger.debug(f" Output image name format: {output_image_name_format}")
logger.debug(f" Output structure name format: {output_structure_name_format}")
logger.debug(f" Output dose name format: {output_dose_name_format}")
# For each unique series UID, process the DICOM files
for series_uid in dicom_series_dict.keys():
# This function returns four values
# 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
# 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
# 3. initial_dicom: The first DICOM in the series. For doses and structures there is
# (usually) only one DICOM anyway
# 4. dicom_file_data: The actual SimpleITK image data
for (
dicom_type,
dicom_file_metadata,
initial_dicom,
dicom_file_data,
) in process_dicom_series(
dicom_series_dict=dicom_series_dict,
series_uid=series_uid,
parent_sorting_field=parent_sorting_field,
return_extra=return_extra,
initial_sop_class_name_default=initial_sop_class_name_default,
):
# Step 1
# Check the parent sorting field is consistent
# This would usually be the PatientName, PatientID, or similar
# Occasionally these will both be blank
parent_sorting_data = dicom_file_metadata["parent_sorting_data"]
if "parent_sorting_data" not in output_data_dict.keys():
output_data_dict["parent_sorting_data"] = parent_sorting_data
else:
if parent_sorting_data != output_data_dict["parent_sorting_data"]:
logger.error(
f"A conflict was found for the parent sorting field "
f"({parent_sorting_field}): {parent_sorting_data}"
)
logger.error("Quitting now.")
print(dicom_series_dict_parent.keys())
sys.exit()
else:
logger.info(
f" Parent sorting field ({parent_sorting_field}) match found: "
f"{parent_sorting_data}"
)
# Step 2
# Get the study UID
# Used for indexing DICOM series
study_uid = dicom_file_metadata["study_uid"]
if study_uid not in study_uid_dict.keys():
try:
study_uid_index = max(study_uid_dict.values()) + 1
except AttributeError:
study_uid_index = 0 # Study UID dict might not exist
except ValueError:
study_uid_index = 0 # Study UID dict might be empty
logger.info(f" Setting study instance UID index: {study_uid_index}")
study_uid_dict[study_uid] = study_uid_index
else:
logger.info(
f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
)
# Step 3
# Generate names for output files
# Special names
# ! This can be defined once at the start of the function
special_name_fields = [
"parent_sorting_data",
"study_uid_index",
"image_desc",
"structure_name",
]
# Get the image description (other special names are already defined above)
image_desc = get_dicom_info_from_description(
initial_dicom, return_extra=return_extra
)
# Get all the fields from the user-given name format
if dicom_type == "IMAGES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_image_name_format.split("}")
if len(i) > 0
]
elif dicom_type == "STRUCTURES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_structure_name_format.split("}")
if len(i) > 0
]
elif dicom_type == "DOSES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_dose_name_format.split("}")
if len(i) > 0
]
# Now exclude those that aren't derived from the DICOM header
dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]
naming_info_dict = {}
for dicom_field in dicom_header_tags:
try:
dicom_field_value = initial_dicom[dicom_field].value
except (AttributeError, KeyError):
logger.warning(
f" Could not find DICOM header {dicom_field}. Setting as 0 to "
f"preserve naming convention."
)
dicom_field_value = 0
naming_info_dict[dicom_field] = dicom_field_value
if dicom_type == "IMAGES":
output_name = output_image_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
image_desc=image_desc,
**naming_info_dict,
)
if "IMAGES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["IMAGES"] = {output_name: dicom_file_data}
else:
# First check if there is another image of the same name
if output_name not in output_data_dict["IMAGES"].keys():
output_data_dict["IMAGES"][output_name] = dicom_file_data
else:
logger.info(" An image with this name exists, appending.")
if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
output_data_dict["IMAGES"][output_name] = list(
[output_data_dict["IMAGES"][output_name]]
)
output_data_dict["IMAGES"][output_name].append(dicom_file_data)
elif dicom_type == "STRUCTURES":
for structure_name, structure_image in zip(
dicom_file_metadata["structure_name_list"], dicom_file_data
):
output_name = output_structure_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
image_desc=image_desc,
structure_name=structure_name,
**naming_info_dict,
)
if "STRUCTURES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["STRUCTURES"] = {output_name: structure_image}
else:
# First check if there is another structure of the same name
if output_name not in output_data_dict["STRUCTURES"].keys():
output_data_dict["STRUCTURES"][output_name] = structure_image
else:
logger.info(" A structure with this name exists, appending.")
if hasattr(
output_data_dict["STRUCTURES"][output_name], "__iter__"
):
output_data_dict["STRUCTURES"][output_name] = list(
[output_data_dict["STRUCTURES"][output_name]]
)
output_data_dict["STRUCTURES"][output_name].append(structure_image)
elif dicom_type == "DOSES":
output_name = output_dose_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
**naming_info_dict,
)
if "DOSES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["DOSES"] = {output_name: dicom_file_data}
else:
# First check if there is another image of the same name
if output_name not in output_data_dict["DOSES"].keys():
output_data_dict["DOSES"][output_name] = dicom_file_data
else:
logger.info(" An image with this name exists, appending.")
if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
output_data_dict["DOSES"][output_name] = list(
[output_data_dict["DOSES"][output_name]]
)
output_data_dict["DOSES"][output_name].append(dicom_file_data)
if write_to_disk:
output[str(parent_data)] = write_output_data_to_disk(
output_data_dict=output_data_dict,
output_directory=output_directory,
output_file_suffix=output_file_suffix,
overwrite_existing_files=overwrite_existing_files,
)
else:
output[str(parent_data)] = output_data_dict
"""
TO DO!
Memory issue with output_data_dict
Use in inner loop, reset output_data_dict
"""
return output
| 39.28259 | 119 | 0.573898 | [
"Apache-2.0"
] | RadiotherapyAI/platipy | platipy/dicom/io/crawl.py | 44,900 | Python |
"""
Conjuntos são chamados de set's
- Set não possui duplicidade
- Set não possui valor ordenado
- Não são acessados via indice, ou seja, não são indexados
Bons para armazenar elementos são ordenação, sem se preocupar com chaves, valores e itens duplicados.
Set's são referenciados por {}
Diferença de set e dict
- Dict tem chave:valor
- Set tem apenas valor
---------------------------------------------------------------------------------------------------------------------
# DEFENINDO SET
# Forma 1
s = set ({1, 2, 3, 4, 5, 4, 5, 2, 1}) # valores duplicados
print(type(s))
print(s)
# OBS.: Ao criar um set, se uma valor estiver repetido, ele é ignorado, sem gerar erro.
# Forma 2 - Mais comum
set = {1, 2, 3, 4, 5, 4, 5, 2, 1} # valores duplicados
print(type(set))
print(set)
# Sem valores duplicados e sem ordenação entre eles
# Pode-se colocar todos os tipos de dados
---------------------------------------------------------------------------------------------------------------------
# PODE-SE ITERAR SOBRE UM SET
set = {1, 2, 3, 4, 5, 4, 5, 2, 1}
for valor in set:
print(valor)
---------------------------------------------------------------------------------------------------------------------
# USOS INTERESSANTES COM SET'S
# Imagine que fizemos um formulario de cadastro de visitantes em um museu, onde as pessoas informam manualmente
# sua cidade de origem
# Nos adicionamos cada cidade em uma lista Python, ja que em lista pode-se adicionar novos elementos e ter repetição
cidade = ['Lavras', 'Bagé', 'Caçapava', 'Lavras', 'Bagé']
print(type(cidade))
print(cidade)
print(len(cidade)) # para saber quantos visitantes teve
print(len(set(cidade))) # para saber quantas cidades distintas foram visitar
---------------------------------------------------------------------------------------------------------------------
# ADICIONANDO ELEMENTOS EM UM SET
s = {1, 2, 3}
s.add(4)
print(s)
---------------------------------------------------------------------------------------------------------------------
# REMOVANDO ELEMENTOS DE UM SET
# Forma 1
conj = {1, 2, 3}
conj.remove(3) # se tentar remover um valor que não existe, gera um erro.
print(conj)
# Forma 2
conj.discard(2) # se o elemento não existir, não vai gerar erro
print(conj)
---------------------------------------------------------------------------------------------------------------------
# COPIANDO UM SET PARA OUTRO
conj = {1, 2, 3}
# Forma 1 - Deep Copy (o novo conjunto fica independente)
novo = conj.copy()
print(novo)
novo.add(4)
print(conj, novo)
# Forma 2 - Shallow Copy (o novo conjunto fica interligado ao primeiro)
novo2 = conj
print(novo2)
novo2.add(5)
print(conj, novo2)
---------------------------------------------------------------------------------------------------------------------
# REMOVER TODOS OS DADOS DE UM SET
conj = {1, 2, 3}
conj.clear()
print(conj)
---------------------------------------------------------------------------------------------------------------------
# METODOS MATEMÁTICOS DE CONJUNTOS
# Dois conjuntos de estudantes, Python e Java.
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Precisamos juntar em um set, os alunos dos dois cursos, mas apenas nomes únicos
# Forma 1 - usando union
unicos = python.union(java)
print(unicos)
# Forma 2 - Usando o caracter pipe "|"
unicos2 = python|java
print(unicos2)
---------------------------------------------------------------------------------------------------------------------
# GERANDO SET DE ESTUDANTES QUE ESTÃO NOS DOIS CURSOS
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Forma 1 - usando intersection
ambos = python.intersection(java)
print(ambos)
# Forma 2 - usando &
ambos2 = python & java
print(ambos2)
---------------------------------------------------------------------------------------------------------------------
# GERAR SET DE ESTUDANTES QUE ESTÃ EM UM CURSO, MAS QUE NÃO ESTÃO NO OUTRO
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
so_python = python.difference(java)
print(so_python)
---------------------------------------------------------------------------------------------------------------------
# SOMA*, MÁXIMO*, MÍNIMO*, TAMANHO.
# * -> somente valores inteiros ou float
conj = {1, 2, 3, 4, 5}
print(sum(conj))
print(max(conj))
print(min(conj))
print(len(conj))
---------------------------------------------------------------------------------------------------------------------
"""
| 31.863014 | 117 | 0.479364 | [
"MIT"
] | PauloFTeixeira/curso_python | Secao7_ColecoesPython/Conjutos.py | 4,683 | Python |
version = '1.6.4'
| 9 | 17 | 0.555556 | [
"MIT"
] | caniko/tridesclous | tridesclous/version.py | 18 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import builderutils.parser as parser
import builderutils.renderer as renderer
import builderutils.dom as dom
@click.group()
def cli():
pass
@click.command()
@click.option("--configfile", type=click.Path(), help="Builder config", required=True)
def create(configfile):
print("create command!")
parserObj = parser.ConfigParser(configfile)
print("Parser Obj: ", parserObj)
domObj = dom.DomManager(parserObj)
domObj.buildDomTree()
dom.DomManager.parseDomTree(dom.SAMPLE_DOM)
# parserObj = parser.BuilderParser(configfile)
# renderObj = renderer.Renderer()
# renderObj.build_staging_environment(parserObj.parsedData)
# userConfig = parserObj.parsedData["user_config"]
# htmlTemplate = parserObj.parsedData["html_template"]
# flaskTemplate = parserObj.parsedData["flask_template"]
# renderObj.build_html_documents(htmlTemplate, userConfig)
# renderObj.build_flask_app(flaskTemplate, userConfig)
def main():
cli.add_command(create)
cli()
if __name__ == "__main__":
main()
| 25.181818 | 86 | 0.723827 | [
"Apache-2.0"
] | bdastur/builder | builder/builder.py | 1,108 | Python |
from .treasury_yields import Treasury_Yield_Task
from .mariadb import Mariadb_Task
from .bea import BEA_Task
from .yfinance import Yfinance_Task | 36.75 | 49 | 0.85034 | [
"MIT"
] | samsea18/Treasury-Yield-Analysis | treasury_yield_analysis/task/__init__.py | 147 | Python |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import download as download_internal
@click.command()
@click.argument("campaign_id", type=str)
@click.option("--batch_no", "batch_no", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def download(
campaign_id: str,
batch_no: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(download_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
result, error = download_internal(
campaign_id=campaign_id,
batch_no=batch_no,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"download failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
download.operation_id = "download"
download.is_deprecated = False
| 30.054054 | 88 | 0.718525 | [
"MIT"
] | AccelByte/accelbyte-python-sdk | samples/cli/accelbyte_py_sdk_cli/platform/_download.py | 2,224 | Python |
import unittest
import numpy as np
import string
from pyffm.util import Map
class TestMap(unittest.TestCase):
def test_basic(self):
map1 = Map()
map_size_to_test = 1000
all_letters = string.ascii_uppercase + string.ascii_lowercase
counter = 0
for char in "".join(
all_letters[np.random.choice(len(all_letters))]
for _ in range(map_size_to_test)
):
if char not in map1:
counter += 1
map_index = map1.add(char)
self.assertEqual(map_index, map1._map_dict[char])
self.assertEqual(len(map1), counter)
| 26.5 | 69 | 0.610063 | [
"MIT"
] | mascaroa/pyffm | pyffm/test/test_utils.py | 636 | Python |
"""
Credentials used when making CLIs.
"""
from pathlib import Path
from dcos_e2e.cluster import Cluster
DEFAULT_SUPERUSER_USERNAME = 'bootstrapuser'
DEFAULT_SUPERUSER_PASSWORD = 'deleteme'
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
"""
Add an authorized key to all nodes in the given cluster.
"""
nodes = {
*cluster.masters,
*cluster.agents,
*cluster.public_agents,
}
for node in nodes:
node.run(
args=['echo', '', '>>', '/root/.ssh/authorized_keys'],
shell=True,
)
node.run(
args=[
'echo',
public_key_path.read_text(),
'>>',
'/root/.ssh/authorized_keys',
],
shell=True,
)
| 22 | 72 | 0.540541 | [
"Apache-2.0"
] | dcos/dcos-e2e | src/dcos_e2e_cli/common/credentials.py | 814 | Python |
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.spatial import KDTree, Voronoi, voronoi_plot_2d
np.random.seed(42)
data = np.random.rand(25, 2)
vor = Voronoi(data)
print('Using scipy.spatial.voronoi_plot_2d, wait...')
voronoi_plot_2d(vor)
xlim = plt.xlim()
ylim = plt.ylim()
pml.savefig('knnVoronoiMesh.pdf')
plt.show()
print('Using scipy.spatial.KDTree, wait a few seconds...')
plt.figure()
tree = KDTree(data)
x = np.linspace(xlim[0], xlim[1], 200)
y = np.linspace(ylim[0], ylim[1], 200)
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
plt.plot(data[:, 0], data[:, 1], 'ko')
plt.pcolormesh(x, y, tree.query(xy)[1].reshape(200, 200), cmap='jet')
pml.savefig('knnVoronoiColor.pdf')
plt.show()
| 25.166667 | 69 | 0.701987 | [
"MIT"
] | Drishttii/pyprobml | scripts/knn_voronoi_plot.py | 755 | Python |
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
"""
Disable certain errors that will prevent setup from completing.
Parameters
----------
flag : bool or None
If not None, set the value of _ignore_errors to this value.
Returns
-------
bool
The current value of _ignore_errors.
"""
global _ignore_errors
if flag is not None:
_ignore_errors = flag
return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
"""
Raise an exception or issue a warning, depending on the value of _ignore_errors.
Parameters
----------
msg : str
The error/warning message.
exc : Exception class
This exception class is used to create the exception to be raised.
category : warning class
This category is the class of warning to be issued.
err : bool
If None, use ignore_errors(), otherwise use value of err to determine whether to
raise an exception (err=True) or issue a warning (err=False).
"""
if (err is None and ignore_errors()) or err is False:
issue_warning(msg, category=category)
else:
raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
"""
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
"""
save = ignore_errors()
ignore_errors(flag)
try:
yield
finally:
ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
"""
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
"""
warn_deprecation('simple_warning is deprecated. '
'Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
"""
Make value compatible with the specified shape or the shape of indices.
Parameters
----------
name : str
The name of the value.
value : float or list or tuple or ndarray or Iterable
The value of a variable.
shape : int or tuple or list or None
The expected or desired shape of the value.
indices : Indexer or None
The indices into a source variable.
Returns
-------
ndarray
The value in a shape compatible with the specified shape and/or indices.
tuple
The resulting shape of the value.
Raises
------
ValueError
If value cannot be made to conform to shape or if shape and indices
are incompatible.
"""
if isinstance(value, Iterable):
value = np.asarray(value)
# if shape is not given, infer from value (if not scalar) or indices
if shape is not None:
if isinstance(shape, numbers.Integral):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif not np.isscalar(value):
shape = np.atleast_1d(value).shape
if indices is not None:
if not indices._flat_src and shape is None:
raise RuntimeError("src_indices for '%s' is not flat, so its input "
"shape must be provided." % name)
try:
indshape = indices.indexed_src_shape
except (RuntimeError, ValueError, TypeError):
pass # use shape provided or shape of value and check vs. shape of indices later
else:
if shape is not None and np.product(indshape) != np.product(shape):
raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
(indshape, shape, name))
if shape is None:
shape = indshape
if shape is None:
# shape is not determined, assume the shape of value was intended
value = np.atleast_1d(value)
shape = value.shape
else:
# shape is determined, if value is scalar assign it to array of shape
# otherwise make sure value is an array of the determined shape
if np.isscalar(value) or value.shape == (1,):
value = np.ones(shape) * value
else:
value = np.atleast_1d(value).astype(np.float64)
if value.shape != shape:
raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
(name, shape, value.shape))
return value, shape
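

# Hedged usage sketch (added example, not part of OpenMDAO): a scalar default is
# broadcast to the requested shape and returned together with that shape.
if __name__ == '__main__':
    _val, _shape = ensure_compatible('x', 3.0, shape=(2, 2))
    print(_val, _shape)  # -> a (2, 2) array of 3.0 and the tuple (2, 2)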
def determine_adder_scaler(ref0, ref, adder, scaler):
r"""
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
"""
# Affine scaling cannot be used with scalers/adders
if ref0 is not None or ref is not None:
if scaler is not None or adder is not None:
raise ValueError('Inputs ref/ref0 are mutually exclusive '
'with scaler/adder')
if ref is None:
ref = 1.0
if ref0 is None:
ref0 = 0.0
# Convert ref/ref0 to scaler/adder so we can scale the bounds
adder = -ref0
scaler = 1.0 / (ref + adder)
else:
if scaler is None:
scaler = 1.0
if adder is None:
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
"""
For testing, sets the pyoptsparse optimizer using the given optimizer name.
This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
This can be used on systems that have SNOPT installed to force them to use
SLSQP in order to mimic our test machines on travis and appveyor.
Parameters
----------
optname : str
Name of pyoptsparse optimizer that is requested by the test.
fallback : bool
If True, fall back to SLSQP if optname can't be found.
Returns
-------
object
Pyoptsparse optimizer instance.
str
Pyoptsparse optimizer string.
"""
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if fallback and optname != 'SLSQP':
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if fallback and isinstance(opt, Mock):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if not fallback and OPTIMIZER != optname:
raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
"""
Format array option values.
Checks that the given array values are either None, float, or an iterable
of numeric values. On output all iterables of numeric values are
converted to a flat np.ndarray. If values is scalar, it is converted
to float.
Parameters
----------
name : str
The path of the variable relative to the current system.
values : float or numpy ndarray or Iterable
Values of the array option to be formatted to the expected form.
val_if_none : float or numpy ndarray
The default value for the option if values is None.
flatten : bool
Set to True to flatten any ndarray return.
Returns
-------
float or np.ndarray
Values transformed to the expected form.
Raises
------
ValueError
If values is Iterable but cannot be converted to a numpy ndarray
TypeError
If values is scalar, not None, and not a Number.
"""
# Convert adder to ndarray/float as necessary
if isinstance(values, np.ndarray):
if flatten:
values = values.flatten()
elif not isinstance(values, str) \
and isinstance(values, Iterable):
values = np.asarray(values, dtype=float)
if flatten:
values = values.flatten()
elif values is None:
values = val_if_none
elif values == float('inf'):
values = INF_BOUND
elif values == -float('inf'):
values = -INF_BOUND
elif isinstance(values, numbers.Number):
values = float(values)
else:
raise TypeError('Expected values of {0} to be an Iterable of '
'numeric values, or a scalar numeric value. '
'Got {1} instead.'.format(name, values))
return values
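# Illustrative doctest-style examples (assumed inputs, not part of the original module):
# >>> format_as_float_or_array('adder', [1, 2, 3])
# array([1., 2., 3.])
# >>> format_as_float_or_array('scaler', 5)
# 5.0
# >>> format_as_float_or_array('adder', None, val_if_none=0.0)
# 0.0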
class ContainsAll(object):
"""
A fake dictionary that always reports __contains__(name) to be True.
"""
def __contains__(self, name):
"""
Return if the named object is contained.
Parameters
----------
name : str
Name of the object being looked up.
Returns
-------
bool
Always returns True.
"""
return True
def all_ancestors(pathname, delim='.'):
"""
Return a generator of pathnames of the starting object and all of its parents.
Pathnames are ordered from longest to shortest.
Parameters
----------
pathname : str
Pathname of starting object.
delim : str
Delimiter used to split the name.
Yields
------
str
"""
parts = pathname.split(delim)
for i in range(len(parts), 0, -1):
yield delim.join(parts[:i])
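# Illustrative doctest-style example (not part of the original module):
# >>> list(all_ancestors('model.sub.comp'))
# ['model.sub.comp', 'model.sub', 'model']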
def find_matches(pattern, var_list):
"""
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
"""
if pattern == '*':
return var_list
elif pattern in var_list:
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
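# Illustrative doctest-style examples (hypothetical variable names):
# >>> find_matches('comp?.x', ['comp1.x', 'comp2.x', 'other.y'])
# ['comp1.x', 'comp2.x']
# >>> find_matches('*', ['a', 'b'])
# ['a', 'b']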
def pad_name(name, pad_num=10, quotes=False):
"""
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
"""
l_name = len(name)
quotes_len = 2 if quotes else 0
if l_name + quotes_len < pad_num:
pad = pad_num - (l_name + quotes_len)
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = "{name}{sep:<{pad}}"
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
else:
if quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
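# Illustrative doctest-style examples (not part of the original module):
# >>> pad_name('x', pad_num=5)
# 'x    '
# >>> pad_name('x', pad_num=5, quotes=True)
# "'x'  "
# >>> pad_name('longer_name', pad_num=5)
# 'longer_name'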
def run_model(prob, ignore_exception=False):
"""
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
Output from calling `run_model` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if not ignore_exception:
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
def run_driver(prob):
"""
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
Returns
-------
bool
Failure flag; True if failed to converge, False is successful.
string
Output from calling `run_driver` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
"""
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
# ignore any keyword args that are not valid in this version of numpy
# e.g. numpy <=1.13 does not have the 'floatmode' option
kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
try:
np.set_printoptions(*args, **kw_opts)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _nothing():
yield None
def do_nothing_context():
"""
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
"""
return contextmanager(_nothing)()
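# Illustrative usage sketch (hypothetical `do_profiling` flag and `profiling()` context,
# not part of the original module):
# ctx = profiling() if do_profiling else do_nothing_context()
# with ctx:
#     run_expensive_step()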
def remove_whitespace(s, right=False, left=False):
"""
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
"""
if not left and not right:
return re.sub(r"\s+", "", s, flags=re.UNICODE)
elif right and left:
return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
elif right:
return re.sub(r"\s+$", "", s, flags=re.UNICODE)
else: # left
return re.sub(r"^\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
"""
return s.translate(_transtab)
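# Illustrative doctest-style example (not part of the original module):
# >>> str2valid_python_name('comp.x:y[0]')
# 'comp_x_y_0_'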
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
"""
Recursively convert numpy types to native types for JSON serialization.
    This function is for making serializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
"""
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
"""
if not str_data:
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if not isinstance(item, str):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
else:
raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
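# Illustrative doctest-style examples (hypothetical variable names):
# >>> match_includes_excludes('sub.comp.x', includes=['sub.*'], excludes=['*.y'])
# True
# >>> match_includes_excludes('sub.comp.y', includes=['sub.*'], excludes=['*.y'])
# False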
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
diff = name != prom_name
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return True
return False
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
"""
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
"""
return os.environ.get(env_var, '0').lower() not in _falsey
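# Illustrative usage sketch (hypothetical environment variable name):
# >>> os.environ['MY_FLAG'] = 'yes'
# >>> env_truthy('MY_FLAG')
# True
# >>> os.environ['MY_FLAG'] = '0'
# >>> env_truthy('MY_FLAG')
# False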
def common_subpath(pathnames):
"""
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
"""
if len(pathnames) == 1:
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if p0 != splits[i][common_loc]:
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
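# Illustrative doctest-style examples (hypothetical pathnames):
# >>> common_subpath(['model.sub.comp1', 'model.sub.comp2'])
# 'model.sub'
# >>> common_subpath(['a.b', 'c.d'])
# ''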
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
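# Illustrative doctest-style examples (not part of the original module):
# >>> _is_slicer_op(slice(2, 5))
# True
# >>> _is_slicer_op((slice(None), Ellipsis))
# True
# >>> _is_slicer_op(np.array([0, 1, 2]))
# False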
def _slice_indices(slicer, arr_size, arr_shape):
"""
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
"""
if isinstance(slicer, slice):
# for a simple slice we can use less memory
start, stop, step = slicer.start, slicer.stop, slicer.step
if start is None:
start = 0
if stop is None:
stop = arr_size
if step is None:
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
"""
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
"""
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
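# Illustrative doctest-style examples (not part of the original module):
# >>> shape2tuple(3)
# (3,)
# >>> shape2tuple((2, 5))
# (2, 5)
# >>> shape2tuple(None) is None
# True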
def get_connection_owner(system, tgt):
"""
Return (owner, promoted_src, promoted_tgt) for the given connected target.
Note : this is not speedy. It's intended for use only in error messages.
Parameters
----------
system : System
Any System. The search always goes from the model level down.
tgt : str
Absolute pathname of the target variable.
Returns
-------
tuple
        (owning group, promoted source name, promoted target name).
"""
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
    if src in abs2prom['output'] and tgt in abs2prom['input']:
if abs2prom['input'][tgt] != abs2prom['output'][src]:
# connection is explicit
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if tprom in g._manual_connections:
return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom
return None, None, None
def wing_dbg():
"""
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
"""
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
        Offset of this variable into the local vector.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
"""
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
"""
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
"""
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
"""
return self._iter()
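# Illustrative usage sketch (hypothetical system and variable name, not part of the
# original module):
# for i in LocalRangeIterable(system, 'comp.x'):
#     if i is not None:
#         pass  # index i is local to this rank; None marks entries owned by other ranks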
| 27.180818 | 97 | 0.607161 | [
"Apache-2.0"
] | DKilkenny/OpenMDAO | openmdao/utils/general_utils.py | 34,574 | Python |
import json
import os
srt_path = '/home/lyp/桌面/MAE_论文逐段精读【论文精读】.457423264.zh-CN.srt'
json_path = '/home/lyp/桌面/caption.json'
txt_path = '/home/lyp/桌面'
def srt2txt(path):
    out_path = os.path.join(txt_path, path.split('.')[0] + '.txt')
with open(path,'r+') as f:
with open(out_path, 'w+') as out:
for index,lines in enumerate(f.readlines()):
if(index%5 == 2):
out.write(lines.split('>')[1].split('<')[0]+'\n')
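# Note (illustrative assumption, not part of the original script): srt2txt expects each
# subtitle cue to span five lines, with the third line wrapped in an HTML-style tag, e.g.
#   <font size="4">some caption text</font>
# which is why only lines with index % 5 == 2 are kept and split on '>' and '<'.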
def json2txt(path):
    out_path = os.path.join(txt_path, path.split('.')[0] + '.txt')
    with open(out_path, 'w+') as out:
        with open(path, 'r+') as f:
caption_dict = json.load(f)
# print(len(caption_dict['body']))
for content_dict in caption_dict['body']:
out.write(content_dict['content']+'\n')
if __name__ == '__main__':
srt2txt(srt_path)
json2txt(json_path) | 36.36 | 73 | 0.585259 | [
"MIT"
] | lyp2333/External-Attention-pytorch | test_model/utils/caption2txt.py | 945 | Python |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_network_interfaces(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.create_network_interfaces_with_http_info(**kwargs)
return data
def create_network_interfaces_with_http_info(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.delete_network_interfaces_with_http_info(**kwargs)
return data
def delete_network_interfaces_with_http_info(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.list_network_interfaces_with_http_info(**kwargs)
return data
def list_network_interfaces_with_http_info(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.update_network_interfaces_with_http_info(**kwargs)
return data
def update_network_interfaces_with_http_info(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
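# Illustrative usage sketch (assumed client configuration; not part of the generated code):
# api = NetworkInterfacesApi()  # uses the default ApiClient built from Configuration
# response = api.list_network_interfaces(limit=5)
# print(response)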
| 42.981172 | 204 | 0.585933 | [
"Apache-2.0"
] | asun-ps/purity_fb_python_client | purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py | 20,545 | Python |
"""
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of SEQUENCES OF SUB-SEQUENCES.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Lucas D'Alesio.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the other functions to test them. """
#run_test_largest_number()
#run_test_largest_negative_number()
run_test_first_is_elsewhere_too()
def run_test_largest_number():
""" Tests the largest_number function. """
# -------------------------------------------------------------------------
# DONE: 2. Implement this TEST function.
# It TESTS the largest_number function defined below.
# Include at least ** 1 ** ADDITIONAL test beyond those we wrote.
# -------------------------------------------------------------------------
print()
print('-------------------------------------')
print('Testing the LARGEST_NUMBER function:')
print('-------------------------------------')
# Test 1:
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
# Test 2:
expected = -1111111111111111
answer = largest_number(([], [-1111111111111111], []))
print('Expected and actual are:', expected, answer)
# Test 3:
expected = None
answer = largest_number(([], [], []))
print('Expected and actual are:', expected, answer)
# DONE 2 (continued): Add your ADDITIONAL test(s) here:
    # Test 4:
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
def largest_number(seq_seq):
"""
Returns the largest number in the subsequences of the given
sequence of sequences. Returns None if there are NO numbers
in the subsequences.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]]
then this function returns 13.
As another example, if the given argument is:
([], [-1111111111111111], [])
then this function returns -1111111111111111.
As yet another example, if the given argument is:
([], [], [])
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
x = None
    for j in range(len(seq_seq)):
for k in range(len(seq_seq[j])):
x = j
y = k
for l in range(len(seq_seq)):
for o in range(len(seq_seq[l])):
if seq_seq[l][o] > seq_seq[x][y]:
x = l
y = o
    if x is None:
return None
return seq_seq[x][y]
def run_test_largest_negative_number():
""" Tests the largest_negative_number function. """
# -------------------------------------------------------------------------
# DONE: 4. Implement this TEST function.
# It TESTS the largest_negative_number function defined below.
#
# Include enough tests to give you confidence that your solution
# to this challenging problem is indeed correct.
# -------------------------------------------------------------------------
print()
print('-------------------------------------------------')
print('Testing the LARGEST_NEGATIVE_NUMBER function:')
print('-------------------------------------------------')
# Test 1:
    expected = -13
    answer = largest_negative_number([(3, 1, 4),
                                      (-13, 10, 11, 7, 10),
                                      [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
# Test 2:
expected = -2
    answer = largest_negative_number(([-10], [-1111111111111111], [-2]))
print('Expected and actual are:', expected, answer)
# Test 3:
expected = None
    answer = largest_negative_number(([], [], []))
print('Expected and actual are:', expected, answer)
def largest_negative_number(seq_seq):
"""
Returns the largest NEGATIVE number in the given sequence of
sequences of numbers. Returns None if there are no negative numbers
in the sequence of sequences.
For example, if the given argument is:
[(30, -5, 8, -20),
(100, -2.6, 88, -40, -5),
(400, 500)
]
then this function returns -2.6.
As another example, if the given argument is:
[(200, 2, 20), (500, 400)]
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# CHALLENGE: Try to solve this problem with no additional sequences
# being constructed (so the SPACE allowed is limited to the
# give sequence of sequences plus any non-list variables you want).
# -------------------------------------------------------------------------
    largest = None
    for j in range(len(seq_seq)):
        for k in range(len(seq_seq[j])):
            number = seq_seq[j][k]
            if number < 0 and (largest is None or number > largest):
                largest = number
    return largest
def run_test_first_is_elsewhere_too():
""" Tests the first_is_elsewhere_too function. """
# -------------------------------------------------------------------------
# We have supplied tests for you. No additional tests are required,
# although you are welcome to supply more tests if you choose.
# -------------------------------------------------------------------------
print()
print('-------------------------------------')
print('Testing the FIRST_IS_ELSEWHERE_TOO function:')
print('-------------------------------------')
# FYI: The notation below constructs what is called a DICTIONARY.
# It is like a list, but the indices can be any immutable
# objects (here, True or False), not just 0, 1, 2, ... as in lists.
message = {True: 'Your code PASSED this test.\n',
False: 'Your code FAILED this test.\n'}
no_failures = True
# Test 1:
expected = True
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 2:
expected = False
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 3:
expected = False
answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 4:
expected = True
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'a'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 5:
expected = False
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'aa'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 6:
expected = False
answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 7:
expected = False
answer = first_is_elsewhere_too([()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 8:
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 9:
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 10:
expected = False
answer = first_is_elsewhere_too([('a'), (), (), (), ('b'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 11:
expected = True
answer = first_is_elsewhere_too(['hello', 'goodbye'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 12:
expected = False
answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 13:
expected = False
answer = first_is_elsewhere_too(['1234567890',
'one two three',
'i am free',
'four five six',
'get my sticks',
'seven eight nine',
'i am fine'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 14:
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
'b'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 15:
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
(700 * 'eee') + 'b' + (90 * 'd'),
(800 * 'c') + 'd' + 1200 * 'c'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 16:
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234a',
'eeee'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 17:
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
'a' + (800 * '1') + '234',
'123'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 18:
test1 = [(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234',
'123']
for k in range(95):
test1.append(k * chr(k))
test2 = []
for k in range(30):
test2.append(k * chr(k))
expected = True
answer = first_is_elsewhere_too(test1 + ['a'] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 19 (continues test 18):
expected = False
answer = first_is_elsewhere_too(test1 + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 20 (continues test 18):
expected = True
a_inside = (100 * 'b') + 'a' + (100 * 'b')
answer = first_is_elsewhere_too(test1 + [a_inside] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
if no_failures:
print('*** Your code PASSED all')
else:
print('!!! Your code FAILED some')
print(' of the tests for first_is_elsewhere_too')
def first_is_elsewhere_too(seq_seq):
"""
Given a sequence of subsequences:
-- Returns True if any element of the first (initial) subsequence
appears in any of the other subsequences.
-- Returns False otherwise.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]]
then this function returns True because 3 appears
in the first subsequence and also in the third subsequence.
As another example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]]
then this function returns False because 3 does not appear in
any subsequence except the first, 1 does not appear in any
subsequence except the first, and 4 does not appear in any
subsequence except the first.
As yet another example, if the given argument is:
([], [1, 2], [1, 2])
then this function returns False since no element of the first
subsequence appears elsewhere.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences.
"""
# -------------------------------------------------------------------------
# DONE: 6. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use anything but comparison (==) in judging
# membership. In particular, you may NOT use:
# -- the IN operator
# (example: 7 in [9, 6, 7, 9] returns True)
# -- the COUNT method
# (example: [9, 6, 7, 9].count(9) returns 2)
# -- the INDEX method
# (example: [9, 6, 7, 9, 6, 1].index(6) returns 1)
# in this problem, as doing so would defeat the goal of providing
# practice at loops within loops (within loops within ...)
# -------------------------------------------------------------------------
for j in range(len(seq_seq[0])):
for k in range(1, len(seq_seq)):
for i in range(len(seq_seq[k])):
if seq_seq[k][i] == seq_seq[0][j]:
return True
return False
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 36.935035 | 79 | 0.519002 | [
"MIT"
] | dalesil/19-MoreLoopsWithinLoops | src/m3_more_nested_loops_in_sequences.py | 15,919 | Python |
import unittest
import textwrap
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class FeedbackOnExceptionTest4(StageTest):
def generate(self) -> List[TestCase]:
return [
TestCase(feedback_on_exception={
ZeroDivisionError: 'Do not divide by zero!',
AttributeError: 'Attribute Error raised!',
Exception: 'Base ex raised'
})
]
def check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(True, '')
class Test(unittest.TestCase):
def test(self):
status, feedback = FeedbackOnExceptionTest4(
'tests.outcomes.feedback_on_exception_test_4.program'
).run_tests()
self.assertEqual(textwrap.dedent('''\
Exception in test #1
Base ex raised
Traceback (most recent call last):
File "program.py", line 1, in <module>
raise Exception()
Exception'''), feedback)
self.assertEqual(status, -1)
| 27.571429 | 65 | 0.606218 | [
"MIT"
] | AlexandruPopa88/Hypercar-Service-Center | Hypercar Service Center/task/hypercar/hs-test-python-2.0.1/tests/outcomes/feedback_on_exception_test_4/test.py | 1,158 | Python |
'''
BVH Parser Class
By Omid Alemi
Created: June 12, 2017
Based on: https://gist.github.com/johnfredcee/2007503
'''
import re
import numpy as np
from data import Joint, MocapData
class BVHScanner:
'''
A wrapper class for re.Scanner
'''
def __init__(self):
def identifier(scanner, token):
return 'IDENT', token
def operator(scanner, token):
return 'OPERATOR', token
def digit(scanner, token):
return 'DIGIT', token
def open_brace(scanner, token):
return 'OPEN_BRACE', token
def close_brace(scanner, token):
return 'CLOSE_BRACE', token
self.scanner = re.Scanner([
(r'[a-zA-Z_]\w*', identifier),
#(r'-*[0-9]+(\.[0-9]+)?', digit), # won't work for .34
#(r'[-+]?[0-9]*\.?[0-9]+', digit), # won't work for 4.56e-2
#(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
(r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
(r'}', close_brace),
(r'}', close_brace),
(r'{', open_brace),
(r':', None),
(r'\s+', None)
])
def scan(self, stuff):
return self.scanner.scan(stuff)
class BVHParser():
'''
A class to parse a BVH file.
Extracts the skeleton and channel values
'''
def __init__(self, filename=None):
self.reset()
def reset(self):
self._skeleton = {}
self.bone_context = []
self._motion_channels = []
self._motions = []
self.current_token = 0
self.framerate = 0.0
self.root_name = ''
self.scanner = BVHScanner()
self.data = MocapData()
def parse(self, filename):
self.reset()
with open(filename, 'r') as bvh_file:
raw_contents = bvh_file.read()
tokens, remainder = self.scanner.scan(raw_contents)
self._parse_hierarchy(tokens)
self.current_token = self.current_token + 1
self._parse_motion(tokens)
self.data.skeleton = self._skeleton
self.data.channel_names = self._motion_channels
self.data.values = self._to_DataFrame()
self.data.root_name = self.root_name
self.data.framerate = self.framerate
return self.data
def _to_DataFrame(self):
'''Returns all of the channels parsed from the file as a pandas DataFrame'''
import pandas as pd
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
frames = [f[1] for f in self._motions]
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
def _new_bone(self, parent, name):
        bone = {'parent': parent, 'channels': [], 'offsets': [], 'children': []}
return bone
def _push_bone_context(self,name):
self.bone_context.append(name)
def _get_bone_context(self):
return self.bone_context[len(self.bone_context)-1]
def _pop_bone_context(self):
self.bone_context = self.bone_context[:-1]
return self.bone_context[len(self.bone_context)-1]
def _read_offset(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'OFFSET'):
return None, None
token_index = token_index + 1
offsets = [0.0] * 3
for i in range(3):
offsets[i] = float(bvh[token_index][1])
token_index = token_index + 1
return offsets, token_index
def _read_channels(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'CHANNELS'):
return None, None
token_index = token_index + 1
channel_count = int(bvh[token_index][1])
token_index = token_index + 1
channels = [""] * channel_count
for i in range(channel_count):
channels[i] = bvh[token_index][1]
token_index = token_index + 1
return channels, token_index
def _parse_joint(self, bvh, token_index):
end_site = False
joint_id = bvh[token_index][1]
token_index = token_index + 1
joint_name = bvh[token_index][1]
token_index = token_index + 1
parent_name = self._get_bone_context()
if (joint_id == "End"):
joint_name = parent_name+ '_Nub'
end_site = True
joint = self._new_bone(parent_name, joint_name)
if bvh[token_index][0] != 'OPEN_BRACE':
            print('Was expecting brace, got ', bvh[token_index])
return None
token_index = token_index + 1
offsets, token_index = self._read_offset(bvh, token_index)
joint['offsets'] = offsets
if not end_site:
channels, token_index = self._read_channels(bvh, token_index)
joint['channels'] = channels
for channel in channels:
self._motion_channels.append((joint_name, channel))
self._skeleton[joint_name] = joint
self._skeleton[parent_name]['children'].append(joint_name)
while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):
self._push_bone_context(joint_name)
token_index = self._parse_joint(bvh, token_index)
self._pop_bone_context()
if bvh[token_index][0] == 'CLOSE_BRACE':
return token_index + 1
print('Unexpected token ', bvh[token_index])
def _parse_hierarchy(self, bvh):
self.current_token = 0
if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token] != ('IDENT', 'ROOT'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][0] != 'IDENT':
return None
root_name = bvh[self.current_token][1]
root_bone = self._new_bone(None, root_name)
self.current_token = self.current_token + 2 #skipping open brace
offsets, self.current_token = self._read_offset(bvh, self.current_token)
channels, self.current_token = self._read_channels(bvh, self.current_token)
root_bone['offsets'] = offsets
root_bone['channels'] = channels
self._skeleton[root_name] = root_bone
self._push_bone_context(root_name)
for channel in channels:
self._motion_channels.append((root_name, channel))
while bvh[self.current_token][1] == 'JOINT':
self.current_token = self._parse_joint(bvh, self.current_token)
self.root_name = root_name
def _parse_motion(self, bvh):
if bvh[self.current_token][0] != 'IDENT':
print('Unexpected text')
return None
if bvh[self.current_token][1] != 'MOTION':
print('No motion section')
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frames':
return None
self.current_token = self.current_token + 1
frame_count = int(bvh[self.current_token][1])
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frame':
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Time':
return None
self.current_token = self.current_token + 1
frame_rate = float(bvh[self.current_token][1])
self.framerate = frame_rate
self.current_token = self.current_token + 1
frame_time = 0.0
self._motions = [()] * frame_count
for i in range(frame_count):
channel_values = []
for channel in self._motion_channels:
channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))
self.current_token = self.current_token + 1
self._motions[i] = (frame_time, channel_values)
frame_time = frame_time + frame_rate
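# Illustrative usage sketch (hypothetical file path, not part of the original module):
# parser = BVHParser()
# mocap = parser.parse('walk_cycle.bvh')
# print(mocap.root_name, mocap.framerate)
# print(mocap.values.shape)  # frames x channels DataFrame built by _to_DataFrame()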
| 33.831967 | 152 | 0.586796 | [
"MIT"
] | seanschneeweiss/RoSeMotion | app/resources/pymo/pymo/parsers.py | 8,255 | Python |
import requests
import logging
import os
import selenium
import unittest
import time
import requests, re
from django.core.management.base import BaseCommand
from search.models import Product, Category, DetailProduct
from django.db import IntegrityError
from django.core.exceptions import MultipleObjectsReturned
from logging.handlers import RotatingFileHandler
from logging import handlers
from configparser import ConfigParser
from django.test import RequestFactory
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.http import request, HttpRequest
from django.utils.http import base36_to_int, int_to_base36
from django.utils.http import urlsafe_base64_encode
from django.db.models.query_utils import Q
from django.utils.encoding import force_bytes
from django.contrib.auth import get_user_model
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
class Command(BaseCommand):
help = "Tests Selenium"
def __init__(self):
if os.environ.get("ENV") == "DEV":
self.driver = webdriver.Firefox("/Users/david/Projets/selenium driver/")
self.url = "http://127.0.0.1:8000/"
self.driver.maximize_window()
if os.environ.get("ENV") == "TRAVIS":
self.BROWSERSTACK_URL = 'https://davidbarat1:[email protected]/wd/hub'
self.desired_cap = {
'os' : 'Windows',
'os_version' : '10',
'browser' : 'Chrome',
'browser_version' : '80',
'name' : "P8 Test"
}
self.driver = webdriver.Remote(
command_executor=self.BROWSERSTACK_URL,
desired_capabilities=self.desired_cap)
self.driver.maximize_window()
self.url = "http://167.99.212.10/"
self.search = "Nutella"
self.user = "[email protected]"
self.password = "007Test!"
self.newpassword = "newpassword456"
def handle(self, *args, **options):
self.testMyProducts()
self.testMentionsContacts()
# self.testResetPassword()
self.tearDown()
def testResetPassword(self):
# self.driver.maximize_window()
self.driver.get(self.url)
time.sleep(5)
self.elem = self.driver.find_element_by_id("login")
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
self.elem = self.driver.find_element_by_id("id_username")
self.elem.send_keys(self.user)
self.elem = self.driver.find_element_by_id("id_password")
self.elem.send_keys(self.password)
self.elem.send_keys(Keys.RETURN)
time.sleep(3)
self.elem = self.driver.find_element_by_id("logout")
self.elem.send_keys(Keys.RETURN)
time.sleep(3)
self.elem = self.driver.find_element_by_id("login")
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
self.elem = self.driver.find_element_by_id("resetpassword")
self.elem.send_keys(Keys.RETURN)
time.sleep(3)
self.elem = self.driver.find_element_by_id("id_email")
self.elem.send_keys(self.user)
time.sleep(3)
self.user_filter = User.objects.filter(Q(email=self.user))
for self.user in self.user_filter:
print(self.user)
self.token = default_token_generator.make_token(self.user)
print(self.token)
self.uid = urlsafe_base64_encode(force_bytes(self.user.pk))
print(self.uid)
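        # Build the same uid/token reset link that Django's password-reset e-mail
        # would contain, so the flow can be exercised without reading a mailbox.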
self.driver.get(self.url + "reset/%s/%s/" % (self.uid, self.token))
time.sleep(3)
self.driver.find_element_by_id("id_new_password1").send_keys(self.newpassword)
self.driver.find_element_by_id("id_new_password2").send_keys(self.newpassword)
self.elem = self.driver.find_element_by_id("id_new_password2")
time.sleep(3)
self.elem.send_keys(Keys.RETURN)
time.sleep(3)
self.driver.quit()
def testMyProducts(self):
# self.driver.maximize_window()
self.driver.get(self.url)
self.elem = self.driver.find_element_by_id("myproducts")
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
self.elem = self.driver.find_element_by_id("id_username")
self.elem.send_keys(self.user)
self.elem = self.driver.find_element_by_id("id_password")
self.elem.send_keys(self.password)
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
def testMentionsContacts(self):
# self.driver.maximize_window()
self.driver.get(self.url)
self.elem = self.driver.find_element_by_id("mentions")
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
self.elem = self.driver.find_element_by_id("contact")
self.elem.send_keys(Keys.RETURN)
time.sleep(5)
def tearDown(self):
self.driver.quit()
| 38.470149 | 112 | 0.668477 | [
"MIT"
] | pythonmentor/david-p8 | search/management/commands/test_selenium.py | 5,155 | Python |
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
self.loop.create_task(self._repl_routine())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
        # Slightly patched implementation from discord.py discord.Client (parent) class
# Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
        asyncio.ensure_future(self._bot_runner_task(*args, **kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.broadcast_command("close")
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
log.debug3("Reminder processing iteration has started")
now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
to_remove = []
to_append = []
reminder_do_not_update_flag = False
for key, rem in self.config.reminders.items():
for i in range(len(rem.prereminders_list)):
prereminder = rem.prereminders_list[i]
used_prereminder = rem.used_prereminders_list[i]
if prereminder == 0 or used_prereminder:
continue
prereminder_time = (
datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
channel = self.get_channel(rem.channel_id)
e = DiscordEmbed()
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e.title(f"{prereminder} minutes left until reminder")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(
datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
e.footer(text=rem.author)
await channel.send("", embed=e.get())
rem.used_prereminders_list[i] = True
if rem == now:
channel = self.get_channel(rem.channel_id)
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e = DiscordEmbed()
e.title(f"{clock_emoji} You asked to remind")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(datetime.datetime.now(datetime.timezone.utc))
e.footer(text=rem.author)
await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
for user_id in rem.whisper_users:
await Msg.send_direct_message(
self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
if rem.email_users:
mail = Mail(self.secret_config)
mail.send(
rem.email_users,
f"Reminder: {rem.message}",
f"You asked to remind at {now} -> {rem.message}")
if rem.repeat_after > 0:
new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
to_append.append(
Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
to_append[-1].repeat_after = rem.repeat_after
to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
to_append[-1].prereminders_list = rem.prereminders_list
to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
to_append[-1].notes = rem.notes
log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
to_remove.append(key)
elif rem < now:
log.debug2(f"Scheduled reminder with id {key} removal")
to_remove.append(key)
else:
prereminders_delay = 0
if rem.prereminders_list:
prereminders_delay = max(rem.prereminders_list)
if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
< datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
reminder_do_not_update_flag = True
bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
for key in to_remove:
self.config.reminders.pop(key)
for item in to_append:
key = self.config.ids["reminder"]
self.config.reminders[key] = item
self.config.ids["reminder"] += 1
log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
await self.wait_until_ready()
while not self.is_closed():
await self._process_reminders_iteration()
await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
async def _repl_routine(self) -> None:
self.repl = Repl(self.config.repl["port"])
await self.repl.start()
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
self._load_plugins()
log.info(
f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
f"instance: {self.instance_name}")
self.bot_cache.update({
"ready": True,
})
self.bot_cache.dump_to_file()
bc.guilds = self.guilds
for guild in self.guilds:
if guild.id not in self.config.guilds.keys():
self.config.guilds[guild.id] = GuildSettings(guild.id)
bc.bot_user = self.user
self.loop.create_task(self._config_autosave())
self.loop.create_task(self._precompile())
def _load_plugins(self) -> None:
for plugin_name in bc.plugin_manager.get_plugins_list():
if plugin_name not in self.config.plugins.keys():
self.config.plugins[plugin_name] = {
"autostart": False,
}
for plugin_name, plugin_state in self.config.plugins.items():
if plugin_state["autostart"]:
asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
await bc.plugin_manager.broadcast_command("on_message", message)
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
else:
await self._process_regular_message(message)
await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
if message.embeds != old_message.embeds:
log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
return
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
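        # If the last three messages in the channel are identical, non-empty and none of
        # them was sent by the bot itself, join in by repeating the same message.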
m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
(m[0].author.id != self.user.id and
m[1].author.id != self.user.id and
m[2].author.id != self.user.id)):
await message.channel.send(m[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
channel_id = message.channel.id
if isinstance(message.channel, discord.Thread): # Inherit parent channel settings for threads
channel_id = message.channel.parent_id
if (self.user.mentioned_in(message) or self.user.id in [
member.id for member in list(
itertools.chain(*[role.members for role in message.role_mentions]))]):
if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
result = await self.config.disable_pings_in_response(message, bc.markov.generate())
await message.channel.send(message.author.mention + ' ' + result)
elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
needs_to_be_added = True
for ignored_prefix in bc.markov.ignored_prefixes.values():
if message.content.startswith(ignored_prefix):
needs_to_be_added = False
break
if needs_to_be_added:
bc.markov.add_string(message.content)
if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
responses_count = 0
for response in self.config.responses.values():
if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
break
if re.search(response.regex, message.content):
text = await Command.process_subcommands(
response.text, message, self.config.users[message.author.id])
await Msg.reply(message, text, False)
responses_count += 1
if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
for reaction in self.config.reactions.values():
if re.search(reaction.regex, message.content):
log.info("Added reaction " + reaction.emoji)
try:
await message.add_reaction(reaction.emoji)
except discord.HTTPException:
pass
async def _process_command(self, message: discord.Message) -> None:
command = message.content.split(' ')
command = list(filter(None, command))
command[0] = command[0][1:]
if not command[0]:
return log.debug("Ignoring empty command")
if command[0] not in self.config.commands.data.keys():
if command[0] in self.config.commands.aliases.keys():
command[0] = self.config.commands.aliases[command[0]]
else:
await message.channel.send(
f"Unknown command '{command[0]}', "
f"probably you meant '{self._suggest_similar_command(command[0])}'")
return
if command[0] not in (
"poll",
"timer",
"stopwatch",
"vqpush",
):
timeout_error, _ = await Util.run_function_with_time_limit(
self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
const.MAX_COMMAND_EXECUTION_TIME)
if command[0] not in (
"silent",
) and timeout_error:
await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
else:
await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
min_dist = 100000
suggestion = ""
for command in self.config.commands.data.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
for command in self.config.commands.aliases.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
try:
log.info(f"<{payload.message_id}> (raw_edit) {payload.data['author']['username']}#"
f"{payload.data['author']['discriminator']} -> {payload.data['content']}")
except KeyError:
pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
def start(self, args, main_bot=True):
# Check whether bot is already running
bot_cache = BotCache(main_bot).parse()
if bot_cache is not None:
pid = bot_cache["pid"]
if pid is not None and psutil.pid_exists(pid):
return log.error("Bot is already running!")
# Some variable initializations
config = None
secret_config = None
bc.restart_flag = False
bc.args = args
# Handle --nohup flag
if sys.platform in ("linux", "darwin") and args.nohup:
fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
os.dup2(fd, sys.stdout.fileno())
os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
os.close(fd)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# Selecting YAML parser
bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
# Saving application pd in order to safely stop it later
BotCache(main_bot).dump_to_file()
# Executing patch tool if it is necessary
if args.patch:
cmd = f"'{sys.executable}' '{os.path.dirname(__file__) + '/../tools/patch.py'}' all"
log.info("Executing patch tool: " + cmd)
subprocess.call(cmd)
# Read configuration files
config = Util.read_config_file(const.CONFIG_PATH)
if config is None:
config = Config()
secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
if secret_config is None:
secret_config = SecretConfig()
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None and os.path.isdir("backup"):
# Check available backups
markov_backups = sorted(
[x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
if markov_backups:
# Restore Markov model from backup
with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
zip_ref.extractall(".")
log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
shutil.move(markov_backups[-1][:-4], "markov.yaml")
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None:
bc.markov = Markov()
log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
if bc.markov is None:
bc.markov = Markov()
log.info("Created empty Markov model")
# Check config versions
ok = True
ok &= Util.check_version(
"discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
solutions=[
"execute: python -m pip install -r requirements.txt",
])
ok &= Util.check_version(
"Config", config.version, const.CONFIG_VERSION,
solutions=[
"run patch tool",
"remove config.yaml (settings will be lost!)",
])
ok &= Util.check_version(
"Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove markov.yaml (Markov model will be lost!)",
])
ok &= Util.check_version(
"Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove secret.yaml (your Discord authentication token will be lost!)",
])
if main_bot and not ok:
sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
config.commands.update()
# Checking authentication token
if secret_config.token is None:
secret_config = SecretConfig()
if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
secret_config.token = input("Enter your token: ")
# Constructing bot instance
if main_bot:
intents = discord.Intents.all()
walbot = WalBot(args.name, config, secret_config, intents=intents)
else:
walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
# Starting the bot
try:
walbot.run(secret_config.token)
except discord.errors.PrivilegedIntentsRequired:
log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
# After stopping the bot
log.info("Bot is disconnected!")
if main_bot:
config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
BotCache(main_bot).remove()
if bc.restart_flag:
cmd = f"'{sys.executable}' '{os.path.dirname(os.path.dirname(__file__)) + '/walbot.py'}' start"
log.info("Calling: " + cmd)
if sys.platform in ("linux", "darwin"):
fork = os.fork()
if fork == 0:
subprocess.call(cmd)
elif fork > 0:
log.info("Stopping current instance of the bot")
sys.exit(const.ExitStatus.NO_ERROR)
else:
subprocess.call(cmd)
def stop(self, _, main_bot=True):
if not BotCache(main_bot).exists():
return log.error("Could not stop the bot (cache file does not exist)")
bot_cache = BotCache(main_bot).parse()
pid = bot_cache["pid"]
if pid is None:
return log.error("Could not stop the bot (cache file does not contain pid)")
if psutil.pid_exists(pid):
if sys.platform == "win32":
# Reference to the original solution:
# https://stackoverflow.com/a/64357453
import ctypes
kernel = ctypes.windll.kernel32
kernel.FreeConsole()
kernel.AttachConsole(pid)
kernel.SetConsoleCtrlHandler(None, 1)
kernel.GenerateConsoleCtrlEvent(0, 0)
else:
os.kill(pid, signal.SIGINT)
while psutil.pid_exists(pid):
log.debug("Bot is still running. Please, wait...")
time.sleep(0.5)
log.info("Bot is stopped!")
else:
log.error("Could not stop the bot (bot is not running)")
BotCache(main_bot).remove()
| 46.596296 | 118 | 0.604443 | [
"MIT"
] | SergeyKonnov/walbot | src/bot.py | 25,162 | Python |
from typing import List, Dict
from DataObjects.ClassCluster import Cluster
from Murphi.ModularMurphi.MurphiTokens import MurphiTokens
from Murphi.ModularMurphi.TemplateClass import TemplateHandler
from DataObjects.ClassMachine import Machine
class GenModStateFunc(MurphiTokens, TemplateHandler):
def __init__(self, handler_dir: str):
TemplateHandler.__init__(self, handler_dir)
def gen_mod_state_func(self, clusters: List[Cluster]):
mod_state_func = "--" + __name__ + self.nl
machine_dict: Dict[str, Machine] = {}
for cluster in clusters:
for machine in cluster.system_tuple:
if machine.arch.get_unique_id_str() not in machine_dict:
machine_dict[machine.arch.get_unique_id_str()] = machine
for machine in machine_dict.values():
mod_state_func += self._stringReplKeys(self._openTemplate(self.fmodifystate),
[machine.arch.get_unique_id_str(), self.kmachines,
self.statesuf, self.instsuf, self.iState]) + self.nl
return mod_state_func + self.nl
| 39 | 104 | 0.660684 | [
"MIT"
] | icsa-caps/HieraGen | Murphi/ModularMurphi/GenModStateFunc.py | 1,170 | Python |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "LUMOD" not in os.environ:
os.environ["LUMOD"] = buildDir + '/src/lumocashd' + EXEEXT
if "LUMOCLI" not in os.environ:
os.environ["LUMOCLI"] = buildDir + '/src/lumocash-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs lumocash_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs lumocash_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs lumocash_hash to pass
'invalidtxrequest.py', # NOTE: needs lumocash_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs lumocash_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs lumocash_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs lumocash_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in LumoCash
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| 31.663043 | 163 | 0.638174 | [
"MIT"
] | LumoCash2018/LumoCash | qa/pull-tester/rpc-tests.py | 8,739 | Python |
'''
URL: https://leetcode.com/problems/maximum-nesting-depth-of-the-parentheses/
Difficulty: Easy
Description: Maximum Nesting Depth of the Parentheses
A string is a valid parentheses string (denoted VPS) if it meets one of the following:
It is an empty string "", or a single character not equal to "(" or ")",
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(C) = 0, where C is a string with a single character not equal to "(" or ")".
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's.
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS represented as string s, return the nesting depth of s.
Example 1:
Input: s = "(1+(2*3)+((8)/4))+1"
Output: 3
Explanation: Digit 8 is inside of 3 nested parentheses in the string.
Example 2:
Input: s = "(1)+((2))+(((3)))"
Output: 3
Example 3:
Input: s = "1+(2*3)/(2-1)"
Output: 1
Example 4:
Input: s = "1"
Output: 0
Constraints:
1 <= s.length <= 100
s consists of digits 0-9 and characters '+', '-', '*', '/', '(', and ')'.
It is guaranteed that parentheses expression s is a VPS.
'''
class Solution:
def maxDepth(self, s):
maxD = -float('inf')
currD = 0
for ch in s:
if ch not in ["(", ")"]:
continue
if ch == "(":
currD += 1
else:
maxD = max(maxD, currD)
currD -= 1
return maxD if maxD != -float('inf') else currD
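# Quick self-check against the examples from the problem statement above
# (illustrative usage only, not part of a LeetCode submission):
if __name__ == "__main__":
    solver = Solution()
    assert solver.maxDepth("(1+(2*3)+((8)/4))+1") == 3
    assert solver.maxDepth("(1)+((2))+(((3)))") == 3
    assert solver.maxDepth("1+(2*3)/(2-1)") == 1
    assert solver.maxDepth("1") == 0
    print("all examples passed")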
| 25.115942 | 118 | 0.58569 | [
"MIT"
] | AtharvRedij/leetcode-solutions | 1614 Maximum Nesting Depth of the Parentheses.py | 1,733 | Python |
from django.core.management.base import BaseCommand
from django.contrib.admin.models import LogEntry
def clear_old_admin_logs():
    # Keep only the 2000 most recent entries.
    # Collect the primary keys first: deleting rows while indexing into the
    # queryset shifts the offsets and silently skips entries.
    old_pks = list(LogEntry.objects.values_list('pk', flat=True)[2000:])
    if old_pks:
        LogEntry.objects.filter(pk__in=old_pks).delete()
class Command(BaseCommand):
def handle(self, *args, **options):
clear_old_admin_logs()
| 21.933333 | 51 | 0.705167 | [
"MIT"
] | HelloMelanieC/FiveUp | courier/management/commands/clear_old_admin_logs.py | 329 | Python |
def remove_nan_entries(df, key_columns, verbose=True):
n_row = len(df)
for column in key_columns:
df = df[df[column] == df[column]]
if verbose:
print("Prune ({}/{}) rows.".format(n_row - len(df), n_row))
return df
def parse_relationship_path(relationship_path):
# TODO: get the relationship with a public function instead
relationship = relationship_path._relationships_with_direction[0][1]
return {
'parent_entity_id': relationship.parent_entity.id,
'parent_variable_id': relationship.parent_variable.id,
'child_entity_id': relationship.child_entity.id,
'child_variable_id': relationship.child_variable.id,
}
def get_forward_entities(entityset, entity_id):
ids = []
entity_id_pipe = [entity_id]
while len(entity_id_pipe):
entity_id = entity_id_pipe[0]
entity_id_pipe = entity_id_pipe[1:]
ids.append(entity_id)
for child_id, _ in entityset.get_forward_entities(entity_id):
entity_id_pipe.append(child_id)
return ids
def get_forward_attributes(entityset, target_entity, direct_id, interesting_ids=None):
info = []
entity_id_pipe = [(target_entity, direct_id)]
while len(entity_id_pipe):
entity_id, direct_id = entity_id_pipe.pop()
if interesting_ids is not None and entity_id not in interesting_ids:
continue
df = entityset[entity_id].df
info = [{'entityId': entity_id, 'items': df.loc[direct_id].fillna('N/A').to_dict()}] + info
for child_id, relationship_path in entityset.get_forward_entities(entity_id):
relation = parse_relationship_path(relationship_path)
entity_id_pipe.append((child_id, df.loc[direct_id][relation['parent_variable_id']]))
return info
def find_path(entityset, source_entity, target_entity):
"""Find a path of the source entity to the target_entity."""
nodes_pipe = [target_entity]
parent_dict = {target_entity: None}
while len(nodes_pipe):
parent_node = nodes_pipe.pop()
if parent_node == source_entity:
break
child_nodes = [e[0] for e in entityset.get_backward_entities(parent_node)] \
+ [e[0] for e in entityset.get_forward_entities(parent_node)]
for child in child_nodes:
if child not in parent_dict:
parent_dict[child] = parent_node
nodes_pipe.append(child)
node = source_entity
paths = [[node]]
while node != target_entity:
node = parent_dict[node]
paths.append(paths[-1] + [node])
return paths
def transfer_cutoff_times(entityset, cutoff_times, source_entity, target_entity,
reduce="latest"):
path = find_path(entityset, source_entity, target_entity)[-1]
for i, source in enumerate(path[:-1]):
target = path[i + 1]
options = list(filter(lambda r: (r.child_entity.id == source
and r.parent_entity.id == target)
or (r.parent_entity.id == source
and r.child_entity.id == target),
entityset.relationships))
if len(options) == 0:
raise ValueError("No Relationship between {} and {}".format(source, target))
r = options[0]
if target == r.child_entity.id:
# Transfer cutoff_times to "child", e.g., PATIENTS -> ADMISSIONS
child_df_index = r.child_entity.df[r.child_variable.id].values
cutoff_times = cutoff_times.loc[child_df_index]
cutoff_times.index = r.child_entity.df.index
elif source == r.child_entity.id:
# Transfer cutoff_times to "parent", e.g., ADMISSIONS -> PATIENTS
cutoff_times[r.child_variable.id] = r.child_entity.df[r.child_variable.id]
if reduce == "latest":
idx = cutoff_times.groupby(r.child_variable.id).time.idxmax().values
elif reduce == 'earist':
idx = cutoff_times.groupby(r.child_variable.id).time.idxmin().values
else:
raise ValueError("Unknown reduce option.")
cutoff_times = cutoff_times.loc[idx]
cutoff_times = cutoff_times.set_index(r.child_variable.id, drop=True)
return cutoff_times
def get_records(entityset, subject_id, entity_id, time_index=None, cutoff_time=None):
entity = entityset[entity_id].df
# select records by SUBJECT_ID
if 'SUBJECT_ID' in entity.columns:
entity_df = entity[entity['SUBJECT_ID'] == subject_id]
else:
entity_df = entity
# select records before or at the cutoff_time
if cutoff_time is not None and time_index is not None:
entity_df = entity_df[entity_df[time_index] <= cutoff_time]
# TODO filter records according to secondary time index
return entity_df
def get_item_dict(es):
item_dict = {'LABEVENTS': es['D_LABITEMS'].df.loc[:, 'LABEL'].to_dict()}
for entity_id in ['CHARTEVENTS', 'SURGERY_VITAL_SIGNS']:
df = es['D_ITEMS'].df
# TODO: Change 'LABEL' to 'LABEL_CN' for Chinese labels
items = df[df['LINKSTO'] == entity_id.lower()].loc[:, 'LABEL']
item_dict[entity_id] = items.to_dict()
return item_dict
| 41.375 | 99 | 0.643882 | [
"MIT"
] | sibyl-dev/VBridge | vbridge/utils/entityset_helpers.py | 5,296 | Python |
from django.test import TestCase
# Create your tests here.
class Account(TestCase):
def test_register(self):
self.assertTrue(True)
| 14.7 | 32 | 0.714286 | [
"MIT"
] | youngershen/hhcms | hhcms/apps/account/tests.py | 147 | Python |
import gc
import string
import random
class ActiveGarbageCollection:
def __init__(self, title):
assert gc.isenabled(), "Garbage collection should be enabled"
self.title = title
def __enter__(self):
self._collect("start")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._collect("completion")
def _collect(self, step):
n = gc.collect()
if n > 0:
print(f"{self.title}: freed {n} unreachable objects on {step}")
def is_corrupted(entity_max_eventid, last_eventid):
if last_eventid is None and entity_max_eventid is None:
# no events, no entities
return False
elif last_eventid is not None and entity_max_eventid is None:
# events but no data (apply has failed or upload has been aborted)
return False
elif entity_max_eventid is not None and last_eventid is None:
# entities but no events (data is corrupted)
return True
elif entity_max_eventid is not None and last_eventid is not None:
# entities and events, entities can never be newer than events
return entity_max_eventid > last_eventid
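# For reference, the cases handled above, as (entity_max_eventid, last_eventid) -> result:
#   (None, None) -> False    (5, None) -> True
#   (None, 5)    -> False    (6, 5)    -> True, while (5, 5) and (4, 5) -> False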
def get_event_ids(storage):
"""Get the highest event id from the entities and the eventid of the most recent event
:param storage: GOB (events + entities)
:return:highest entity eventid and last eventid
"""
with storage.get_session():
entity_max_eventid = storage.get_entity_max_eventid()
last_eventid = storage.get_last_eventid()
return entity_max_eventid, last_eventid
def random_string(length):
"""Returns a random string of length :length: consisting of lowercase characters and digits
:param length:
:return:
"""
assert length > 0
characters = string.ascii_lowercase + ''.join([str(i) for i in range(10)])
return ''.join([random.choice(characters) for _ in range(length)])
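# Minimal illustration of random_string (run this module directly to execute it;
# it is not used by importing code):
if __name__ == "__main__":
    sample = random_string(8)
    assert len(sample) == 8
    assert all(c in string.ascii_lowercase + string.digits for c in sample)
    print("random_string sample:", sample)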
| 31.639344 | 95 | 0.684456 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Amsterdam/GOB-Upload | src/gobupload/utils.py | 1,930 | Python |
import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
import os
import datetime
import numpy as np
import argparse
from cnn_model import unroll
def main():
parser = argparse.ArgumentParser(description='Evaluate .')
parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')
parser.add_argument('--model', type=str, help="Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)", default='lstm')
parser.add_argument('--gpu', type=int, help="Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)", default=None)
args = parser.parse_args()
""" GPU management """
allow_gpu_mem_growth = True
gpu_memory_fraction = 1
gpu_id = args.gpu
if args.gpu is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
dataloader = Dataloader(datafolder="data/eval", batchsize=500)
#dataloader = Dataloader(conn=conn, batch_size=args.batchsize, sql_where=args.sqlwhere,
# debug=False,
# do_shuffle=False, do_init_shuffle=True, tablename=args.tablename)
"""
Load
parameters
from init_from model
"""
with open(os.path.join(args.rundir, "args.pkl"), "rb") as f:
modelargs = pickle.load(f)
"""
Create
new
model
object
with same parameter """
print("building model graph")
if args.model in ["rnn","lstm"]:
model = rnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"], batch_size=dataloader.batchsize,
adam_lr=modelargs["adam_lr"],rnn_cell_type=args.model , dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=0)
        evaluate = evaluate_rnn
if args.model == "cnn":
model = cnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"],
adam_lr=1e-3, dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=gpu_id)
evaluate = evaluate_cnn
probabilities, targets, observations = evaluate(model,dataloader,
init_dir=args.rundir,
print_every=20,
gpu_memory_fraction=gpu_memory_fraction,
allow_gpu_mem_growth=allow_gpu_mem_growth)
#np.save(os.path.join(args.rundir, "eval_confusion_matrix.npy"), confusion_matrix)
np.save(os.path.join(args.rundir, "eval_probabilities.npy"), probabilities)
np.save(os.path.join(args.rundir, "eval_targets.npy"), targets)
np.save(os.path.join(args.rundir, "eval_observations.npy"), observations)
def evaluate_rnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],
feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
#total_cm += cm
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
all_obs
def evaluate_cnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
with open(init_dir + "/steps.txt", "r") as f:
line = f.read()
step_, epoch_ = line.split(" ")
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
# unroll also index of observation. -> TODO integrate in unroll function, but need to update also dependencies
batch_size, max_seqlengths, n_input = X.shape
ones = np.ones([batch_size, max_seqlengths])
mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
mask = mask_.reshape(-1)
            obs_ = np.arange(0, max_seqlengths) * ones
            obs = obs_.reshape(-1)[mask]
            all_obs = np.append(all_obs, obs)  # accumulate observation positions across batches
""" unroll data """
X, y = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
scores, targets = sess.run([model.scores, model.targets],
feed_dict=feed)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
           all_obs
if __name__ == '__main__':
main() | 41.309524 | 192 | 0.594566 | [
"MIT"
] | TUM-LMF/fieldRNN | evaluate.py | 12,145 | Python |
#! /usr/bin/env python
# coding=utf-8
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
class YoloTrain(object):
    def __init__(self):  # pull common settings from the config file
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"  # directory where training logs are saved
self.trainset = Dataset('train')
self.testset = Dataset('test')
self.steps_per_period = len(self.trainset)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        with tf.name_scope('define_input'):  # define the input placeholders
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
self.trainable = tf.placeholder(dtype=tf.bool, name='training')
with tf.name_scope("define_loss"): # 定义损失函数
self.model = YOLOV3(self.input_data, self.trainable)
self.net_var = tf.global_variables()
self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
self.label_sbbox, self.label_mbbox, self.label_lbbox,
self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
self.loss = self.giou_loss + self.conf_loss + self.prob_loss
        with tf.name_scope('learn_rate'):  # define the learning-rate schedule
self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
dtype=tf.float64, name='warmup_steps')
train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
dtype=tf.float64, name='train_steps')
self.learn_rate = tf.cond(
pred=self.global_step < warmup_steps,
true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
(1 + tf.cos(
(self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
)
global_step_update = tf.assign_add(self.global_step, 1.0)
            '''
            Purpose of warmup_steps:
            At the very beginning of training the network easily runs into loss = NaN, so the
            initial learning rate is set very low to avoid that, which in turn slows training
            down. A gradually increasing (warmup) learning rate largely avoids NaN early on
            while still letting training speed up once the process has stabilised.
            '''
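            # The same schedule written out in plain Python (an illustrative sketch only,
            # mirroring the tf.cond above; `step` stands for the current global step):
            #   if step < warmup_steps:
            #       lr = step / warmup_steps * learn_rate_init
            #   else:
            #       lr = learn_rate_end + 0.5 * (learn_rate_init - learn_rate_end) * \
            #            (1 + cos((step - warmup_steps) / (train_steps - warmup_steps) * pi))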
with tf.name_scope("define_weight_decay"): # 指数平滑,可以让算法在最后不那么震荡,结果更有鲁棒性
moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
        # Specify which variables need to be restored (layer information etc.); declared early to keep the saved model small.
with tf.name_scope('loader_and_saver'):
variables_to_restore = [v for v in self.net_var if
v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]
self.loader = tf.train.Saver(variables_to_restore)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.name_scope("define_first_stage_train"): # 第一阶段训练,只训练指定层
self.first_stage_trainable_var_list = []
for var in tf.trainable_variables():
var_name = var.op.name
var_name_mess = str(var_name).split('/')
if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
self.first_stage_trainable_var_list.append(var)
first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=self.first_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([first_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_frozen_variables = tf.no_op()
with tf.name_scope("define_second_stage_train"): # 第二阶段训练,释放所有层
second_stage_trainable_var_list = tf.trainable_variables()
second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=second_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([second_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_all_variables = tf.no_op()
with tf.name_scope('summary'):
tf.summary.scalar("learn_rate", self.learn_rate)
tf.summary.scalar("giou_loss", self.giou_loss)
tf.summary.scalar("conf_loss", self.conf_loss)
tf.summary.scalar("prob_loss", self.prob_loss)
tf.summary.scalar("total_loss", self.loss)
logdir = "./data/log/" # 日志保存地址
if os.path.exists(logdir): shutil.rmtree(logdir)
os.mkdir(logdir)
self.write_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
def train(self):
self.sess.run(tf.global_variables_initializer())
try:
print('=> Restoring weights from: %s ... ' % self.initial_weight)
self.loader.restore(self.sess, self.initial_weight)
except:
print('=> %s does not exist !!!' % self.initial_weight)
print('=> Now it starts to train YOLOV3 from scratch ...')
self.first_stage_epochs = 0
        # staged learning rate (two training phases)
for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
if epoch <= self.first_stage_epochs:
train_op = self.train_op_with_frozen_variables
else:
train_op = self.train_op_with_all_variables
            # tqdm is a visualization tool that displays an iterable as a progress bar
pbar = tqdm(self.trainset)
train_epoch_loss, test_epoch_loss = [], []
for train_data in pbar:
_, summary, train_step_loss, global_step_val = self.sess.run(
[train_op, self.write_op, self.loss, self.global_step], feed_dict={
self.input_data: train_data[0],
self.label_sbbox: train_data[1],
self.label_mbbox: train_data[2],
self.label_lbbox: train_data[3],
self.true_sbboxes: train_data[4],
self.true_mbboxes: train_data[5],
self.true_lbboxes: train_data[6],
self.trainable: True,
})
train_epoch_loss.append(train_step_loss)
self.summary_writer.add_summary(summary, global_step_val)
pbar.set_description("train loss: %.2f" % train_step_loss)
for test_data in self.testset:
test_step_loss = self.sess.run(self.loss, feed_dict={
self.input_data: test_data[0],
self.label_sbbox: test_data[1],
self.label_mbbox: test_data[2],
self.label_lbbox: test_data[3],
self.true_sbboxes: test_data[4],
self.true_mbboxes: test_data[5],
self.true_lbboxes: test_data[6],
self.trainable: False,
})
test_epoch_loss.append(test_step_loss)
train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
ckpt_file = "./checkpoint/yolov3_train_loss=%.4f.ckpt" % train_epoch_loss
log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
% (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
self.saver.save(self.sess, ckpt_file, global_step=epoch)
if __name__ == '__main__': YoloTrain().train()
| 51.978142 | 130 | 0.610387 | [
"MIT"
] | Byronnar/tensorflow-serving-yolov3 | train.py | 9,978 | Python |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from collections import namedtuple
from flask import session
from flask.ext.login import current_user
from .user_permissions import UserPermissions
from ggrc.models import get_model
Permission = namedtuple('Permission', 'action resource_type context_id')
_contributing_resource_types = {}
# Return a list of resource types using the same context space.
# This is needed because permissions may be given for, e.g., "Contract", but
# the restriction on join is done knowing only "Directive".
def get_contributing_resource_types(resource_type):
resource_types = _contributing_resource_types.get(resource_type, None)
if resource_types is None:
resource_types = [resource_type]
resource_model = get_model(resource_type)
if resource_model:
resource_manager = resource_model._sa_class_manager
resource_types.extend(
manager.class_.__name__ for manager in
resource_manager.subclass_managers(True))
_contributing_resource_types[resource_type] = resource_types
return resource_types
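# Illustration of the mapping above ("Policy" and "Regulation" are hypothetical here):
# if "Contract", "Policy" and "Regulation" are SQLAlchemy subclasses of "Directive",
# then get_contributing_resource_types("Directive") returns something like
#   ["Directive", "Contract", "Policy", "Regulation"]
# so a permission stored against any of these types is checked in the shared
# "Directive" context space.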
class DefaultUserPermissionsProvider(object):
def __init__(self, settings):
pass
def permissions_for(self, user):
return DefaultUserPermissions()
class DefaultUserPermissions(UserPermissions):
# super user, context_id 0 indicates all contexts
ADMIN_PERMISSION = Permission(
'__GGRC_ADMIN__',
'__GGRC_ALL__',
0,
)
def _admin_permission_for_context(self, context_id):
return Permission(
self.ADMIN_PERMISSION.action,
self.ADMIN_PERMISSION.resource_type,
context_id)
def _permission_match(self, permission, permissions):
return permission.context_id in \
permissions\
.get(permission.action, {})\
.get(permission.resource_type, [])
def _is_allowed(self, permission):
if 'permissions' not in session:
return True
permissions = session['permissions']
if permissions is None:
return True
if self._permission_match(permission, permissions):
return True
if self._permission_match(self.ADMIN_PERMISSION, permissions):
return True
return self._permission_match(
self._admin_permission_for_context(permission.context_id),
permissions)
def is_allowed_create(self, resource_type, context_id):
"""Whether or not the user is allowed to create a resource of the specified
type in the context."""
return self._is_allowed(Permission('create', resource_type, context_id))
def is_allowed_read(self, resource_type, context_id):
"""Whether or not the user is allowed to read a resource of the specified
type in the context."""
return self._is_allowed(Permission('read', resource_type, context_id))
def is_allowed_update(self, resource_type, context_id):
"""Whether or not the user is allowed to update a resource of the specified
type in the context."""
return self._is_allowed(Permission('update', resource_type, context_id))
def is_allowed_delete(self, resource_type, context_id):
"""Whether or not the user is allowed to delete a resource of the specified
type in the context."""
return self._is_allowed(Permission('delete', resource_type, context_id))
def _get_contexts_for(self, action, resource_type):
# FIXME: (Security) When applicable, we should explicitly assert that no
# permissions are expected (e.g. that every user has ADMIN_PERMISSION).
if 'permissions' not in session:
return None
permissions = session['permissions']
if permissions is None:
return None
if self._permission_match(self.ADMIN_PERMISSION, permissions):
return None
# Get the list of contexts for a given resource type and any
# superclasses
resource_types = get_contributing_resource_types(resource_type)
ret = []
for resource_type in resource_types:
ret.extend(permissions.get(action, {}).get(resource_type, ()))
# Extend with the list of all contexts for which the user is an ADMIN
admin_list = list(
permissions.get(self.ADMIN_PERMISSION.action, {})\
.get(self.ADMIN_PERMISSION.resource_type, ()))
ret.extend(admin_list)
return ret
def create_contexts_for(self, resource_type):
"""All contexts in which the user has create permission."""
return self._get_contexts_for('create', resource_type)
def read_contexts_for(self, resource_type):
"""All contexts in which the user has read permission."""
return self._get_contexts_for('read', resource_type)
def update_contexts_for(self, resource_type):
"""All contexts in which the user has update permission."""
return self._get_contexts_for('update', resource_type)
def delete_contexts_for(self, resource_type):
"""All contexts in which the user has delete permission."""
return self._get_contexts_for('delete', resource_type)
| 37.8 | 79 | 0.737997 | [
"ECL-2.0",
"Apache-2.0"
] | sriharshakappala/ggrc-core | src/ggrc/rbac/permissions_provider.py | 5,103 | Python |
"""
Django settings for session_words project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u@cj5-77l85mz0t186p6@1c(d607sgv(0t5lm!4h$ok8to&h@v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'session_words.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'session_words.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.696721 | 91 | 0.698565 | [
"MIT"
] | justnclrk/Python | Django/session_words/session_words/settings.py | 3,135 | Python |
import unittest
import tfexpt
import expt
from tensorlog import matrixdb
from tensorlog import program
from tensorlog import dataset
class TestNative(unittest.TestCase):
def setUp(self):
(self.n,self.maxD,self.epochs) = (16,8,20)
(self.factFile,trainFile,testFile) = expt.genInputs(self.n)
# (self.factFile,self.trainFile,self.testFile) = ('inputs/g16.cfacts','inputs/g16-train.exam','inputs/g16-test.exam')
self.db = matrixdb.MatrixDB.loadFile(self.factFile)
self.prog = program.Program.loadRules("grid.ppr",self.db)
self.trainData = dataset.Dataset.loadExamples(self.prog.db,trainFile)
self.testData = dataset.Dataset.loadExamples(self.prog.db,testFile)
def testIt(self):
acc,loss = expt.accExpt(self.prog,self.trainData,self.testData,self.n,self.maxD,self.epochs)
print('acc',acc)
self.assertTrue(acc >= 0.85)
times = expt.timingExpt(self.prog)
for t in times:
print('time',t)
self.assertTrue(t < 0.05)
class TestAccTF(unittest.TestCase):
def setUp(self):
(self.n,self.maxD,self.epochs) = (16,8,20)
(self.factFile,self.trainFile,self.testFile) = expt.genInputs(self.n)
(self.tlog,self.trainData,self.testData) = tfexpt.setup_tlog(self.maxD,self.factFile,self.trainFile,self.testFile)
def testIt(self):
acc = tfexpt.trainAndTest(self.tlog,self.trainData,self.testData,self.epochs)
print('acc',acc)
self.assertTrue(acc >= 0.85)
if __name__ == "__main__":
unittest.main()
| 33.5 | 120 | 0.71981 | [
"Apache-2.0"
] | TeamCohen/TensorLog | datasets/grid/testexpt.py | 1,474 | Python |
import hashlib
import random
from typing import Tuple, Dict
from self_driving.beamng_config import BeamNGConfig
from self_driving.beamng_evaluator import BeamNGEvaluator
from core.member import Member
from self_driving.catmull_rom import catmull_rom
from self_driving.road_bbox import RoadBoundingBox
from self_driving.road_polygon import RoadPolygon
from self_driving.edit_distance_polyline import iterative_levenshtein
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class BeamNGMember(Member):
"""A class representing a road returned by the RoadGenerator."""
counter = 0
def __init__(self, control_nodes: Tuple4F, sample_nodes: Tuple4F, num_spline_nodes: int,
road_bbox: RoadBoundingBox):
super().__init__()
BeamNGMember.counter += 1
self.name = f'mbr{str(BeamNGMember.counter)}'
self.name_ljust = self.name.ljust(7)
self.control_nodes = control_nodes
self.sample_nodes = sample_nodes
self.num_spline_nodes = num_spline_nodes
self.road_bbox = road_bbox
self.config: BeamNGConfig = None
self.problem: 'BeamNGProblem' = None
self._evaluator: BeamNGEvaluator = None
def clone(self):
res = BeamNGMember(list(self.control_nodes), list(self.sample_nodes), self.num_spline_nodes, self.road_bbox)
res.config = self.config
res.problem = self.problem
res.distance_to_boundary = self.distance_to_boundary
return res
def to_dict(self) -> dict:
return {
'control_nodes': self.control_nodes,
'sample_nodes': self.sample_nodes,
'num_spline_nodes': self.num_spline_nodes,
'road_bbox_size': self.road_bbox.bbox.bounds,
'distance_to_boundary': self.distance_to_boundary
}
@classmethod
def from_dict(cls, dict: Dict):
road_bbox = RoadBoundingBox(dict['road_bbox_size'])
res = BeamNGMember([tuple(t) for t in dict['control_nodes']],
[tuple(t) for t in dict['sample_nodes']],
dict['num_spline_nodes'], road_bbox)
res.distance_to_boundary = dict['distance_to_boundary']
return res
def evaluate(self):
if self.needs_evaluation():
self.simulation = self.problem._get_evaluator().evaluate([self])
print('eval mbr', self)
#assert not self.needs_evaluation()
def needs_evaluation(self):
return self.distance_to_boundary is None or self.simulation is None
def clear_evaluation(self):
self.distance_to_boundary = None
def is_valid(self):
return (RoadPolygon.from_nodes(self.sample_nodes).is_valid() and
self.road_bbox.contains(RoadPolygon.from_nodes(self.control_nodes[1:-1])))
def distance(self, other: 'BeamNGMember'):
#TODO
#return frechet_dist(self.sample_nodes, other.sample_nodes)
return iterative_levenshtein(self.sample_nodes, other.sample_nodes)
#return frechet_dist(self.sample_nodes[0::3], other.sample_nodes[0::3])
def to_tuple(self):
import numpy as np
barycenter = np.mean(self.control_nodes, axis=0)[:2]
return barycenter
def mutate(self) -> 'BeamNGMember':
RoadMutator(self, lower_bound=-int(self.problem.config.MUTATION_EXTENT), upper_bound=int(self.problem.config.MUTATION_EXTENT)).mutate()
self.distance_to_boundary = None
return self
def __repr__(self):
eval_boundary = 'na'
if self.distance_to_boundary:
eval_boundary = str(self.distance_to_boundary)
if self.distance_to_boundary > 0:
eval_boundary = '+' + eval_boundary
eval_boundary = '~' + eval_boundary
eval_boundary = eval_boundary[:7].ljust(7)
h = hashlib.sha256(str([tuple(node) for node in self.control_nodes]).encode('UTF-8')).hexdigest()[-5:]
return f'{self.name_ljust} h={h} b={eval_boundary}'
class RoadMutator:
NUM_UNDO_ATTEMPTS = 20
def __init__(self, road: BeamNGMember, lower_bound=-2, upper_bound=2):
self.road = road
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def mutate_gene(self, index, xy_prob=0.5) -> Tuple[int, int]:
gene = list(self.road.control_nodes[index])
# Choose the mutation extent
candidate_mut_values = [i for i in range(self.lower_bound, self.upper_bound) if i !=0]
mut_value = random.choice(candidate_mut_values)
#mut_value = random.randint(self.lower_bound, self.upper_bound)
# Avoid to choose 0
#if mut_value == 0:
# mut_value += 1
# Select coordinate to mutate
if random.random() < xy_prob:
c = 1
else:
c = 0
gene[c] += mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
return c, mut_value
def undo_mutation(self, index, c, mut_value):
gene = list(self.road.control_nodes[index])
gene[c] -= mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
def mutate(self, num_undo_attempts=10):
backup_nodes = list(self.road.control_nodes)
attempted_genes = set()
n = len(self.road.control_nodes) - 2
seglength = 3
candidate_length = n - (2 * seglength)
assert(candidate_length > 0)
def next_gene_index() -> int:
if len(attempted_genes) == candidate_length:
return -1
i = None
condition = False
while not condition:
i = random.randint(seglength, n - seglength)
if i not in attempted_genes:
condition = True
assert(i is not None)
assert seglength <= i <= n - seglength
# i = random.randint(3, n - 3)
# while i in attempted_genes:
# i = random.randint(3, n-3)
attempted_genes.add(i)
return i
gene_index = next_gene_index()
while gene_index != -1:
c, mut_value = self.mutate_gene(gene_index)
attempt = 0
is_valid = self.road.is_valid()
while not is_valid and attempt < num_undo_attempts:
self.undo_mutation(gene_index, c, mut_value)
c, mut_value = self.mutate_gene(gene_index)
attempt += 1
is_valid = self.road.is_valid()
if is_valid:
break
else:
gene_index = next_gene_index()
if gene_index == -1:
raise ValueError("No gene can be mutated")
assert self.road.is_valid()
assert self.road.control_nodes != backup_nodes | 37.67027 | 143 | 0.634094 | [
"MIT"
] | IharBakhanovich/DeepHyperion | DeepHyperion-BNG/self_driving/beamng_member.py | 6,969 | Python |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import numpy as np
from arch.api.proto.feature_scale_meta_pb2 import ScaleMeta
from arch.api.proto.feature_scale_param_pb2 import ScaleParam
from arch.api.proto.feature_scale_param_pb2 import ColumnScaleParam
from arch.api.utils import log_utils
from federatedml.feature.feature_scale.base_scale import BaseScale
from federatedml.statistic.statics import MultivariateStatisticalSummary
LOGGER = log_utils.getLogger()
class StandardScale(BaseScale):
"""
Standardize features by removing the mean and scaling to unit variance. The standard score of a sample x is calculated as:
z = (x - u) / s, where u is the mean of the training samples, and s is the standard deviation of the training samples
"""
def __init__(self, params):
super().__init__(params)
self.with_mean = params.with_mean
self.with_std = params.with_std
self.mean = None
self.std = None
def set_param(self, mean, std):
self.mean = mean
self.std = std
@staticmethod
def __scale_with_column_range(data, column_upper, column_lower, mean, std, process_cols_list):
for i in process_cols_list:
value = data.features[i]
if value > column_upper[i]:
value = column_upper[i]
elif value < column_lower[i]:
value = column_lower[i]
data.features[i] = np.around((value - mean[i]) / std[i], 6)
return data
@staticmethod
def __scale(data, mean, std, process_cols_list):
for i in process_cols_list:
data.features[i] = np.around((data.features[i] - mean[i]) / std[i], 6)
return data
def fit(self, data):
"""
Apply standard scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
data:data_instance, data after scale
mean: list, each column mean value
std: list, each column standard deviation
"""
self.column_min_value, self.column_max_value = self._get_min_max_value(data)
self.scale_column_idx = self._get_scale_column_idx(data)
self.header = self._get_header(data)
self.data_shape = self._get_data_shape(data)
# fit column value if larger than parameter upper or less than parameter lower
data = self.fit_feature_range(data)
if not self.with_mean and not self.with_std:
self.mean = [0 for _ in range(self.data_shape)]
self.std = [1 for _ in range(self.data_shape)]
else:
self.summary_obj = MultivariateStatisticalSummary(data, -1)
if self.with_mean:
self.mean = self.summary_obj.get_mean()
self.mean = [self.mean[key] for key in self.header]
else:
self.mean = [0 for _ in range(self.data_shape)]
if self.with_std:
self.std = self.summary_obj.get_std_variance()
self.std = [self.std[key] for key in self.header]
for i, value in enumerate(self.std):
if np.abs(value - 0) < 1e-6:
self.std[i] = 1
else:
self.std = [1 for _ in range(self.data_shape)]
f = functools.partial(self.__scale, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
fit_data = data.mapValues(f)
return fit_data
def transform(self, data):
"""
Transform input data using standard scale with fit results
Parameters
----------
data: data_instance, input data
Returns
----------
transform_data:data_instance, data after transform
"""
f = functools.partial(self.__scale_with_column_range, column_upper=self.column_max_value,
column_lower=self.column_min_value,
mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
transform_data = data.mapValues(f)
return transform_data
def __get_meta(self):
if self.header:
scale_column = [self.header[i] for i in self.scale_column_idx]
else:
scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
if not self.data_shape:
self.data_shape = -1
meta_proto_obj = ScaleMeta(method="standard_scale",
area=self.area,
scale_column=scale_column,
feat_upper=self._get_upper(self.data_shape),
feat_lower=self._get_lower(self.data_shape),
with_mean=self.with_mean,
with_std=self.with_std
)
return meta_proto_obj
def __get_param(self, need_run):
column_scale_param_dict = {}
if self.header:
for i, header in enumerate(self.header):
if i in self.scale_column_idx:
param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
column_lower=self.column_min_value[i],
mean=self.mean[i],
std=self.std[i])
column_scale_param_dict[header] = param_obj
param_proto_obj = ScaleParam(col_scale_param=column_scale_param_dict,
header=self.header,
need_run=need_run)
return param_proto_obj
def export_model(self, need_run):
meta_obj = self.__get_meta()
param_obj = self.__get_param(need_run)
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result
| 37.05618 | 126 | 0.595512 | [
"Apache-2.0"
] | 0xqq/FATE | federatedml/feature/feature_scale/standard_scale.py | 6,596 | Python |
# -*- coding: utf-8 -*
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf.urls import url
from iam.contrib.django.dispatcher.dispatchers import (
DjangoBasicResourceApiDispatcher,
success_response,
)
from iam.resource.utils import get_filter_obj, get_page_obj
from auth.bkiam import IAM_REGISTERED_SYSTEM
from auth.bkiam import resources
from auth.bkiam.backend import iam
from auth.bkiam.resources import BKDataResourceProvider
class BKDataDjangoBasicResourceApiDispatcher(DjangoBasicResourceApiDispatcher):
def _dispatch_search_instance(self, request, data, request_id):
options = self._get_options(request)
filter_obj = get_filter_obj(data.get("filter"), ["parent", "keyword"])
page_obj = get_page_obj(data.get("page"))
provider = self._provider[data["type"]]
pre_process = getattr(provider, "pre_search_instance", None)
if pre_process and callable(pre_process):
pre_process(filter_obj, page_obj, **options)
result = provider.list_instance(filter_obj, page_obj, **options)
return success_response(result.to_dict(), request_id)
def register_resources(dispatcher, resources_module):
for item in dir(resources):
if not item.endswith("ResourceProvider"):
continue
resource_class = getattr(resources_module, item)
if issubclass(resource_class, BKDataResourceProvider) and resource_class.resource_type is not None:
dispatcher.register(resource_class.resource_type, resource_class())
dispatcher = BKDataDjangoBasicResourceApiDispatcher(iam, IAM_REGISTERED_SYSTEM)
register_resources(dispatcher, resources)
urlpatterns = [url(r"^resource/api/v1/$", dispatcher.as_view([]), name="iamApi")]
| 44.882353 | 111 | 0.753277 | [
"MIT"
] | Chromico/bk-base | src/api/auth/bkiam/urls.py | 3,088 | Python |
#link (https://neps.academy/problem/443)
voltas, placas = input().split()
result = int(voltas) * int(placas)
numbers = []
resultado = result * float(str(0) + str('.') + str(1))
for x in range(2,11):
if int(resultado)==resultado:
numbers.append(int(resultado))
else:
numbers.append(int(resultado)+1)
resultado = result * float(str(0) + str('.') + str(x))
for x in numbers:
print(int(x), end=' ')
| 26.9375 | 58 | 0.607889 | [
"MIT"
] | Filipe-uefs/Algoritmos | Python/Hora da Corrida - SBC 2019.py | 431 | Python |
class UserError(Exception):
def __init__(self, message):
self.message = message
class UserNotFoundError(UserError):
pass
class UserAlreadyRegisteredError(UserError):
pass
class InvalidEmailError(UserError):
pass
class IncorrectPasswordError(UserError):
pass
| 13.454545 | 44 | 0.733108 | [
"MIT"
] | nealwobuhei/pricing-service | models/user/errors.py | 296 | Python |
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages = ['youbot_behavior_simple_test'],
package_dir = {'': 'src'}
)
setup(**d) | 21.727273 | 60 | 0.748954 | [
"BSD-3-Clause"
] | FlexBE/youbot_behaviors | behaviors/youbot_behavior_simple_test/setup.py | 239 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tethys_datasets', '0002_auto_20150119_1756'),
]
operations = [
migrations.CreateModel(
name='SpatialDatasetService',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=30)),
('engine', models.CharField(default=b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', max_length=200, choices=[(b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', b'GeoServer')])),
('endpoint', models.CharField(max_length=1024)),
('apikey', models.CharField(max_length=100, blank=True)),
('username', models.CharField(max_length=100, blank=True)),
('password', models.CharField(max_length=100, blank=True)),
],
options={
'verbose_name': 'Spatial Dataset Service',
'verbose_name_plural': 'Spatial Dataset Services',
},
bases=(models.Model,),
),
]
| 40.0625 | 227 | 0.613885 | [
"BSD-2-Clause"
] | CI-WATER/django-tethys_datasets | tethys_datasets/migrations/0003_spatialdatasetservice.py | 1,282 | Python |
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory('cell_images/train',
target_size=(100,100),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory('cell_images/test',
target_size=(100,100),
batch_size=32,
class_mode='binary')
model = keras.models.load_model("model.h5")
#model.compile(optimizer = 'Adam',loss = 'binary_crossentropy',metrics = ['accuracy'])
#checkpoints = ModelCheckpoint("checkpoints/weights.{epoch:02d}.h5",
# save_weights_only = False,
# verbose = 1)
#step_size_train = train_generator.n//train_generator.batch_size
model.fit_generator(train_generator,
steps_per_epoch=8000,
epochs=5,
validation_data=validation_generator,
validation_steps=800)
#callbacks = [checkpoints])
model.save("model_2.h5")
| 39.93617 | 86 | 0.546084 | [
"MIT"
] | We-Gold/Malaria | train_more.py | 1,877 | Python |
import subprocess
subprocess.call(["/usr/bin/python", "innotest.py"])
print "1-0"
subprocess.call(["/usr/bin/time","-v","-otiming", "./innotest", "0", "1", "0"])
print "4-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "0"])
print "8-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "0"])
print "16-0"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "0"])
print "1-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "100000"])
print "4-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "100000"])
print "8-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "100000"])
print "16-100K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "100000"])
print "1-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "1", "10000"])
print "4-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "4", "10000"])
print "8-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "8", "10000"])
print "16-10K"
subprocess.call(["/usr/bin/time","-v","-otiming","--append","./innotest", "0", "16", "10000"])
| 44.16129 | 95 | 0.569759 | [
"Apache-2.0"
] | AYCH-Inc/aych.tron.realm | test/experiments/bench.py | 1,369 | Python |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Nitin Madnani <[email protected]>
# Rami Al-Rfou' <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A module for interfacing with the Stanford taggers.
Tagger models need to be downloaded from http://nlp.stanford.edu/software
and the STANFORD_MODELS environment variable set (a colon-separated
list of paths).
For more details see the documentation for StanfordPOSTagger and StanfordNERTagger.
"""
import os
import tempfile
from subprocess import PIPE
import warnings
from nltk.internals import find_file, find_jar, config_java, java, _java_options, find_jars_within_path
from nltk.tag.api import TaggerI
from nltk import compat
_stanford_url = 'http://nlp.stanford.edu/software'
class StanfordTagger(TaggerI):
"""
An interface to Stanford taggers. Subclasses must define:
- ``_cmd`` property: A property that returns the command that will be
executed.
- ``_SEPARATOR``: Class constant that represents that character that
is used to separate the tokens from their tags.
- ``_JAR`` file: Class constant that represents the jar file name.
"""
_SEPARATOR = ''
_JAR = ''
def __init__(self, model_filename, path_to_jar=None, encoding='utf8', verbose=False, java_options='-mx1000m'):
if not self._JAR:
warnings.warn('The StanfordTagger class is not meant to be '
'instantiated directly. Did you mean StanfordPOSTagger or StanfordNERTagger?')
self._stanford_jar = find_jar(
self._JAR, path_to_jar,
searchpath=(), url=_stanford_url,
verbose=verbose)
self._stanford_model = find_file(model_filename,
env_vars=('STANFORD_MODELS',), verbose=verbose)
# Adding logging jar files to classpath
stanford_dir = os.path.split(self._stanford_jar)[0]
self._stanford_jar = tuple(find_jars_within_path(stanford_dir))
self._encoding = encoding
self.java_options = java_options
@property
def _cmd(self):
raise NotImplementedError
def tag(self, tokens):
# This function should return list of tuple rather than list of list
return sum(self.tag_sents([tokens]), [])
def tag_sents(self, sentences):
encoding = self._encoding
default_options = ' '.join(_java_options)
config_java(options=self.java_options, verbose=False)
# Create a temporary input file
_input_fh, self._input_file_path = tempfile.mkstemp(text=True)
cmd = list(self._cmd)
cmd.extend(['-encoding', encoding])
# Write the actual sentences to the temporary input file
_input_fh = os.fdopen(_input_fh, 'wb')
_input = '\n'.join((' '.join(x) for x in sentences))
if isinstance(_input, compat.text_type) and encoding:
_input = _input.encode(encoding)
_input_fh.write(_input)
_input_fh.close()
# Run the tagger and get the output
stanpos_output, _stderr = java(cmd, classpath=self._stanford_jar,
stdout=PIPE, stderr=PIPE)
stanpos_output = stanpos_output.decode(encoding)
# Delete the temporary file
os.unlink(self._input_file_path)
# Return java configurations to their default values
config_java(options=default_options, verbose=False)
return self.parse_output(stanpos_output, sentences)
def parse_output(self, text, sentences = None):
# Output the tagged sentences
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
sentence = []
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
sentence.append((''.join(word_tags[:-1]), word_tags[-1]))
tagged_sentences.append(sentence)
return tagged_sentences
class StanfordPOSTagger(StanfordTagger):
"""
A class for pos tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
then this jar file must be specified in the CLASSPATH envinroment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordPOSTagger
>>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
>>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
"""
_SEPARATOR = '_'
_JAR = 'stanford-postagger.jar'
def __init__(self, *args, **kwargs):
super(StanfordPOSTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
return ['edu.stanford.nlp.tagger.maxent.MaxentTagger',
'-model', self._stanford_model, '-textFile',
self._input_file_path, '-tokenize', 'false','-outputFormatOptions', 'keepEmptySentences']
class StanfordNERTagger(StanfordTagger):
"""
A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
then this jar file must be specified in the CLASSPATH envinroment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordNERTagger
>>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
>>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
[('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
"""
_SEPARATOR = '/'
_JAR = 'stanford-ner.jar'
_FORMAT = 'slashTags'
def __init__(self, *args, **kwargs):
super(StanfordNERTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
# Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer
return ['edu.stanford.nlp.ie.crf.CRFClassifier',
'-loadClassifier', self._stanford_model, '-textFile',
self._input_file_path, '-outputFormat', self._FORMAT, '-tokenizerFactory', 'edu.stanford.nlp.process.WhitespaceTokenizer', '-tokenizerOptions','\"tokenizeNLs=false\"']
def parse_output(self, text, sentences):
if self._FORMAT == 'slashTags':
# Joint together to a big list
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
tagged_sentences.append((''.join(word_tags[:-1]), word_tags[-1]))
# Separate it according to the input
result = []
start = 0
for sent in sentences:
result.append(tagged_sentences[start:start + len(sent)])
start += len(sent);
return result
raise NotImplementedError
| 39.685279 | 183 | 0.637375 | [
"Apache-2.0"
] | Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/nltk/tag/stanford.py | 7,818 | Python |
##############################################################################
#
# Copyright 2019 Leap Beyond Emerging Technologies B.V. (unless otherwise stated)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
"""
Docker step by step building blocks:
generate docker image, prepare model, and build model
"""
import logging
import os.path as osp
import subprocess
from jinja2 import Environment, PackageLoader
from ..utils import get_model_tag_and_version
from .. import __version__ as catwalk_version
logger = logging.getLogger(__name__)
def build_prep(model_path=".", server_config=None, server_port=9090):
"""Prepares the model to be Dockerised by generating a dockerimage"""
model_path = osp.abspath(model_path)
model_tag, model_version = get_model_tag_and_version(model_path)
if server_config is None:
server_config = "false"
kwargs = {
"catwalk_version": catwalk_version,
"model_tag": model_tag,
"model_version": model_version,
"server_config": server_config,
"server_port": server_port
}
files_to_create = ["Dockerfile", ".dockerignore"]
env = Environment(loader=PackageLoader("catwalk", "templates"))
for f in files_to_create:
template_file = f + ".j2"
if template_file[0] == ".":
template_file = template_file[1:]
template = env.get_template(template_file)
rendered = template.render(**kwargs)
out_path = osp.join(model_path, f)
with open(out_path, "w") as fp:
fp.write(rendered)
logger.info("Wrote " + f)
def build(model_path=".", docker_registry=None, push=True, no_cache=False): # pragma: no cover
"""Builds the model into a Dockerised model server image."""
model_path = osp.abspath(model_path)
model_tag, model_version = get_model_tag_and_version(model_path)
model_path = osp.abspath(model_path)
# Setup
image_name_parts = [model_tag]
if docker_registry is not None:
image_name_parts.insert(0, docker_registry)
image_name = "/".join(image_name_parts)
docker_tag = image_name + ":" + model_version
# Perform the docker build
cmd = ["docker", "build", model_path]
cmd += ["-t", docker_tag]
if no_cache:
cmd += ["--no-cache"]
logger.info(" ".join(cmd))
result = subprocess.run(cmd, check=True)
if result.returncode != 0:
return result.returncode
logger.info("Successfully built " + docker_tag)
if not push:
return 0
# Perform the docker push
cmd = ["docker", "push", docker_tag]
logger.info(" ".join(cmd))
result = subprocess.run(cmd, check=True)
return result.returncode
| 31.932039 | 95 | 0.652174 | [
"Apache-2.0"
] | LeapBeyond/catwalk | catwalk/cicd/build_steps.py | 3,289 | Python |
#!/usr/bin/env python
#
# Copyright (c) 2014 Google, Inc
#
# SPDX-License-Identifier: GPL-2.0+
#
# Intel microcode update tool
from optparse import OptionParser
import os
import re
import struct
import sys
MICROCODE_DIR = 'arch/x86/dts/microcode'
class Microcode:
"""Holds information about the microcode for a particular model of CPU.
Attributes:
name: Name of the CPU this microcode is for, including any version
information (e.g. 'm12206a7_00000029')
model: Model code string (this is cpuid(1).eax, e.g. '206a7')
words: List of hex words containing the microcode. The first 16 words
are the public header.
"""
def __init__(self, name, data):
self.name = name
# Convert data into a list of hex words
self.words = []
for value in ''.join(data).split(','):
hexval = value.strip()
if hexval:
self.words.append(int(hexval, 0))
# The model is in the 4rd hex word
self.model = '%x' % self.words[3]
def ParseFile(fname):
"""Parse a micrcode.dat file and return the component parts
Args:
fname: Filename to parse
Returns:
3-Tuple:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
re_date = re.compile('/\* *(.* [0-9]{4}) *\*/$')
re_license = re.compile('/[^-*+] *(.*)$')
re_name = re.compile('/\* *(.*)\.inc *\*/', re.IGNORECASE)
microcodes = {}
license_text = []
date = ''
data = []
name = None
with open(fname) as fd:
for line in fd:
line = line.rstrip()
m_date = re_date.match(line)
m_license = re_license.match(line)
m_name = re_name.match(line)
if m_name:
if name:
microcodes[name] = Microcode(name, data)
name = m_name.group(1).lower()
data = []
elif m_license:
license_text.append(m_license.group(1))
elif m_date:
date = m_date.group(1)
else:
data.append(line)
if name:
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def ParseHeaderFiles(fname_list):
"""Parse a list of header files and return the component parts
Args:
fname_list: List of files to parse
Returns:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
microcodes = {}
license_text = []
date = ''
name = None
for fname in fname_list:
name = os.path.basename(fname).lower()
name = os.path.splitext(name)[0]
data = []
with open(fname) as fd:
license_start = False
license_end = False
for line in fd:
line = line.rstrip()
if len(line) >= 2:
if line[0] == '/' and line[1] == '*':
license_start = True
continue
if line[0] == '*' and line[1] == '/':
license_end = True
continue
if license_start and not license_end:
# Ignore blank line
if len(line) > 0:
license_text.append(line)
continue
# Omit anything after the last comma
words = line.split(',')[:-1]
data += [word + ',' for word in words]
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def List(date, microcodes, model):
"""List the available microcode chunks
Args:
date: Date of the microcode file
microcodes: Dict of Microcode objects indexed by name
model: Model string to search for, or None
"""
print 'Date: %s' % date
if model:
mcode_list, tried = FindMicrocode(microcodes, model.lower())
print 'Matching models %s:' % (', '.join(tried))
else:
print 'All models:'
mcode_list = [microcodes[m] for m in microcodes.keys()]
for mcode in mcode_list:
print '%-20s: model %s' % (mcode.name, mcode.model)
def FindMicrocode(microcodes, model):
"""Find all the microcode chunks which match the given model.
This model is something like 306a9 (the value returned in eax from
cpuid(1) when running on Intel CPUs). But we allow a partial match,
omitting the last 1 or two characters to allow many families to have the
same microcode.
If the model name is ambiguous we return a list of matches.
Args:
microcodes: Dict of Microcode objects indexed by name
model: String containing model name to find
Returns:
Tuple:
List of matching Microcode objects
List of abbreviations we tried
"""
# Allow a full name to be used
mcode = microcodes.get(model)
if mcode:
return [mcode], []
tried = []
found = []
for i in range(3):
abbrev = model[:-i] if i else model
tried.append(abbrev)
for mcode in microcodes.values():
if mcode.model.startswith(abbrev):
found.append(mcode)
if found:
break
return found, tried
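# Example of the partial matching above (model strings are illustrative): for
# model '306a9' the abbreviations '306a9', '306a' and '306' are tried in turn,
# so an entry whose model string is '306a8' would still match on '306a'.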
def CreateFile(date, license_text, mcodes, outfile):
"""Create a microcode file in U-Boot's .dtsi format
Args:
date: String containing date of original microcode file
license: List of text lines for the license file
mcodes: Microcode objects to write (normally only 1)
outfile: Filename to write to ('-' for stdout)
"""
out = '''/*%s
* ---
* This is a device tree fragment. Use #include to add these properties to a
* node.
*
* Date: %s
*/
compatible = "intel,microcode";
intel,header-version = <%d>;
intel,update-revision = <%#x>;
intel,date-code = <%#x>;
intel,processor-signature = <%#x>;
intel,checksum = <%#x>;
intel,loader-revision = <%d>;
intel,processor-flags = <%#x>;
/* The first 48-bytes are the public header which repeats the above data */
data = <%s
\t>;'''
words = ''
add_comments = len(mcodes) > 1
for mcode in mcodes:
if add_comments:
words += '\n/* %s */' % mcode.name
for i in range(len(mcode.words)):
if not (i & 3):
words += '\n'
val = mcode.words[i]
# Change each word so it will be little-endian in the FDT
# This data is needed before RAM is available on some platforms so
# we cannot do an endianness swap on boot.
val = struct.unpack("<I", struct.pack(">I", val))[0]
words += '\t%#010x' % val
# Use the first microcode for the headers
mcode = mcodes[0]
# Take care to avoid adding a space before a tab
text = ''
for line in license_text:
if line[0] == '\t':
text += '\n *' + line
else:
text += '\n * ' + line
args = [text, date]
args += [mcode.words[i] for i in range(7)]
args.append(words)
if outfile == '-':
print out % tuple(args)
else:
if not outfile:
if not os.path.exists(MICROCODE_DIR):
print >> sys.stderr, "Creating directory '%s'" % MICROCODE_DIR
os.makedirs(MICROCODE_DIR)
outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
print >> sys.stderr, "Writing microcode for '%s' to '%s'" % (
', '.join([mcode.name for mcode in mcodes]), outfile)
with open(outfile, 'w') as fd:
print >> fd, out % tuple(args)
def MicrocodeTool():
"""Run the microcode tool"""
commands = 'create,license,list'.split(',')
parser = OptionParser()
parser.add_option('-d', '--mcfile', type='string', action='store',
help='Name of microcode.dat file')
parser.add_option('-H', '--headerfile', type='string', action='append',
help='Name of .h file containing microcode')
parser.add_option('-m', '--model', type='string', action='store',
help="Model name to extract ('all' for all)")
parser.add_option('-M', '--multiple', type='string', action='store',
help="Allow output of multiple models")
parser.add_option('-o', '--outfile', type='string', action='store',
help='Filename to use for output (- for stdout), default is'
' %s/<name>.dtsi' % MICROCODE_DIR)
parser.usage += """ command
Process an Intel microcode file (use -h for help). Commands:
create Create microcode .dtsi file for a model
list List available models in microcode file
license Print the license
Typical usage:
./tools/microcode-tool -d microcode.dat -m 306a create
This will find the appropriate file and write it to %s.""" % MICROCODE_DIR
(options, args) = parser.parse_args()
if not args:
parser.error('Please specify a command')
cmd = args[0]
if cmd not in commands:
parser.error("Unknown command '%s'" % cmd)
    if options.headerfile and options.mcfile:
parser.error("You must specify either header files or a microcode file, not both")
if options.headerfile:
date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
elif options.mcfile:
date, license_text, microcodes = ParseFile(options.mcfile)
else:
parser.error('You must specify a microcode file (or header files)')
if cmd == 'list':
List(date, microcodes, options.model)
elif cmd == 'license':
print '\n'.join(license_text)
elif cmd == 'create':
if not options.model:
parser.error('You must specify a model to create')
model = options.model.lower()
if options.model == 'all':
options.multiple = True
mcode_list = microcodes.values()
tried = []
else:
mcode_list, tried = FindMicrocode(microcodes, model)
if not mcode_list:
parser.error("Unknown model '%s' (%s) - try 'list' to list" %
(model, ', '.join(tried)))
if not options.multiple and len(mcode_list) > 1:
parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
"to list or specify a particular file" %
(model, ', '.join(tried),
', '.join([m.name for m in mcode_list])))
CreateFile(date, license_text, mcode_list, options.outfile)
else:
parser.error("Unknown command '%s'" % cmd)
if __name__ == "__main__":
MicrocodeTool()
| 34.823899 | 90 | 0.564475 | [
"Apache-2.0"
] | CAFA1/afl | qemu_mode/qemu-2.10.0/roms/u-boot/tools/microcode-tool.py | 11,074 | Python |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2007-2008 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact [email protected] if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
from PySide import QtCore, QtGui
try:
from PySide.phonon import Phonon
except ImportError:
app = QtGui.QApplication(sys.argv)
QtGui.QMessageBox.critical(None, "Phonon Capabilities",
"Your Qt installation does not have Phonon support.",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
sys.exit(1)
class Window(QtGui.QWidget):
def __init__(self):
        super(Window, self).__init__()
self.setupUi()
self.updateWidgets()
notifier = Phonon.BackendCapabilities.notifier()
notifier.capabilitiesChanged.connect(self.updateWidgets)
notifier.availableAudioOutputDevicesChanged.connect(self.updateWidgets)
def updateWidgets(self):
# Output devices.
devices = Phonon.BackendCapabilities.availableAudioOutputDevices()
model = Phonon.AudioOutputDeviceModel(devices)
self.devicesListView.setModel(model)
# MIME types.
self.mimeListWidget.clear()
for mimeType in Phonon.BackendCapabilities.availableMimeTypes():
item = QtGui.QListWidgetItem(self.mimeListWidget)
item.setText(mimeType)
# Effects.
self.effectsTreeWidget.clear()
for effect in Phonon.BackendCapabilities.availableAudioEffects():
item = QtGui.QTreeWidgetItem(self.effectsTreeWidget)
item.setText(0, "Effect")
item.setText(1, effect.name())
item.setText(2, effect.description())
# Effects parameters.
for parameter in Phonon.Effect(effect, self).parameters():
defaultValue = parameter.defaultValue()
minimumValue = parameter.minimumValue()
maximumValue = parameter.maximumValue()
valueString = "%s / %s / %s" % (defaultValue, minimumValue, maximumValue)
parameterItem = QtGui.QTreeWidgetItem(item)
parameterItem.setText(0, "Parameter")
parameterItem.setText(1, parameter.name())
parameterItem.setText(2, parameter.description())
parameterItem.setText(3, str(parameter.type()))
parameterItem.setText(4, valueString)
for i in range(self.effectsTreeWidget.columnCount()):
if i == 0:
self.effectsTreeWidget.setColumnWidth(0, 150)
elif i == 2:
self.effectsTreeWidget.setColumnWidth(2, 350)
else:
self.effectsTreeWidget.resizeColumnToContents(i)
def setupUi(self):
self.setupBackendBox()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.backendBox)
self.setLayout(layout)
self.setWindowTitle("Backend Capabilities Example")
def setupBackendBox(self):
self.devicesLabel = QtGui.QLabel("Available Audio Devices:")
self.devicesListView = QtGui.QListView()
self.mimeTypesLabel = QtGui.QLabel("Supported MIME Types:")
self.mimeListWidget = QtGui.QListWidget()
self.effectsLabel = QtGui.QLabel("Available Audio Effects:")
headerLabels = ("Type", "Name", "Description", "Value Type",
"Default/Min/Max Values")
self.effectsTreeWidget = QtGui.QTreeWidget()
self.effectsTreeWidget.setHeaderLabels(headerLabels)
self.effectsTreeWidget.setColumnCount(5)
layout = QtGui.QGridLayout()
layout.addWidget(self.devicesLabel, 0, 0)
layout.addWidget(self.devicesListView, 1, 0)
layout.addWidget(self.mimeTypesLabel, 0, 1)
layout.addWidget(self.mimeListWidget, 1, 1)
layout.addWidget(self.effectsLabel, 2, 0)
layout.addWidget(self.effectsTreeWidget, 3, 0, 2, 2)
layout.setRowStretch(3, 100)
self.backendBox = QtGui.QGroupBox("Backend Capabilities")
self.backendBox.setLayout(layout)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.setApplicationName("Phonon Capabilities Example")
window = Window()
window.show()
sys.exit(app.exec_())
| 35.822695 | 89 | 0.643833 | [
"EPL-1.0"
] | Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/phonon/capabilities.py | 5,051 | Python |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for loading data from a Google service account file."""
import io
import json
import six
from google.auth import crypt
def from_dict(data, require=None):
"""Validates a dictionary containing Google service account data.
Creates and returns a :class:`google.auth.crypt.Signer` instance from the
private key specified in the data.
Args:
data (Mapping[str, str]): The service account data
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
google.auth.crypt.Signer: A signer created from the private key in the
service account file.
Raises:
ValueError: if the data was in the wrong format, or if one of the
required keys is missing.
"""
keys_needed = set(require if require is not None else [])
missing = keys_needed.difference(six.iterkeys(data))
if missing:
raise ValueError(
"Service account info was not in the expected format, missing "
"fields {}.".format(", ".join(missing))
)
# Create a signer.
signer = crypt.RSASigner.from_service_account_info(data)
return signer
def from_filename(filename, require=None):
"""Reads a Google service account JSON file and returns its parsed info.
Args:
filename (str): The path to the service account .json file.
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
info and a signer instance.
"""
with io.open(filename, "r", encoding="utf-8") as json_file:
data = json.load(json_file)
return data, from_dict(data, require=require)
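# Typical usage sketch (the path and the required keys below are illustrative,
# not part of this module's contract):
#
#   data, signer = from_filename(
#       "/path/to/service_account.json",
#       require=["client_email", "token_uri"])
#   print(data["client_email"])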
| 31.453333 | 78 | 0.680797 | [
"Apache-2.0"
] | CodingFanSteve/google-auth-library-python | google/auth/_service_account_info.py | 2,359 | Python |
"""
ASGI config for infosafe project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'infosafe.settings')
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 | [
"Apache-2.0"
] | royaleagle-dev/infosafe | infosafe/asgi.py | 393 | Python |
from rest_framework import generics
from rest_framework.exceptions import NotFound
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from workprogramsapp.expertise.models import UserExpertise, ExpertiseComments, Expertise
from workprogramsapp.expertise.serializers import UserExpertiseSerializer, CommentSerializer, ExpertiseSerializer
from workprogramsapp.permissions import IsMemberOfExpertise, IsRpdDeveloperOrReadOnly, IsMemberOfUserExpertise, \
IsExpertiseMaster, IsWorkProgramMemberOfExpertise
from workprogramsapp.workprogram_additions.models import UserStructuralUnit
class UserExpertiseListView(generics.ListAPIView):
"""
    Returns all the expertise information for an expert (the expertise reviews the user takes part in are resolved automatically from the user's token).
    To get one specific expertise record for this user, pass its id.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
return UserExpertise.objects.filter(expertise=self.kwargs['pk'], expert=self.request.user)
else:
return UserExpertise.objects.filter(expert=self.request.user)
class UserExpertiseCreateView(generics.CreateAPIView):
"""
    Create an expertise record
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseCommentsView(generics.ListAPIView):
"""
    View for reading and posting comments.
    Comments are read or created for the expertise whose id is given in the URL;
    for GET requests an optional "block" query parameter selects a comment block.
"""
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
if self.request.query_params.get('block') != None:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'],
comment_block=self.request.query_params.get('block'))
else:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'])
else:
return ExpertiseComments.objects.all()
class ExpertiseCommentCreateView(generics.CreateAPIView):
"""
    Creates a comment on an expertise.
"""
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseWorkProgramView(generics.RetrieveAPIView):
    # TODO: Why is this view needed at all?
"""
    Returns all expertise entries linked to the given work program id.
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsWorkProgramMemberOfExpertise, IsRpdDeveloperOrReadOnly]
def get_object(self):
try:
return Expertise.objects.get(work_program__id=self.kwargs['pk'])
except Expertise.DoesNotExist:
raise NotFound()
class ExpertiseListView(generics.ListAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
def list(self, request, **kwargs):
        # Build the queryset according to the requesting user's role.
if request.user.groups.filter(name="expertise_master"):
queryset = Expertise.objects.all()
elif UserStructuralUnit.objects.filter(user=request.user, status__in=["leader", "deputy"]):
queryset = Expertise.objects.filter(
work_program__structural_unit__user_in_structural_unit__user=request.user,
work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct() | \
Expertise.objects.filter(expertse_users_in_rpd__expert=request.user).distinct()
else:
queryset = Expertise.objects.filter(expertse_users_in_rpd__expert=request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
class ExpertiseViewById(generics.RetrieveAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ExpertiseCreateView(generics.CreateAPIView):
"""
    Creates an expertise.
    Automatically adds the creating user as the expertise leader
    (see the serializer for details on expertise creation).
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsRpdDeveloperOrReadOnly]
class ChangeExpertiseView(generics.UpdateAPIView):
"""
    Updates an expertise.
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ChangeUserExpertiseView(generics.UpdateAPIView):
"""
    Updates an individual user's expertise.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
class DeleteUserExpertise(generics.DestroyAPIView):
"""
    Deletes an individual user's expertise record.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsExpertiseMaster]
| 38.253247 | 135 | 0.733831 | [
"MIT"
] | 18ariana/analytics_backend | application/workprogramsapp/expertise/views.py | 6,570 | Python |
import datetime
from ..errors import NaiveDateTimeNotAllowed
from ..ewsdatetime import EWSDateTime
from ..util import create_element, set_xml_value, xml_text_to_value, peek, TNS, MNS
from ..version import EXCHANGE_2010
from .common import EWSService
class GetServerTimeZones(EWSService):
"""
MSDN: https://msdn.microsoft.com/en-us/library/office/dd899371(v=exchg.150).aspx
"""
SERVICE_NAME = 'GetServerTimeZones'
element_container_name = '{%s}TimeZoneDefinitions' % MNS
def call(self, timezones=None, return_full_timezone_data=False):
if self.protocol.version.build < EXCHANGE_2010:
raise NotImplementedError('%s is only supported for Exchange 2010 servers and later' % self.SERVICE_NAME)
return self._get_elements(payload=self.get_payload(
timezones=timezones,
return_full_timezone_data=return_full_timezone_data
))
def get_payload(self, timezones, return_full_timezone_data):
payload = create_element(
'm:%s' % self.SERVICE_NAME,
attrs=dict(ReturnFullTimeZoneData='true' if return_full_timezone_data else 'false'),
)
if timezones is not None:
is_empty, timezones = peek(timezones)
if not is_empty:
tz_ids = create_element('m:Ids')
for timezone in timezones:
tz_id = set_xml_value(create_element('t:Id'), timezone.ms_id, version=self.protocol.version)
tz_ids.append(tz_id)
payload.append(tz_ids)
return payload
def _get_elements_in_container(self, container):
for timezonedef in container:
tz_id = timezonedef.get('Id')
tz_name = timezonedef.get('Name')
tz_periods = self._get_periods(timezonedef)
tz_transitions_groups = self._get_transitions_groups(timezonedef)
tz_transitions = self._get_transitions(timezonedef)
yield (tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups)
@staticmethod
def _get_periods(timezonedef):
tz_periods = {}
periods = timezonedef.find('{%s}Periods' % TNS)
for period in periods.findall('{%s}Period' % TNS):
# Convert e.g. "trule:Microsoft/Registry/W. Europe Standard Time/2006-Daylight" to (2006, 'Daylight')
p_year, p_type = period.get('Id').rsplit('/', 1)[1].split('-')
tz_periods[(int(p_year), p_type)] = dict(
name=period.get('Name'),
bias=xml_text_to_value(period.get('Bias'), datetime.timedelta)
)
return tz_periods
@staticmethod
def _get_transitions_groups(timezonedef):
from ..recurrence import WEEKDAY_NAMES
tz_transitions_groups = {}
transitiongroups = timezonedef.find('{%s}TransitionsGroups' % TNS)
if transitiongroups is not None:
for transitiongroup in transitiongroups.findall('{%s}TransitionsGroup' % TNS):
tg_id = int(transitiongroup.get('Id'))
tz_transitions_groups[tg_id] = []
for transition in transitiongroup.findall('{%s}Transition' % TNS):
# Apply same conversion to To as for period IDs
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
))
for transition in transitiongroup.findall('{%s}RecurringDayTransition' % TNS):
# Apply same conversion to To as for period IDs
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
occurrence = xml_text_to_value(transition.find('{%s}Occurrence' % TNS).text, int)
if occurrence == -1:
# See TimeZoneTransition.from_xml()
occurrence = 5
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
offset=xml_text_to_value(transition.find('{%s}TimeOffset' % TNS).text, datetime.timedelta),
iso_month=xml_text_to_value(transition.find('{%s}Month' % TNS).text, int),
iso_weekday=WEEKDAY_NAMES.index(transition.find('{%s}DayOfWeek' % TNS).text) + 1,
occurrence=occurrence,
))
return tz_transitions_groups
@staticmethod
def _get_transitions(timezonedef):
tz_transitions = {}
transitions = timezonedef.find('{%s}Transitions' % TNS)
if transitions is not None:
for transition in transitions.findall('{%s}Transition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
tz_transitions[tg_id] = None
for transition in transitions.findall('{%s}AbsoluteDateTransition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
try:
t_date = xml_text_to_value(transition.find('{%s}DateTime' % TNS).text, EWSDateTime).date()
except NaiveDateTimeNotAllowed as e:
# We encountered a naive datetime. Don't worry. we just need the date
t_date = e.args[0].date()
tz_transitions[tg_id] = t_date
return tz_transitions
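# Minimal usage sketch (illustrative only): it assumes an already-configured
# exchangelib ``protocol`` object; the constructor argument and account setup
# are omitted here. Each yielded tuple follows the order produced by
# _get_elements_in_container above.
#
#   svc = GetServerTimeZones(protocol)
#   for tz_id, tz_name, periods, transitions, transition_groups in svc.call():
#       print(tz_id, tz_name)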
| 50.12069 | 117 | 0.596491 | [
"BSD-2-Clause"
] | dsanghan/exchangelib | exchangelib/services/get_server_time_zones.py | 5,814 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Embedding Projector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import json
import os
import numpy as np
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.projector import projector_plugin
class ProjectorAppTest(test.TestCase):
def setUp(self):
self.log_dir = self.get_temp_dir()
def testRunsWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['.'])
def testRunsWithNoCheckpoint(self):
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testRunsWithInvalidModelCheckpointPath(self):
checkpoint_file = os.path.join(self.log_dir, 'checkpoint')
f = open(checkpoint_file, 'w')
f.write('model_checkpoint_path: "does_not_exist"\n')
f.write('all_model_checkpoint_paths: "does_not_exist"\n')
f.close()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testInfoWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
info_json = self._GetJson('/data/plugin/projector/info?run=.')
self.assertItemsEqual(info_json['embeddings'], [{
'tensorShape': [1, 2],
'tensorName': 'var1'
}, {
'tensorShape': [10, 10],
'tensorName': 'var2'
}, {
'tensorShape': [100, 100],
'tensorName': 'var3'
}])
def testTensorWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
url = '/data/plugin/projector/tensor?run=.&name=var1'
tensor_bytes = self._Get(url).data
tensor = np.reshape(np.fromstring(tensor_bytes, dtype='float32'), [1, 2])
expected_tensor = np.array([[6, 6]], dtype='float32')
self.assertTrue(np.array_equal(tensor, expected_tensor))
def _SetupWSGIApp(self):
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
projector = projector_plugin.ProjectorPlugin()
projector.get_plugin_apps(multiplexer, self.log_dir)
plugins = {'projector': projector}
wsgi_app = application.TensorBoardWSGIApp(
self.log_dir, plugins, multiplexer, reload_interval=0)
self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def _Get(self, path):
return self.server.get(path)
def _GetJson(self, path):
response = self.server.get(path)
data = response.data
if response.headers.get('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, io.BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def _GenerateProjectorTestData(self):
config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
config = ProjectorConfig()
embedding = config.embeddings.add()
# Add an embedding by its canonical tensor name.
embedding.tensor_name = 'var1:0'
config_pbtxt = text_format.MessageToString(config)
with gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with ops.Graph().as_default():
sess = session.Session()
checkpoint_path = os.path.join(self.log_dir, 'model')
variable_scope.get_variable(
'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
variable_scope.get_variable('var2', [10, 10])
variable_scope.get_variable('var3', [100, 100])
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
saver.save(sess, checkpoint_path)
if __name__ == '__main__':
test.main()
| 36.413793 | 97 | 0.726136 | [
"Apache-2.0"
] | danielmawhirter/tf_1_0 | tensorflow/tensorboard/plugins/projector/projector_plugin_test.py | 5,280 | Python |
#!/usr/bin/python
import re
import sys
import glob
import subprocess
BLACKLIST = [
"googlestreetview"
]
def main():
if len(sys.argv) > 1:
split_current, split_number = (int(v) for v in sys.argv[1].split("/"))
split_current = split_current - 1
else:
split_current, split_number = (0, 1)
return_code, split_current = check("contribs/gmf/apps", "", "contribs/gmf/apps/", split_current, split_number)
exit(return_code)
def check(folder, file_postfix, make_prefix, split_current, split_number):
return_code = 0
re_ = re.compile(r"^{}/([a-zA-Z_]*){}$".format(re.escape(folder), re.escape(file_postfix)))
for ex in glob.glob("{}/*{}".format(folder, file_postfix)):
match = re_.search(ex)
if match is not None and match.group(1) not in BLACKLIST:
if split_current == 0:
new_code = subprocess.call(
["make", ".build/{}{}.check.timestamp".format(make_prefix, match.group(1))]
)
print('The command "make .build/{}{}.check.timestamp" exited with {}'.format(
make_prefix, match.group(1), new_code
))
return_code = max(return_code, new_code)
split_current = (split_current + 1) % split_number
return return_code, split_current
if __name__ == '__main__':
main()
| 31.363636 | 114 | 0.603623 | [
"MIT"
] | Geoportail-Luxembourg/geoportailv3 | geoportal/geoportailv3_geoportal/static-ngeo/ngeo/buildtools/test_examples.py | 1,380 | Python |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def compute_lr(target_lr, n_epochs, train_set_size, batch_size, warmup):
total = (n_epochs - 1) * int(np.ceil(train_set_size / batch_size))
progress = [float(t) / total for t in range(0, total)]
factor = [p / warmup if p < warmup else max((p - 1.) / (warmup - 1.), 0.) for p in progress]
lr = [f * target_lr for f in factor]
return lr
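# Shape of the schedule produced above (illustrative numbers only): the rate
# ramps linearly up to target_lr over the first `warmup` fraction of steps and
# then decays linearly towards zero.
#
#   lrs = compute_lr(target_lr=3e-5, n_epochs=3, train_set_size=1000,
#                    batch_size=50, warmup=0.1)
#   # len(lrs) == 40; the peak equals target_lr right at the end of the warm-up.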
def load_train_log(directories, num_epochs, target_lr, **kwargs):
parts = []
for d, ep, t_lr in zip(directories, num_epochs, target_lr):
files = ['{}/loss_ep{}.pkl'.format(d, i) for i in range(1, ep)]
files = [f for f in files if os.path.exists(f)]
part = pd.concat([pd.read_pickle(f) for f in files])
part['lr'] = compute_lr(target_lr=t_lr, n_epochs=ep, **kwargs)[0:len(part)]
parts.append(part)
return pd.concat(parts).reset_index(drop=True)
def plot_loss_against_lr(loss, wnd_size=6000):
fig = plt.figure(figsize=(11.69, 8.27))
ax1 = fig.add_subplot(111)
ax1.set_xlabel('time')
ax1.set_ylabel('loss', color='b')
ax1.plot(loss.loss.rolling(wnd_size).mean(), color='b')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('learning rate', color='r')
ax2.plot(loss.lr.rolling(wnd_size).mean(), 'r')
| 28.666667 | 96 | 0.648983 | [
"Apache-2.0"
] | qurator-spk/sbb_ned | qurator/sbb_ned/models/evaluation.py | 1,376 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'monetdbe'
copyright = '2021, MonetDB Solutions'
author = 'Niels Nes'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# This is needed to keep readthedocs happy
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.169492 | 79 | 0.666326 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | MonetDB/MonetDB | documentation/monetdbe/conf.py | 1,957 | Python |
import httpretty
import json
import textwrap
import pygerduty.events
from pygerduty.events import INTEGRATION_API_URL
from pygerduty.common import Requester
@httpretty.activate
def test_create_event():
body = textwrap.dedent("""
{
"status": "success",
"message": "Event processed",
"incident_key": "srv01/HTTP"
}
""")
httpretty.register_uri(
httpretty.POST, INTEGRATION_API_URL,
body=body, status=200)
requester = Requester()
p = pygerduty.events.Events('my_key', requester)
request_json = open('tests/fixtures/event_request.json').read()
request = json.loads(request_json)
response = p.create_event(
request['description'],
request['event_type'],
request['details'],
request['incident_key'],
)
assert response == 'srv01/HTTP'
| 22.263158 | 67 | 0.665485 | [
"MIT"
] | amckenna-pinterest/pygerduty | tests/events_test.py | 846 | Python |
#!/usr/bin/env python
import sys
from Bio import SeqIO
min_length, fasta_file_path = sys.argv[1:]
with open(fasta_file_path.replace('fa', 'filter{}.fa'.format(min_length)), 'w') as filtered_fasta:
    with open(fasta_file_path, 'r') as input_fasta:
def filtered_contigs_generator(min):
for contig in SeqIO.parse(input_fasta, 'fasta'):
if len(contig) >= min:
yield contig
SeqIO.write(filtered_contigs_generator(int(min_length)), filtered_fasta, 'fasta')
| 38.916667 | 98 | 0.745182 | [
"CC0-1.0"
] | CFSAN-Biostatistics/filter_contigs | filter_contigs.py | 467 | Python |
class User(object):
"""
"""
def __init__(self, user_id, user_name, user_cereal, user_midday, user_dinner):
self.user_id = user_id
self.user_name = user_name
self.user_cereal = user_cereal
self.user_midday = user_midday
self.user_dinner = user_dinner
if __name__ == '__main__':
pass
| 21.375 | 82 | 0.640351 | [
"MIT"
] | serviceoutsource/ML-AI | hyperflex_recommend/enpity/User.py | 342 | Python |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from datetime import datetime, timedelta
from awscli.formatter import get_formatter
from awscli.arguments import CustomArgument
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline import translator
from awscli.customizations.datapipeline.createdefaultroles \
import CreateDefaultRoles
from awscli.customizations.datapipeline.listrunsformatter \
import ListRunsFormatter
DEFINITION_HELP_TEXT = """\
The JSON pipeline definition. If the pipeline definition
is in a file you can use the file://<filename> syntax to
specify a filename.
"""
PARAMETER_OBJECTS_HELP_TEXT = """\
The JSON parameter objects. If the parameter objects are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter objects provided
on command line would replace the one in definition.
"""
PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. If the parameter values are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter values provided
on command line would replace the one in definition.
"""
INLINE_PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. You can specify these as
key-value pairs in the key=value format. Multiple parameters
are separated by a space. For list type parameter values
you can use the same key name and specify each value as
a key value pair. e.g. arrayValue=value1 arrayValue=value2
"""
class DocSectionNotFoundError(Exception):
pass
class ParameterDefinitionError(Exception):
def __init__(self, msg):
full_msg = ("Error in parameter: %s\n" % msg)
super(ParameterDefinitionError, self).__init__(full_msg)
self.msg = msg
def register_customizations(cli):
cli.register(
'building-argument-table.datapipeline.put-pipeline-definition',
add_pipeline_definition)
cli.register(
'building-argument-table.datapipeline.activate-pipeline',
activate_pipeline_definition)
cli.register(
'after-call.datapipeline.GetPipelineDefinition',
translate_definition)
cli.register(
'building-command-table.datapipeline',
register_commands)
cli.register_last(
'doc-output.datapipeline.get-pipeline-definition',
document_translation)
def register_commands(command_table, session, **kwargs):
command_table['list-runs'] = ListRunsCommand(session)
command_table['create-default-roles'] = CreateDefaultRoles(session)
def document_translation(help_command, **kwargs):
# Remove all the writes until we get to the output.
# I don't think this is the ideal way to do this, we should
# improve our plugin/doc system to make this easier.
doc = help_command.doc
current = ''
while current != '======\nOutput\n======':
try:
current = doc.pop_write()
except IndexError:
# This should never happen, but in the rare case that it does
# we should be raising something with a helpful error message.
raise DocSectionNotFoundError(
'Could not find the "output" section for the command: %s'
% help_command)
doc.write('======\nOutput\n======')
doc.write(
'\nThe output of this command is the pipeline definition, which'
' is documented in the '
'`Pipeline Definition File Syntax '
'<http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/'
'dp-writing-pipeline-definition.html>`__')
def add_pipeline_definition(argument_table, **kwargs):
argument_table['pipeline-definition'] = PipelineDefinitionArgument(
'pipeline-definition', required=True,
help_text=DEFINITION_HELP_TEXT)
argument_table['parameter-objects'] = ParameterObjectsArgument(
'parameter-objects', required=False,
help_text=PARAMETER_OBJECTS_HELP_TEXT)
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri',
required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT)
    # The pipeline-objects argument is no longer required because
# a user can provide a pipeline-definition instead.
# get-pipeline-definition also displays the output in the
# translated format.
del argument_table['pipeline-objects']
def activate_pipeline_definition(argument_table, **kwargs):
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri', required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT,
)
def translate_definition(parsed, **kwargs):
translator.api_to_definition(parsed)
def convert_described_objects(api_describe_objects, sort_key_func=None):
# We need to take a field list that looks like this:
# {u'key': u'@sphere', u'stringValue': u'INSTANCE'},
# into {"@sphere": "INSTANCE}.
# We convert the fields list into a field dict.
converted = []
for obj in api_describe_objects:
new_fields = {
'@id': obj['id'],
'name': obj['name'],
}
for field in obj['fields']:
new_fields[field['key']] = field.get('stringValue',
field.get('refValue'))
converted.append(new_fields)
if sort_key_func is not None:
converted.sort(key=sort_key_func)
return converted
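# Example of the flattening performed above (values are illustrative):
#
#   api_objs = [{'id': 'df-001', 'name': 'run-1',
#                'fields': [{'key': '@sphere', 'stringValue': 'INSTANCE'}]}]
#   convert_described_objects(api_objs)
#   # -> [{'@id': 'df-001', 'name': 'run-1', '@sphere': 'INSTANCE'}]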
class QueryArgBuilder(object):
"""
Convert CLI arguments to Query arguments used by QueryObject.
"""
def __init__(self, current_time=None):
if current_time is None:
current_time = datetime.utcnow()
self.current_time = current_time
def build_query(self, parsed_args):
selectors = []
if parsed_args.start_interval is None and \
parsed_args.schedule_interval is None:
# If no intervals are specified, default
# to a start time of 4 days ago and an end time
# of right now.
end_datetime = self.current_time
start_datetime = end_datetime - timedelta(days=4)
start_time_str = start_datetime.strftime('%Y-%m-%dT%H:%M:%S')
end_time_str = end_datetime.strftime('%Y-%m-%dT%H:%M:%S')
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
else:
self._build_schedule_times(selectors, parsed_args)
if parsed_args.status is not None:
self._build_status(selectors, parsed_args)
query = {'selectors': selectors}
return query
def _build_schedule_times(self, selectors, parsed_args):
if parsed_args.start_interval is not None:
start_time_str = parsed_args.start_interval[0]
end_time_str = parsed_args.start_interval[1]
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
if parsed_args.schedule_interval is not None:
start_time_str = parsed_args.schedule_interval[0]
end_time_str = parsed_args.schedule_interval[1]
selectors.append({
'fieldName': '@scheduledStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
def _build_status(self, selectors, parsed_args):
selectors.append({
'fieldName': '@status',
'operator': {
'type': 'EQ',
'values': [status.upper() for status in parsed_args.status]
}
})
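# Sketch of the query built for a typical invocation (the parsed-args object
# below is a hypothetical stand-in for what argparse would produce):
#
#   args = argparse.Namespace(start_interval=None, schedule_interval=None,
#                             status=['finished'])
#   query = QueryArgBuilder().build_query(args)
#   # query['selectors'] holds an '@actualStartTime' BETWEEN selector covering
#   # the last four days plus an '@status' EQ ['FINISHED'] selector.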
class PipelineDefinitionArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
api_objects = translator.definition_to_api_objects(parsed)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['pipelineObjects'] = api_objects
# Use Parameter objects and values from def if not already provided
if 'parameterObjects' not in parameters \
and parameter_objects is not None:
parameters['parameterObjects'] = parameter_objects
if 'parameterValues' not in parameters \
and parameter_values is not None:
parameters['parameterValues'] = parameter_values
class ParameterObjectsArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameters['parameterObjects'] = parameter_objects
class ParameterValuesArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parsed = json.loads(value)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
class ParameterValuesInlineArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parameter_object = {}
        # split each argument at the first '=' into a key and a value
for argument in value:
try:
argument_components = argument.split('=', 1)
key = argument_components[0]
value = argument_components[1]
if key in parameter_object:
parameter_object[key] = [parameter_object[key], value]
else:
parameter_object[key] = value
except IndexError:
raise ParameterDefinitionError(
"Invalid inline parameter format: %s" % argument
)
parsed = {'values': parameter_object}
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
class ListRunsCommand(BasicCommand):
NAME = 'list-runs'
DESCRIPTION = (
'Lists the times the specified pipeline has run. '
'You can optionally filter the complete list of '
'results to include only the runs you are interested in.')
ARG_TABLE = [
{'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.',
'action': 'store', 'required': True, 'cli_type_name': 'string', },
{'name': 'status',
'help_text': (
'Filters the list to include only runs in the '
'specified statuses. '
'The valid statuses are as follows: waiting, pending, cancelled, '
'running, finished, failed, waiting_for_runner, '
'and waiting_on_dependencies. You can combine statuses as a '
'comma-separated list. For example: '
'<code>--status pending,waiting_on_dependencies</code>'),
'action': 'store'},
{'name': 'start-interval',
'help_text': (
'Filters the list to include only runs that started '
'within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
{'name': 'schedule-interval',
'help_text': (
'Filters the list to include only runs that are scheduled to '
'start within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
]
VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running',
'finished', 'failed', 'waiting_for_runner',
'waiting_on_dependencies', 'shutting_down']
def _run_main(self, parsed_args, parsed_globals, **kwargs):
self._set_client(parsed_globals)
self._parse_type_args(parsed_args)
self._list_runs(parsed_args, parsed_globals)
def _set_client(self, parsed_globals):
# This is called from _run_main and is used to ensure that we have
# a service/endpoint object to work with.
self.client = self._session.create_client(
'datapipeline',
region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
verify=parsed_globals.verify_ssl)
def _parse_type_args(self, parsed_args):
# TODO: give good error messages!
# Parse the start/schedule times.
# Parse the status csv.
if parsed_args.start_interval is not None:
parsed_args.start_interval = [
arg.strip() for arg in
parsed_args.start_interval.split(',')]
if parsed_args.schedule_interval is not None:
parsed_args.schedule_interval = [
arg.strip() for arg in
parsed_args.schedule_interval.split(',')]
if parsed_args.status is not None:
parsed_args.status = [
arg.strip() for arg in
parsed_args.status.split(',')]
self._validate_status_choices(parsed_args.status)
def _validate_status_choices(self, statuses):
for status in statuses:
if status not in self.VALID_STATUS:
raise ValueError("Invalid status: %s, must be one of: %s" %
(status, ', '.join(self.VALID_STATUS)))
def _list_runs(self, parsed_args, parsed_globals):
query = QueryArgBuilder().build_query(parsed_args)
object_ids = self._query_objects(parsed_args.pipeline_id, query)
objects = self._describe_objects(parsed_args.pipeline_id, object_ids)[
'pipelineObjects']
converted = convert_described_objects(
objects,
sort_key_func=lambda x: (x.get('@scheduledStartTime'),
x.get('name')))
formatter = self._get_formatter(parsed_globals)
formatter(self.NAME, converted)
def _describe_objects(self, pipeline_id, object_ids):
parsed = self.client.describe_objects(
pipelineId=pipeline_id, objectIds=object_ids)
return parsed
def _query_objects(self, pipeline_id, query):
paginator = self.client.get_paginator('query_objects').paginate(
pipelineId=pipeline_id,
sphere='INSTANCE', query=query)
parsed = paginator.build_full_result()
return parsed['ids']
def _get_formatter(self, parsed_globals):
output = parsed_globals.output
if output is None:
return ListRunsFormatter(parsed_globals)
else:
return get_formatter(output, parsed_globals)
| 39.106635 | 79 | 0.64558 | [
"MIT"
] | claytonbrown/SublimeLinter-contrib-AWS-Cloudformation-JSON | dist/awscli/customizations/datapipeline/__init__.py | 16,503 | Python |
from PIL import ImageGrab
import pyautogui
import numpy
import time
import cv2
import os
timeA = time.time()
fourcc = cv2.VideoWriter_fourcc(*"XVID")
name = f"Recording{len(os.listdir())-2}.mp4"
out = cv2.VideoWriter(name, fourcc, 14.0, (1920, 1080))
white = (255, 255, 255)
black = (0, 0, 0)
while True:
frame = ImageGrab.grab()
data = frame.load()
(x, y) = pyautogui.position()
mouseFrame = numpy.array(frame)
finalFrame = cv2.cvtColor(mouseFrame, 4)
cv2.circle(finalFrame, (x, y), 7, (0, 0, 0), -1)
cv2.circle(finalFrame, (x, y), 6, (255, 255, 255), -1)
cv2.imshow("Recoding", finalFrame)
out.write(finalFrame)
if (cv2.waitKey(1) & 0xFF == ord("q")):
break
out.release()
cv2.destroyAllWindows()
print("Time:", str(time.time() - timeA)[:4]+"s") | 22.702703 | 59 | 0.608333 | [
"Apache-2.0"
] | udham2511/Python-Screen-Recorder | recorder.py | 840 | Python |
from ._datasets import fly_brain, scicar_mouse_kidney
__all__ = ['fly_brain', 'scicar_mouse_kidney']
| 17.333333 | 53 | 0.788462 | [
"MIT"
] | brianhie/schema | schema/datasets/__init__.py | 104 | Python |
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ProtectionInfo(object):
"""Implementation of the 'ProtectionInfo' model.
dataLocation defines data location related information.
Attributes:
end_time_usecs (long|int): Specifies the end time for object
retention.
location (string): Specifies the location of the object.
policy_id (string): Specifies the id of the policy.
protection_job_id (long|int): Specifies the id of the protection job.
protection_job_name (string): Specifies the protection job name which
protects this object.
retention_period (long|int): Specifies the retention period.
start_time_usecs (long|int): Specifies the start time for object
retention.
storage_domain (string): Specifies the storage domain name.
total_snapshots (long|int): Specifies the total number of snapshots.
"""
# Create a mapping from Model property names to API property names
_names = {
"end_time_usecs":'endTimeUsecs',
"location":'location',
"policy_id":'policyId',
"protection_job_id":'protectionJobId',
"protection_job_name":'protectionJobName',
"retention_period":'retentionPeriod',
"start_time_usecs":'startTimeUsecs',
"storage_domain":'storageDomain',
"total_snapshots":'totalSnapshots'
}
def __init__(self,
end_time_usecs=None,
location=None,
policy_id=None,
protection_job_id=None,
protection_job_name=None,
retention_period=None,
start_time_usecs=None,
storage_domain=None,
total_snapshots=None):
"""Constructor for the ProtectionInfo class"""
# Initialize members of the class
self.end_time_usecs = end_time_usecs
self.location = location
self.policy_id = policy_id
self.protection_job_id = protection_job_id
self.protection_job_name = protection_job_name
self.retention_period = retention_period
self.start_time_usecs = start_time_usecs
self.storage_domain = storage_domain
self.total_snapshots = total_snapshots
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
end_time_usecs = dictionary.get('endTimeUsecs')
location = dictionary.get('location')
policy_id = dictionary.get('policyId')
protection_job_id = dictionary.get('protectionJobId')
protection_job_name = dictionary.get('protectionJobName')
retention_period = dictionary.get('retentionPeriod')
start_time_usecs = dictionary.get('startTimeUsecs')
storage_domain = dictionary.get('storageDomain')
total_snapshots = dictionary.get('totalSnapshots')
# Return an object of this model
return cls(end_time_usecs,
location,
policy_id,
protection_job_id,
protection_job_name,
retention_period,
start_time_usecs,
storage_domain,
total_snapshots)
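# Illustrative round-trip (a sketch; the guard and the sample values below are
# not part of the SDK and exist only to show from_dictionary in action):
if __name__ == '__main__':
    _sample = {'policyId': 'policy-1', 'location': 'us-east-1',
               'totalSnapshots': 12}
    _info = ProtectionInfo.from_dictionary(_sample)
    print(_info.policy_id, _info.location, _info.total_snapshots)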
| 36.028846 | 81 | 0.62957 | [
"Apache-2.0"
] | cohesity/management-sdk-python | cohesity_management_sdk/models/protection_info.py | 3,747 | Python |
from mycroft import MycroftSkill, intent_file_handler
import subprocess
class Fortune(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('fortune.intent')
def handle_fortune(self, message):
result = subprocess.run("fortune", capture_output=True, text=True)
self.speak_dialog(result.stdout)
def create_skill():
return Fortune()
| 23.823529 | 74 | 0.728395 | [
"MIT"
] | rogermoore6872/fortune-skill | __init__.py | 405 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of Archdiffer and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Created on Sun Mar 4 10:23:41 2018
@author: Pavla Kratochvilova <[email protected]>
"""
import operator
import datetime
from flask import request
from .exceptions import BadRequest
def make_datetime(time_string, formats=None):
"""Makes datetime from string based on one of the formats.
:param string time_string: time in string
:param list formats: list of accepted formats
:return datetime.datetime: datetime or None if no format is matched
"""
if formats is None:
formats = [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
for fmt in formats:
try:
return datetime.datetime.strptime(time_string, fmt)
except ValueError:
pass
return None
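# Quick illustration of the default formats accepted above (example values):
#
#   make_datetime('2018-03-04')    # -> datetime.datetime(2018, 3, 4, 0, 0)
#   make_datetime('04/03/2018')    # -> None (format not in the default list)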
# Transformation functions
def _dict_transform(string):
return dict([item.split(':', 1) for item in string.split(';')])
def _list_transform(string):
return string.split(',')
# Transformations of common arguments
_TRANSFORMATIONS = {
'filter_by' : _dict_transform,
'filter' : _list_transform,
'order_by' : _list_transform,
'limit' : lambda x: int(x),
'offset' : lambda x: int(x),
}
# Filters creators
def before(column, name='before'):
"""Make filter template for filtering column values less or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
"""
return {name: (column, operator.le, make_datetime)}
def after(column, name='after'):
"""Make filter template for filtering column values greater or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
"""
return {name: (column, operator.ge, make_datetime)}
def time(column, name='time'):
"""Make filter template for filtering column values equal to datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
"""
return {name: (column, operator.eq, make_datetime)}
def equals(column, name='id', function=(lambda x: x)):
"""Make filter template for filtering column values equal to value
transformed by given function.
:param column: database model
:param string name: name used in the filter template
:param callable function: function for transforming the value
:return dict: resulting template
"""
return {name: (column, operator.eq, function)}
# Request parser
def parse_request(filters=None, defaults=None):
"""Parse arguments in request according to the _TRANSFORMATIONS or given
filters.
Requests containing other keys are considered invalid.
:param dict filters: dict of filter templates containing for each key
(column, operator, function transforming value from request argument)
:param dict defaults: default values of modifiers
:return dict: dict of parsed arguments
:raises werkzeug.exceptions.BadRequest: if one of the request arguments is
not recognized
"""
if filters is None:
filters = {}
if defaults is not None:
args_dict = defaults.copy()
else:
args_dict = {}
filters_list = []
for key, value in request.args.items():
if key in _TRANSFORMATIONS:
try:
args_dict[key] = _TRANSFORMATIONS[key](value)
except ValueError:
raise BadRequest('Argument has invalid value "%s".' % value)
elif key in filters.keys():
filters_list.append(
filters[key][1](filters[key][0], filters[key][2](value))
)
else:
raise BadRequest('Argument "%s" not recognized.' % key)
if 'filter' not in args_dict.keys():
args_dict['filter'] = []
args_dict['filter'] += filters_list
return args_dict
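# Example wiring (a sketch; ``MyModel`` and its ``time`` column are hypothetical
# and a Flask request context is assumed):
#
#   filters = before(MyModel.time)
#   filters.update(after(MyModel.time))
#   args = parse_request(filters=filters, defaults={'limit': 20})
#   # For "?before=2018-01-01&limit=5" this yields
#   # {'limit': 5, 'filter': [MyModel.time <= datetime(2018, 1, 1)]}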
def get_request_arguments(*names, args_dict=None, invert=False):
"""Get arguments from args_dict or request if they match given names.
:param *names: names of arguments
:param dict args_dict: dict of arguments
:param bool invert: True if names should be exclueded instead
:return dict: dict of arguments
"""
if args_dict is None:
args_dict = parse_request()
if invert:
return {k:v for k, v in args_dict.items() if k not in names}
return {k:v for k, v in args_dict.items() if k in names}
def update_modifiers(old_modifiers, new_modifiers):
"""Update modifiers.
:param dict old_modifiers: old modifiers
:param dict old_modifiers: new modifiers
:return dict: resulting modifiers
"""
modifiers = old_modifiers.copy()
for key, value in new_modifiers.items():
if key in old_modifiers:
if _TRANSFORMATIONS.get(key) == _list_transform:
modifiers[key] += value
elif _TRANSFORMATIONS.get(key) == _dict_transform:
modifiers[key].update(value)
else:
modifiers[key] = value
else:
modifiers[key] = value
return modifiers
| 31.760479 | 78 | 0.659879 | [
"MIT"
] | Kratochvilova/archdiffer | archdiffer/flask_frontend/request_parser.py | 5,304 | Python |
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM  # comment this import out for HPC (CPU-only) runs
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'],
model_type=None,
cnn_min_layers=5, cnn_max_layers=10,
cnn_min_filters=25, cnn_max_filters=100,
cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
"""
Generate one or multiple untrained Keras models with random hyperparameters.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
number_of_classes : int
Number of classes for classification task
number_of_models : int
Number of models to generate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
model_type : str, optional
Type of model to build: 'CNN' or 'DeepConvLSTM'.
Default option None generates both models.
cnn_min_layers : int
minimum of Conv layers in CNN model
cnn_max_layers : int
maximum of Conv layers in CNN model
cnn_min_filters : int
minimum number of filters per Conv layer in CNN model
cnn_max_filters : int
maximum number of filters per Conv layer in CNN model
cnn_min_fc_nodes : int
minimum number of hidden nodes per Dense layer in CNN model
cnn_max_fc_nodes : int
maximum number of hidden nodes per Dense layer in CNN model
deepconvlstm_min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_min_lstm_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_lstm_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
deepconvlstm_max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
models : list
List of compiled models
"""
models = []
for _ in range(0, number_of_models):
if model_type is None: # random model choice:
current_model_type = 'CNN' if np.random.random(
) < 0.5 else 'DeepConvLSTM'
else: # user-defined model choice:
current_model_type = model_type
generate_model = None
if current_model_type == 'CNN':
generate_model = generate_CNN_model # generate_model is a function
hyperparameters = generate_CNN_hyperparameter_set(
min_layers=cnn_min_layers, max_layers=cnn_max_layers,
min_filters=cnn_min_filters, max_filters=cnn_max_filters,
min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
if current_model_type == 'DeepConvLSTM':
generate_model = generate_DeepConvLSTM_model
hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=deepconvlstm_min_conv_layers,
max_conv_layers=deepconvlstm_max_conv_layers,
min_conv_filters=deepconvlstm_min_conv_filters,
max_conv_filters=deepconvlstm_max_conv_filters,
min_lstm_layers=deepconvlstm_min_lstm_layers,
max_lstm_layers=deepconvlstm_max_lstm_layers,
min_lstm_dims=deepconvlstm_min_lstm_dims,
max_lstm_dims=deepconvlstm_max_lstm_dims,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
models.append(
(generate_model(x_shape, number_of_classes, metrics=metrics, **hyperparameters),
hyperparameters, current_model_type))
return models
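# Minimal usage sketch (illustrative shapes and values; training data and the
# actual fitting step are not shown here):
#
#   models = generate_models(x_shape=(None, 512, 9), number_of_classes=4,
#                            number_of_models=2, model_type='CNN')
#   for model, hyperparameters, model_type in models:
#       print(model_type, hyperparameters['fc_hidden_nodes'])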
def generate_DeepConvLSTM_model(
x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
regularization_rate=0.01, metrics=['accuracy']):
"""
Generate a model with convolution and LSTM layers.
See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
lstm_dims : list of ints
number of hidden nodes for each LSTM layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
"""
dim_length = x_shape[1] # number of samples in a time series
dim_channels = x_shape[2] # number of channels
output_dim = class_number # number of classes
weightinit = 'lecun_uniform' # weight initialization
model = Sequential() # initialize model
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
# reshape a 2 dimensional array per file/person/object into a
# 3 dimensional array
model.add(
Reshape(target_shape=(dim_length, dim_channels, 1)))
for filt in filters:
# filt: number of filters used in a layer
# filters: vector of filt values
model.add(
Convolution2D(filt, kernel_size=(3, 1), padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
# reshape 3 dimensional array back into a 2 dimensional array,
# but now with more dept as we have the the filters for each channel
model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
for lstm_dim in lstm_dims:
#model.add(LSTM(units=lstm_dim, return_sequences=True,
# activation='tanh'))
        # comment out the following line (and use the plain LSTM above) on HPC
model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
model.add(Dropout(0.5)) # dropout before the dense layer
# # set up final dense layer such that every timestamp is given one
# # classification
# model.add(
# TimeDistributed(
# Dense(units=output_dim, kernel_regularizer=l2(regularization_rate))))
# model.add(Activation("softmax"))
# # Final classification layer - per timestep
# model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim]))
# Pool output of all timesteps and perform classification using pooled output
model.add(GlobalAveragePooling1D())
model.add(Dense(units=output_dim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax")) # Final classification layer
# if class_number == 2:
# loss = 'binary_crossentropy'
# else:
# loss = 'categorical_crossentropy'
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
learning_rate=0.01, regularization_rate=0.01,
metrics=['accuracy']):
"""
Generate a convolutional neural network (CNN) model.
The compiled Keras model is returned.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
fc_hidden_nodes : int
number of hidden nodes for the hidden dense layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
"""
dim_length = x_shape[1] # number of samples in a time series
dim_channels = x_shape[2] # number of channels
outputdim = class_number # number of classes
weightinit = 'lecun_uniform' # weight initialization
model = Sequential()
model.add(
BatchNormalization(
input_shape=(
dim_length,
dim_channels)))
for filter_number in filters:
model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes,
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit)) # Fully connected layer
model.add(Activation('relu')) # Relu activation
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax")) # Final classification layer
# if class_number == 2:
# loss = 'binary_crossentropy'
# else:
# loss = 'categorical_crossentropy'
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
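# Sketch of building a single CNN directly (hypothetical shapes and
# hyperparameter values):
#
#   cnn = generate_CNN_model(x_shape=(100, 512, 9), class_number=4,
#                            filters=[32, 32], fc_hidden_nodes=64,
#                            learning_rate=1e-3, regularization_rate=1e-2)
#   cnn.summary()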
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
min_filters=10, max_filters=100,
min_fc_nodes=10, max_fc_nodes=2000,
low_lr=1, high_lr=4, low_reg=1,
high_reg=4):
""" Generate a hyperparameter set that define a CNN model.
Parameters
----------
min_layers : int
minimum of Conv layers
max_layers : int
maximum of Conv layers
min_filters : int
minimum number of filters per Conv layer
max_filters : int
maximum number of filters per Conv layer
min_fc_nodes : int
minimum number of hidden nodes per Dense layer
max_fc_nodes : int
maximum number of hidden nodes per Dense layer
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters : dict
parameters for a CNN model
"""
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_layers = np.random.randint(min_layers, max_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_filters, max_filters + 1, number_of_layers)
hyperparameters['fc_hidden_nodes'] = np.random.randint(
min_fc_nodes, max_fc_nodes + 1)
return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=1, max_conv_layers=10,
min_conv_filters=10, max_conv_filters=100,
min_lstm_layers=1, max_lstm_layers=5,
min_lstm_dims=10, max_lstm_dims=100,
low_lr=1, high_lr=4, low_reg=1, high_reg=4):
""" Generate a hyperparameter set that defines a DeepConvLSTM model.
Parameters
----------
min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
min_lstm_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_lstm_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters: dict
hyperparameters for a DeepConvLSTM model
"""
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_conv_layers = np.random.randint(
min_conv_layers, max_conv_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_conv_filters, max_conv_filters + 1, number_of_conv_layers).tolist()
number_of_lstm_layers = np.random.randint(
min_lstm_layers, max_lstm_layers + 1)
hyperparameters['lstm_dims'] = np.random.randint(
min_lstm_dims, max_lstm_dims + 1, number_of_lstm_layers).tolist()
return hyperparameters
def generate_base_hyper_parameter_set(
low_lr=1,
high_lr=4,
low_reg=1,
high_reg=4):
""" Generate a base set of hyperparameters that are necessary for any
model, but sufficient for none.
Parameters
----------
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
hyperparameters : dict
        base hyperparameters
"""
hyperparameters = {}
hyperparameters['learning_rate'] = get_learning_rate(low_lr, high_lr)
hyperparameters['regularization_rate'] = get_regularization(
low_reg, high_reg)
return hyperparameters
def get_learning_rate(low=1, high=4):
""" Return random learning rate 10^-n where n is sampled uniformly between
low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
learning_rate : float
learning rate
"""
    # Learning rate is fixed for the Adam optimizer; the original random
    # sampling would be: result = 10 ** (-np.random.uniform(low, high))
    result = 0.001
return result
def get_regularization(low=1, high=4):
""" Return random regularization rate 10^-n where n is sampled uniformly
between low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
regularization_rate : float
regularization rate
"""
return 10 ** (-np.random.uniform(low, high))
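# Illustrative sketch, not part of the upstream module: drawing a few candidate
# DeepConvLSTM hyperparameter sets with the generators defined above. It relies
# only on functions from this file and on the module-level numpy import; the
# pool size, bounds and seed are arbitrary choices for the example.
if __name__ == "__main__":
    np.random.seed(0)  # make the sampled candidates reproducible
    candidates = [
        generate_DeepConvLSTM_hyperparameter_set(min_conv_layers=2,
                                                 max_conv_layers=4)
        for _ in range(3)
    ]
    for i, params in enumerate(candidates):
        print("candidate {}: lr={:g}, reg={:g}, filters={}, lstm_dims={}".format(
            i, params['learning_rate'], params['regularization_rate'],
            params['filters'], params['lstm_dims']))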
| 38.618143 | 92 | 0.667632 | [
"Apache-2.0"
] | wadpac/mcfly | mcfly/modelgen.py | 18,305 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions to draw various pygimli matrices with matplotlib."""
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawSparseMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
Parameters
----------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix
Returns
-------
mpl.lines.line2d
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.viewer.mpl import drawSparseMatrix
>>> A = pg.randn((10,10), seed=0)
>>> SM = pg.core.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, i, 5.0)
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
>>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
>>> _ = drawSparseMatrix(ax2, SM, color='green')
"""
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
def drawBlockMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
    Parameters
    ----------
    ax : mpl axis instance
        Axis instance where the matrix will be plotted.
    mat : pg.matrix.BlockMatrix
        Block matrix to be drawn.
Keyword Arguments
-----------------
spy: bool [False]
Draw all matrix entries instead of colored blocks
Returns
-------
    gci : matplotlib graphics object or list thereof
        Graphics element(s) drawn for the matrix.
    cBar : colorbar instance or None
        Colorbar for the matrix IDs (None when ``spy=True``).
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> I = pg.matrix.IdentityMatrix(10)
>>> SM = pg.matrix.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, 10 - i, 5.0)
... SM.setVal(i, i, 5.0)
>>> B = pg.matrix.BlockMatrix()
>>> B.add(I, 0, 0)
0
>>> B.add(SM, 10, 10)
1
>>> print(B)
pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
>>> _ = pg.show(B, ax=ax1)
>>> _ = pg.show(B, spy=True, ax=ax2)
"""
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap("Set3", len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati,
rowOffset=e.rowStart,
colOffset=e.colStart,
color=cMap(mid)))
return gci, None
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = mat.mat(mid).rows() - 0.1 # to make sure non-matrix regions are not connected in the plot
widthx = mat.mat(mid).cols() - 0.1
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart],
[e.colStart + widthx, e.rowStart + widthy],
marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
gci, cBar = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label("Matrix ID")
if len(mat.entries()) > 10:
gci.set_cmap("viridis")
return gci, cBar
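# Illustrative sketch, not part of the upstream module: a side-by-side block view
# and spy view of the same BlockMatrix, mirroring the doctest examples above.
# It uses only this module's imports (pg, plt) and public pygimli matrix types.
if __name__ == "__main__":
    I = pg.matrix.IdentityMatrix(10)
    SM = pg.matrix.SparseMapMatrix()
    for i in range(10):
        SM.setVal(i, i, 5.0)
    B = pg.matrix.BlockMatrix()
    B.add(I, 0, 0)
    B.add(SM, 10, 10)
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    drawBlockMatrix(ax1, B)            # one colored block per submatrix
    drawBlockMatrix(ax2, B, spy=True)  # individual non-zero entries
    plt.show()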
| 28.789063 | 110 | 0.538128 | [
"Apache-2.0"
] | JuliusHen/gimli | pygimli/viewer/mpl/matrixview.py | 3,685 | Python |
import uuid
from http import HTTPStatus
import pytest
from botx.clients.methods.errors.chat_not_found import (
ChatNotFoundData,
ChatNotFoundError,
)
from botx.clients.methods.v3.chats.add_user import AddUser
from botx.concurrency import callable_to_coroutine
pytestmark = pytest.mark.asyncio
pytest_plugins = ("tests.test_clients.fixtures",)
async def test_raising_chat_not_found(client, requests_client):
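    """A 404 response carrying ChatNotFoundData should raise ChatNotFoundError."""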
method = AddUser(
host="example.com",
group_chat_id=uuid.uuid4(),
user_huids=[uuid.uuid4()],
)
errors_to_raise = {
AddUser: (
HTTPStatus.NOT_FOUND,
ChatNotFoundData(group_chat_id=method.group_chat_id),
),
}
with client.error_client(errors=errors_to_raise):
request = requests_client.build_request(method)
response = await callable_to_coroutine(requests_client.execute, request)
with pytest.raises(ChatNotFoundError):
await callable_to_coroutine(
requests_client.process_response,
method,
response,
)
| 26.902439 | 80 | 0.682684 | [
"MIT"
] | ExpressApp/pybotx | tests/test_clients/test_methods/test_errors/test_chat_not_found.py | 1,103 | Python |
"""This is the core module for accessing using and accessing the bot"""
from .core import Bot
| 23.75 | 71 | 0.747368 | [
"MIT"
] | CaffeineDuck/BoilerBot | bot/__init__.py | 95 | Python |
from abaqusConstants import *
from .BoundaryConditionState import BoundaryConditionState
class DisplacementBaseMotionBCState(BoundaryConditionState):
"""The DisplacementBaseMotionBCState object stores the propagating data for a velocity base
motion boundary condition in a step. One instance of this object is created internally
by the DisplacementBaseMotionBC object for each step. The instance is also deleted
internally by the DisplacementBaseMotionBC object.
The DisplacementBaseMotionBCState object has no constructor or methods.
The DisplacementBaseMotionBCState object is derived from the BoundaryConditionState
object.
Attributes
----------
amplitudeState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
status: SymbolicConstant
A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` object. Possible values are:
NOT_YET_ACTIVE
CREATED
PROPAGATED
MODIFIED
DEACTIVATED
NO_LONGER_ACTIVE
TYPE_NOT_APPLICABLE
INSTANCE_NOT_APPLICABLE
PROPAGATED_FROM_BASE_STATE
MODIFIED_FROM_BASE_STATE
DEACTIVATED_FROM_BASE_STATE
BUILT_INTO_MODES
amplitude: str
A String specifying the name of the amplitude reference. The String is empty if the
boundary condition has no amplitude reference.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].steps[name].boundaryConditionStates[name]
The corresponding analysis keywords are:
- BASE MOTION
"""
# A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
# values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
amplitudeState: SymbolicConstant = None
# A SymbolicConstant specifying the propagation state of the BoundaryConditionState object. Possible values are:
# NOT_YET_ACTIVE
# CREATED
# PROPAGATED
# MODIFIED
# DEACTIVATED
# NO_LONGER_ACTIVE
# TYPE_NOT_APPLICABLE
# INSTANCE_NOT_APPLICABLE
# PROPAGATED_FROM_BASE_STATE
# MODIFIED_FROM_BASE_STATE
# DEACTIVATED_FROM_BASE_STATE
# BUILT_INTO_MODES
status: SymbolicConstant = None
# A String specifying the name of the amplitude reference. The String is empty if the
# boundary condition has no amplitude reference.
amplitude: str = ''
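# Illustrative sketch, not part of the upstream stub: how this state object is
# typically read from an Abaqus/CAE script. ``mdb`` and the 'Model-1'/'Step-2'/
# 'BC-1' keys are placeholders assumed to exist in the running session; the state
# itself is created internally by DisplacementBaseMotionBC and is only inspected.
def _example_inspect_state(mdb, model='Model-1', step='Step-2', bc='BC-1'):
    state = mdb.models[model].steps[step].boundaryConditionStates[bc]
    # status and amplitude are the read-only attributes documented above
    print('%s in %s: status=%s, amplitude=%r' % (bc, step, state.status, state.amplitude))
    return state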
| 35.756757 | 179 | 0.733182 | [
"MIT"
] | Haiiliin/PyAbaqus | src/abaqus/BoundaryCondition/DisplacementBaseMotionBCState.py | 2,646 | Python |
from abc import abstractmethod, ABCMeta
from collections import deque
from functools import partial
from plenum.common.constants import VIEW_CHANGE_START, PreVCStrategies, VIEW_CHANGE_CONTINUE
from plenum.common.messages.node_messages import ViewChangeStartMessage, ViewChangeContinueMessage, PrePrepare, Prepare, \
Commit, Ordered
from stp_zmq.zstack import Quota
from stp_core.common.log import getlogger
logger = getlogger()
class PreViewChangeStrategy(metaclass=ABCMeta):
"""Abstract class for routines before starting viewChange procedure"""
def __init__(self, view_changer, node):
self.view_changer = view_changer
self.node = node
@abstractmethod
def prepare_view_change(self, proposed_view_no: int):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_started(obj, msg, frm):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_continued(obj, msg):
raise NotImplementedError()
@abstractmethod
def on_strategy_complete(self):
raise NotImplementedError()
class VCStartMsgStrategy(PreViewChangeStrategy):
"""Strategy logic:
- when startViewChange method was called, then put 'local' ViewChangeStart message and set corresponded handlers
- on processing startViewChange message on the nodeInBoxRouter's side the next steps will be performed:
- call nodestack.service method with extended quota parameters for getting as much as possible 3PC
messages from ZMQ's side
- process all messages from nodeInBox queue and stash all not 3PC
- append to replica's inBox queue ViewChangeContinueMessage
- then replica's inBox queue will be processed and after ViewChangeContinueMessage view_change procedure
will be continued in the normal way
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stashedNodeInBox = deque()
self.replica = self.node.master_replica
self.is_preparing = False
def prepare_view_change(self, proposed_view_no: int):
if not self.is_preparing:
logger.info("VCStartMsgStrategy: Starting prepare_view_change process")
self._set_req_handlers()
vcs_msg = ViewChangeStartMessage(proposed_view_no)
nodeInBox = self.node.nodeInBox
nodeInBox.append((vcs_msg, self.node.name))
self.is_preparing = True
def on_strategy_complete(self):
logger.info("VCStartMsgStrategy: on_strategy_complete - View Change can be started")
self.unstash_messages()
self.is_preparing = False
@staticmethod
async def _process_node_inbox_3PC(node):
current_view_no = node.viewNo
stashed_not_3PC = deque()
types_3PC = (PrePrepare, Prepare, Commit, Ordered)
while node.nodeInBox:
m = node.nodeInBox.popleft()
if len(m) == 2 and isinstance(m[0], types_3PC) and \
m[0].viewNo == current_view_no and \
m[0].instId == node.instances.masterId:
await node.process_one_node_message(m)
else:
stashed_not_3PC.append(m)
return stashed_not_3PC
"""Handler for processing ViewChangeStart message on node's nodeInBoxRouter"""
@staticmethod
async def on_view_change_started(node, msg: ViewChangeStartMessage, frm):
strategy = node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
logger.info("VCStartMsgStrategy: got ViewChangeStartMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > node.view_changer.view_no:
vcc_msg = ViewChangeContinueMessage(proposed_view_no)
quota = Quota(
count=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.count,
size=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.size)
msgs_count = await node.nodestack.service(limit=None,
quota=quota)
logger.info("VCStartMsgStrategy: Got {} messages from nodestack".format(msgs_count))
strategy.stashedNodeInBox = await VCStartMsgStrategy._process_node_inbox_3PC(node)
logger.info("VCStartMsgStrategy: {} not 3PC msgs was stashed".format(len(strategy.stashedNodeInBox)))
node.master_replica.inBox.append(vcc_msg)
"""Handler for processing ViewChangeStart message on replica's inBoxRouter"""
@staticmethod
def on_view_change_continued(replica, msg: ViewChangeContinueMessage):
strategy = replica.node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
replica.logger.info("VCStartMsgStrategy: got ViewChangeContinueMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > replica.node.viewNo:
"""
Return stashed not 3PC msgs to nodeInBox queue and start ViewChange
Critical assumption: All 3PC msgs passed from node already processed
"""
strategy.unstash_messages()
replica.logger.info("VCStartMsgStrategy: continue view_change procedure in a normal way")
replica.node.view_changer.startViewChange(proposed_view_no, continue_vc=True)
strategy.is_preparing = False
def unstash_messages(self):
logger.info("VCStartMsgStrategy: unstash all not 3PC msgs to nodeInBox queue")
while self.stashedNodeInBox:
self.node.nodeInBox.appendleft(self.stashedNodeInBox.pop())
def _set_req_handlers(self):
node_msg_router = self.node.nodeMsgRouter
replica_msg_router = self.replica.inBoxRouter
if ViewChangeStartMessage not in node_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_started,
self.node)
node_msg_router.add((ViewChangeStartMessage, processor))
if ViewChangeContinueMessage not in replica_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_continued,
self.replica)
replica_msg_router.add((ViewChangeContinueMessage, processor))
preVCStrategies = {
PreVCStrategies.VC_START_MSG_STRATEGY: VCStartMsgStrategy
}
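# Illustrative sketch, not part of the upstream module: how the registry above is
# expected to be used by the view changer. ``view_changer``, ``node`` and
# ``proposed_view_no`` are assumed to be supplied by the caller; only the
# PreViewChangeStrategy interface defined in this file is exercised.
def _example_prepare_pre_view_change(view_changer, node, proposed_view_no,
                                     strategy_key=PreVCStrategies.VC_START_MSG_STRATEGY):
    strategy_cls = preVCStrategies[strategy_key]
    pre_vc_strategy = strategy_cls(view_changer, node)
    # Stashes non-3PC traffic and schedules the ViewChangeStart/Continue flow;
    # on_strategy_complete() later unstashes the queued messages so the
    # view change can proceed normally.
    pre_vc_strategy.prepare_view_change(proposed_view_no)
    return pre_vc_strategy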
| 44.715278 | 131 | 0.701196 | [
"Apache-2.0"
] | andkononykhin/indy-plenum-copy | plenum/server/view_change/pre_view_change_strategies.py | 6,439 | Python |
import requests
import json
import datetime
import os
import io
from invoke import task
from .invoke_utils import ServerConnection, use_dump_modifier_function
RAJK_PASSWORD = os.environ.get("RAJK_PASSWORD")
RAJK_RSA = os.environ.get("RAJK_RSA")
TEST_DEPLOY_DIRECTORY = os.getcwd() + "/build"
rajk_server_connection = ServerConnection(
"rajk", "146.110.60.20", 2222, "/var/www/rajkdjango2/bin/python"
)
def redo_rsa_from_text(c, rsa_text):
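    """Write ``rsa_text`` to ~/.ssh/id_rsa (creating ~/.ssh if needed) and
    restrict the key file's permissions to 600."""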
os.makedirs("{}/.ssh".format(os.path.expanduser("~")), exist_ok=True)
rsa_path = "{}/.ssh/id_rsa".format(os.path.expanduser("~"))
with open(rsa_path, "w") as fp:
fp.write(rsa_text)
c.run("chmod 600 {}".format(rsa_path))
@task
def backup_django(c):
os.makedirs("backups", exist_ok=True)
bup_dir = os.path.join("backups", datetime.date.today().isoformat())
c.run("mkdir {}".format(bup_dir))
scp_command = rajk_server_connection.copy_from_server_command(
bup_dir, "/var/www/rajkdjango2"
)
c.run(scp_command)
@task
def restart_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 restart", RAJK_PASSWORD
)
c.run(command)
@task
def stop_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 stop", RAJK_PASSWORD
)
c.run(command)
@task
def start_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 start", RAJK_PASSWORD
)
c.run(command)
@task
def dump(c, fname="dump.json", no_contenttypes=False):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {}".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command + " > {}".format(fname))
@task
def remote_dump(c, no_contenttypes=True):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {} > /var/www/rajk/djangodump.json".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command)
@task
def setup_test_deploy_env(c):
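    """Rebuild the local test deployment from scratch: wipe TEST_DEPLOY_DIRECTORY,
    create a virtualenv with every rajk-apps repository installed, start a fresh
    Django project wired to those apps, and load the live data dump into it."""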
c.run("rm -rf ./{}".format(TEST_DEPLOY_DIRECTORY))
c.run("mkdir {}".format(TEST_DEPLOY_DIRECTORY))
resp = requests.get("https://api.github.com/orgs/rajk-apps/repos")
repos = [
"git+https://github.com/{}".format(d["full_name"])
for d in json.loads(resp.content)
]
app_names = [r.split("/")[-1].replace("-", "_") for r in repos]
c.run("python3 -m venv {}/django_venv".format(TEST_DEPLOY_DIRECTORY))
for r in ["wheel", "django", "toml"] + repos:
c.run("{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, r))
c.run(
"cd {};django_venv/bin/django-admin startproject rajkproject".format(
TEST_DEPLOY_DIRECTORY
)
)
with open(
"{}/rajkproject/rajkproject/settings.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nINSTALLED_APPS += [{}]".format(
", ".join(["'{}'".format(a) for a in app_names])
)
)
with open(
"{}/rajkproject/rajkproject/urls.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nfrom django.urls import include"
"\nurlpatterns.append(path('accounts/', include('django.contrib.auth.urls')))"
"\nurlpatterns += [{}]".format(
", ".join(
[
"path('{}', include('{}.urls'))".format(
a + "/" if a != "rajk_appman" else "", a
)
for a in app_names
]
)
)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
for django_command in [
"makemigrations",
"makemigrations {}".format(" ".join(app_names)),
"migrate",
"loaddata {}".format(dump_fname),
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
@task
def deploy(c, dump_modifier_function=None, live=False, redo_rsa=False):
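    """Build an sdist of the current app, install it into the test virtualenv and
    rebuild the local test database from the live dump (optionally transformed by
    ``dump_modifier_function``); with ``live=True`` the release is also pushed to
    the production server via ``_live_deploy``."""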
f = io.StringIO()
c.run(
"{}/django_venv/bin/python setup.py --fullname".format(TEST_DEPLOY_DIRECTORY),
out_stream=f,
)
current_app_fullname = f.getvalue().strip()
f.close()
c.run("{}/django_venv/bin/python setup.py sdist".format(TEST_DEPLOY_DIRECTORY))
local_tarball = "./dist/{}.tar.gz".format(current_app_fullname)
c.run(
"{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, local_tarball)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, dump_fname)
c.run("rm {}/rajkproject/db.sqlite3".format(TEST_DEPLOY_DIRECTORY))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(dump_fname)
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
if live:
_live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function, redo_rsa)
def _live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function=None, redo_rsa=False):
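    """Push a release to production: optionally rewrite the SSH key from RAJK_RSA,
    stop the Django service, dump and (optionally) transform the live data, copy
    the dump and tarball to the server, reinstall the package, rebuild the
    database from the dump and restart the service."""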
if redo_rsa:
if RAJK_RSA:
redo_rsa_from_text(c, RAJK_RSA)
else:
raise EnvironmentError("No RAJK_RSA env variable")
local_dump_fname = "{}/deploy_dump.json".format(TEST_DEPLOY_DIRECTORY)
remote_dump_fname = "/var/www/rajkdjango2/deploy_dump.json"
print("stopping server")
stop_server(c)
print("dumping data")
dump(c, local_dump_fname, True)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, local_dump_fname)
scp_command = rajk_server_connection.copy_to_server_command(
local_dump_fname, remote_dump_fname
)
c.run(scp_command)
remote_tarball = "/var/www/rajkdjango2/tarballs/{}".format(
local_tarball.split("/")[-1]
)
tar_scp_command = rajk_server_connection.copy_to_server_command(
local_tarball, remote_tarball
)
c.run(tar_scp_command)
install_command = "/var/www/rajkdjango2/bin/pip --no-cache-dir install --upgrade {}".format(
remote_tarball
)
remote_install_command = rajk_server_connection.run_ssh_command(install_command)
c.run(remote_install_command)
c.run(rajk_server_connection.run_ssh_command("rm /var/www/rajkdjango2/db.sqlite3"))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(remote_dump_fname),
]:
c.run(
rajk_server_connection.remote_python_command(
"/var/www/rajkdjango2/manage.py {}".format(django_command)
)
)
start_server(c)
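# Illustrative usage, not part of the upstream file: typical command lines for the
# tasks above, assuming this module is exposed through an invoke collection and
# that invoke's default auto-dashing of task and flag names applies.
#
#   invoke backup-django
#   invoke dump --fname dump.json --no-contenttypes
#   invoke setup-test-deploy-env
#   invoke deploy                      # local test deployment only
#   invoke deploy --live --redo-rsa    # also push the release to production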
| 29.703557 | 102 | 0.637126 | [
"MIT"
] | rajk-apps/rajk-appman | rajk_appman/invoke_rajk.py | 7,515 | Python |