max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
Py Learning/15. My Final Project/StudentDB.py | MahmudX/TestSharp | 0 | 6630151 | import sqlite3
import os
db = sqlite3.connect('studentDB.db')
cursor = db.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS studentDB(
ID INTEGER PRIMARY KEY, Name TEXT, District TEXT, 'Blood Group' TEXT
)""")
cursor.execute(
''' SELECT * FROM sqlite_master WHERE type='table' AND name='studentDB' ''')
cursor.execute("SELECT COUNT(*) FROM studentDB")
sCOUNT = cursor.fetchone()[0]
def inValInput():
print('Invalid Input.')
    print('Type \'exit()\' to exit the program')
print('Press enter to try again.')
def Create():
print("How many student\'s you want to add?")
n = int(input())
print('ID Scheme:')
sID = int(input())
print('Starting Position')
sPo = int(input())
counter = 1
while counter <= n:
print(f'ID: {sID}{sPo}')
print('Name: ', end=' ')
sNAME = input()
        print('District: ', end=' ')
bgDis = input()
sDIS = bgDis if bgDis else ' '
print('Blood Group: ', end=' ')
bgTemp = input()
sBG = bgTemp if bgTemp else ' '
cursor.execute(f"""
INSERT INTO studentDB (ID, Name, District, 'Blood Group') VALUES ('{sID}{sPo}','{sNAME}','{sDIS}', '{sBG}')
""")
db.commit()
counter += 1
sPo += 1
print('All the student(s) added to the database successfully.\n')
def Lookup():
print(f'There are {sCOUNT} student(s) in the database.\n')
if sCOUNT:
print('All the students are -\n')
for row in cursor.execute("SELECT * FROM studentDB"):
print(f'ID: {row[0]}')
print(f'Name: {row[1]}')
print(f'District: {row[2]}')
print(f'Blood Group: {row[3]}\n')
def SingleLookup():
if sCOUNT:
print('Enter student\'s ID:')
idTemp = int(input())
for row in cursor.execute(f"SELECT * FROM studentDB WHERE ID = '{idTemp}'"):
print(f'\nID: {row[0]}')
print(f'Name: {row[1]}')
print(f'District: {row[2]}')
print(f'Blood Group: {row[3]}\n')
else:
print('Please add at least one student to the database first.\n')
def Update():
if sCOUNT:
print('Enter student\'s ID:')
idTemp = int(input())
tempName = input('Name: ')
        tempDis = input('District: ')
tempBG = input('Blood Group: ')
if tempName:
cursor.execute(
f'UPDATE studentDB SET Name=\'{tempName}\' WHERE ID=\'{idTemp}\'')
if tempDis:
cursor.execute(
f'UPDATE studentDB SET District=\'{tempDis}\' WHERE ID=\'{idTemp}\'')
if tempBG:
cursor.execute(
f'UPDATE studentDB SET \'Blood Group\'=\'{tempBG}\' WHERE ID=\'{idTemp}\'')
db.commit()
print('Updated Successfully.\n')
else:
print('There is no student to edit.\n')
def Delete():
print('Enter ID: ', end=' ')
tempID = int(input())
cursor.execute(f'DELETE FROM studentDB WHERE ID= {tempID}')
db.commit()
print('Student Removed from the database successfully.\n')
def PrintMenu():
print('Menu')
print('1. Lookup the full Database')
print('2. Lookup a student\'s information')
    print('3. Update a student\'s information')
print('4. Add student(s) to the database')
    print('5. Delete a student from the database')
print('6. Exit\n')
def main():
closeTheApp = ''
while closeTheApp != 'exit()':
PrintMenu()
MenuDict = {1: Lookup, 2: SingleLookup, 3: Update,
4: Create, 5: Delete}
try:
print('Enter a choice:', end=' ')
menuItem = int(input())
print()
            closeTheApp = 'exit()' if menuItem == 6 else MenuDict[menuItem]()
except:
inValInput()
closeTheApp = input()
if __name__ == "__main__":
main()
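# The INSERT and UPDATE statements above build SQL by interpolating raw
# input() values with f-strings, which breaks on quotes and is open to SQL
# injection. A minimal sketch of the parameterized alternative (illustrative
# only, not wired into the menu above):
def insert_student_safe(connection, student_id, name, district, blood_group):
    """Insert one row using sqlite3 parameter binding instead of f-strings."""
    connection.execute(
        "INSERT INTO studentDB (ID, Name, District, 'Blood Group') "
        "VALUES (?, ?, ?, ?)",
        (student_id, name, district, blood_group))
    connection.commit()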
Bases/Theory/2 - operators.py | PierreAnken/TrainingPython | 0 | 6630152 | <filename>Bases/Theory/2 - operators.py
if __name__ == '__main__':
print(True)
print(3 < 5)
if 3 > 2:
print('x')
my_list = ['2']
my_empty_list = []
if my_empty_list:
print('List is not empty')
else:
print('List is empty')
    # arithmetic operators
    print(' == Arithmetic operators ==')
var = 1 * 2 / 2 - 3 + 5
print(5 % 2)
print(5 // 2)
    # assignment
var1 = 1
var1 = var1 + 2
var1 += 2
# comparison
    print(' == Comparison operators ==')
print(1 == 2)
print(1 != 2)
    # logical operators
    print(' == Logical operators ==')
print(True and False)
print(True or False)
print(not False)
# identity operators
print(' == identity operators ==')
print(type(var1) is int)
print(isinstance(True, bool)) # classes
# membership operator
print(' == membership operator ==')
print('x' in 'axc')
print('x' in ['a', 'x', 'c'])
print(len('axc'))
train.py | limpidezza/DeepSpeech | 0 | 6630153 | <filename>train.py
"""Trainer for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import io
from model_utils.model import DeepSpeech2Model
from model_utils.model_check import check_cuda, check_version
from data_utils.data import DataGenerator
from utils.utility import add_arguments, print_arguments
import paddle.fluid as fluid
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('num_epoch', int, 200, "# of training epochs.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
add_arg('num_iter_print', int, 100, "Every # batch for printing "
"train cost.")
add_arg('save_epoch', int, 10, "Save checkpoint and model params every # epochs.")
add_arg('num_samples', int, 10000, "The num of train samples.")
add_arg('learning_rate', float, 5e-4, "Learning rate.")
add_arg('max_duration', float, 27.0, "Longest audio duration allowed.")
add_arg('min_duration', float, 0.0, "Shortest audio duration allowed.")
add_arg('test_off', bool, False, "Turn off testing.")
add_arg('use_sortagrad', bool, True, "Use SortaGrad or not.")
add_arg('use_gpu', bool, True, "Use GPU or not.")
add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
add_arg('is_local', bool, True, "Use pserver or not.")
add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
"bi-directional RNNs. Not for GRU.")
add_arg('init_from_pretrained_model',str,
None,
"If None, the training starts from scratch, "
"otherwise, it resumes from the pre-trained model.")
add_arg('train_manifest', str,
'data/librispeech/manifest.train',
"Filepath of train manifest.")
add_arg('dev_manifest', str,
'data/librispeech/manifest.dev-clean',
"Filepath of validation manifest.")
add_arg('mean_std_path', str,
'data/librispeech/mean_std.npz',
"Filepath of normalizer's mean & std.")
add_arg('vocab_path', str,
'data/librispeech/vocab.txt',
"Filepath of vocabulary.")
add_arg('output_model_dir', str,
"./checkpoints/libri",
"Directory for saving checkpoints.")
add_arg('augment_conf_path',str,
'conf/augmentation.config',
"Filepath of augmentation configuration file (json-format).")
add_arg('specgram_type', str,
'linear',
"Audio feature type. Options: linear, mfcc.",
choices=['linear', 'mfcc'])
add_arg('shuffle_method', str,
'batch_shuffle_clipped',
"Shuffle method.",
choices=['instance_shuffle', 'batch_shuffle', 'batch_shuffle_clipped'])
# yapf: disable
args = parser.parse_args()
def train():
"""DeepSpeech2 training."""
# check if set use_gpu=True in paddlepaddle cpu version
check_cuda(args.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
if args.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
train_generator = DataGenerator(
vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
augmentation_config=io.open(args.augment_conf_path, mode='r', encoding='utf8').read(),
max_duration=args.max_duration,
min_duration=args.min_duration,
specgram_type=args.specgram_type,
place=place)
dev_generator = DataGenerator(
vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
augmentation_config="{}",
specgram_type=args.specgram_type,
place = place)
train_batch_reader = train_generator.batch_reader_creator(
manifest_path=args.train_manifest,
batch_size=args.batch_size,
sortagrad=args.use_sortagrad if args.init_from_pretrained_model is None else False,
shuffle_method=args.shuffle_method)
dev_batch_reader = dev_generator.batch_reader_creator(
manifest_path=args.dev_manifest,
batch_size=args.batch_size,
sortagrad=False,
shuffle_method=None)
ds2_model = DeepSpeech2Model(
vocab_size=train_generator.vocab_size,
num_conv_layers=args.num_conv_layers,
num_rnn_layers=args.num_rnn_layers,
rnn_layer_size=args.rnn_layer_size,
use_gru=args.use_gru,
share_rnn_weights=args.share_rnn_weights,
place=place,
init_from_pretrained_model=args.init_from_pretrained_model,
output_model_dir=args.output_model_dir)
ds2_model.train(
train_batch_reader=train_batch_reader,
dev_batch_reader=dev_batch_reader,
feeding_dict=train_generator.feeding,
learning_rate=args.learning_rate,
gradient_clipping=400,
batch_size=args.batch_size,
num_samples=args.num_samples,
num_epoch=args.num_epoch,
save_epoch=args.save_epoch,
num_iterations_print=args.num_iter_print,
test_off=args.test_off)
def main():
print_arguments(args)
train()
if __name__ == '__main__':
main()
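# Example invocation (the flag values below are illustrative; the manifest and
# vocabulary paths are simply the defaults declared above):
#
#   python train.py \
#       --batch_size=64 \
#       --num_epoch=50 \
#       --use_gpu=True \
#       --train_manifest=data/librispeech/manifest.train \
#       --dev_manifest=data/librispeech/manifest.dev-clean \
#       --vocab_path=data/librispeech/vocab.txt \
#       --output_model_dir=./checkpoints/libri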
tests/template_tests/filter_tests/test_truncatewords_html.py | Fak3/django | 19 | 6630154 | from django.template.defaultfilters import truncatewords_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_truncate_zero(self):
self.assertEqual(truncatewords_html('<p>one <a href="#">two - three <br>four</a> five</p>', 0), '')
def test_truncate(self):
self.assertEqual(
truncatewords_html('<p>one <a href="#">two - three <br>four</a> five</p>', 2),
'<p>one <a href="#">two …</a></p>',
)
def test_truncate2(self):
self.assertEqual(
truncatewords_html('<p>one <a href="#">two - three <br>four</a> five</p>', 4),
'<p>one <a href="#">two - three <br>four …</a></p>',
)
def test_truncate3(self):
self.assertEqual(
truncatewords_html('<p>one <a href="#">two - three <br>four</a> five</p>', 5),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate4(self):
self.assertEqual(
truncatewords_html('<p>one <a href="#">two - three <br>four</a> five</p>', 100),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate_unicode(self):
self.assertEqual(truncatewords_html('\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m …')
def test_truncate_complex(self):
self.assertEqual(
truncatewords_html('<i>Buenos días! ¿Cómo está?</i>', 3),
'<i>Buenos días! ¿Cómo …</i>',
)
def test_invalid_arg(self):
self.assertEqual(truncatewords_html('<p>string</p>', 'a'), '<p>string</p>')
src/lobbyist/library/error.py | chpatton013/lobbyist | 0 | 6630155 | <filename>src/lobbyist/library/error.py
from typing import Any, Dict, Tuple
class HttpError(Exception):
def __init__(
self,
code: int,
name: str,
description: str,
context: Dict[str, str] = {},
):
super().__init__(code, description)
self.code = code
self.name = name
self.description = description
self.context = context
def into_response(self) -> Tuple[Dict[str, Any], int]:
payload = {}
if self.name:
payload["error"] = self.name
if self.description:
payload["error_description"] = self.description
if self.context:
payload["error_context"] = self.context
return (payload, self.code)
class ClientError(HttpError):
pass
class BadRequestError(ClientError):
def __init__(self, description: str, **context):
super().__init__(400, "invalid_request", description, context)
class UnauthorizedError(ClientError):
def __init__(self, description: str):
super().__init__(401, "invalid_client", description)
class ForbiddenError(ClientError):
def __init__(self, description: str):
super().__init__(403, "", description)
class NotFoundError(ClientError):
def __init__(self, description: str):
super().__init__(404, "", description)
class NotAcceptableError(ClientError):
def __init__(self, **context):
super().__init__(406, "", "cannot meet accept constraints", context)
class ConflictError(ClientError):
def __init__(self, **context):
super().__init__(409, "", "integrity constraint failure", context)
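# Illustrative usage sketch (the surrounding web-framework wiring is assumed,
# not shown here): a request handler can raise any of the errors above and
# turn the caught exception into a JSON body plus HTTP status code:
#
#     try:
#         raise NotFoundError("no such user")
#     except HttpError as error:
#         payload, status = error.into_response()
#         # payload == {"error_description": "no such user"}, status == 404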
setup.py | ryonsherman/noise | 0 | 6630156 | #!/usr/bin/env python2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2014-2015, <NAME>"
__license__ = "MIT"
__version__ = "1.0" # setup version
import os, sys
from setuptools import setup
# get app version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
from noise import __version__ as _version_ # app version
# perform setup
setup(
name="Noise",
version=_version_,
url="https://github.com/ryonsherman/noise",
description="A static webpage generator",
#long_description=open('README.md').read(),
packages=['noise'],
package_dir={'noise': 'src/noise'},
#install_requires=[],
entry_points={'console_scripts': [
'noise-cmd = noise:main'
]}
)
website/events/ssr_views.py | abecede753/trax | 0 | 6630157 | import random
import datetime
from django.conf import settings
from django.contrib import messages
from django import forms
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import JsonResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.shortcuts import get_object_or_404, render
from django.views.decorators.cache import never_cache
from django.views.generic import CreateView, DetailView
from trax.choices import RACE_STATES
from tracks.models import Laptime
from vehicles.models import Vehicle
from .models import StaggeredStartRace, SSRParticipation
from .utils import get_user_car_list
class SSRCreateForm(forms.ModelForm):
class Meta:
model = StaggeredStartRace
fields = ['track', 'laps', 'algorithm', 'vehicle_class', 'comment']
@method_decorator(login_required, name='dispatch')
class StaggeredStartRaceCreator(CreateView):
model = StaggeredStartRace
form_class = SSRCreateForm
def get(self, request, *a, **k):
raise Http404()
def form_valid(self, form):
form.instance.host = self.request.user
form.instance.save()
form.instance.update_json()
return super(StaggeredStartRaceCreator, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
@method_decorator(never_cache, name='dispatch')
class StaggeredStartRaceDetail(DetailView):
model = StaggeredStartRace
def get_context_data(self, **kwargs):
context = super(StaggeredStartRaceDetail,
self).get_context_data(**kwargs)
url = settings.SERVER_NAME
url += reverse('staggeredstartrace_detail',
args=(self.object.pk,))
context['invite_url'] = url
context['vehicle_list'] = get_user_car_list(
user=self.request.user,
vehicle_class=self.object.vehicle_class, )
return context
def get(self, *a, **k):
obj = self.get_object()
if obj.status == RACE_STATES.planning:
obj.status = RACE_STATES.initializing
obj.save()
SSRParticipation.objects.get_or_create(player=self.request.user,
staggeredstartrace=obj)
obj.update_json()
if self.request.GET.get('start_in_secs'):
nowplus = random.randrange(0, 6)
nowplus += int(self.request.GET.get('start_in_secs'))
start_timestamp = datetime.datetime.now() + \
datetime.timedelta(milliseconds=nowplus * 1000)
self.object = self.get_object()
self.object.start_timestamp = start_timestamp
self.object.status = RACE_STATES.running
overtake_deficit = None
try:
overtake_deficit = int(self.request.GET.get('per_overtake_deficit_millis'))
            except (TypeError, ValueError):
pass
self.object.per_overtake_deficit_millis = overtake_deficit
self.object.save()
self.calculate_players_start_timestamps()
self.object.update_json()
return super(StaggeredStartRaceDetail, self).get(*a, **k)
def post(self, *a, **k):
if self.request.POST.get('personal_laptime'):
timestr = self.request.POST.get('personal_laptime')
if timestr != '-1':
minu, seco = timestr.split(':')
millis = int(minu) * 60 * 1000 + float(seco) * 1000
event = self.get_object()
particip = SSRParticipation.objects.get(
staggeredstartrace=event,
player__username=self.request.user.username)
lt = Laptime(
track=event.track,
player=self.request.user,
recorded=datetime.date.today(),
vehicle=particip.vehicle,
millis=millis,
millis_per_km=millis / event.track.route_length_km,
comment='participation in a staggered start race',
)
lt.save()
particip.laptime = lt
particip.save()
messages.add_message(
self.request, messages.SUCCESS,
'Thanks! (a nicer page will follow later. Maybe.)')
return render(self.request,
'events/staggeredstartrace_wait_for_more.html',
)
return super(StaggeredStartRaceDetail, self).get(*a, **k)
def calculate_players_start_timestamps(self):
parts = list(self.object.ssrparticipation_set.all().order_by(
'-estimated_laptime').exclude(estimated_laptime__isnull=True))
for p in parts:
p.estimated_net_millis = p.estimated_laptime * self.object.laps
total_millis = parts[0].estimated_net_millis
previous_racestart_dt = this_racestart_dt = self.object.start_timestamp
raceend_dt = self.object.start_timestamp + datetime.timedelta(
milliseconds=total_millis)
for idx, p in enumerate(parts):
this_racestart_dt = max(
                raceend_dt - datetime.timedelta(
milliseconds=p.estimated_net_millis) - datetime.timedelta(
milliseconds=self.object.per_overtake_deficit_millis * idx),
previous_racestart_dt
)
p.start_timestamp = this_racestart_dt
p.save()
previous_racestart_dt = this_racestart_dt
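# Worked example of the staggering computed above (numbers are illustrative):
# with two participants, A estimated at 120,000 ms net and B at 100,000 ms
# net, and per_overtake_deficit_millis = 5,000, the loop orders them slowest
# first, so the projected race end is start + 120,000 ms.
#
#     A (idx 0): start = max(120,000 - 120,000 - 0, race start)    -> race start
#     B (idx 1): start = max(120,000 - 100,000 - 5,000, A's start) -> +15,000 ms
#
# If both hit their estimates, B is projected to finish 5,000 ms before the
# nominal race end, i.e. the margin granted for the time lost in one overtake.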
def participants_list(request, pk=None):
ssr = get_object_or_404(StaggeredStartRace, pk=pk)
result = []
for lt in ssr.ssrparticipation_set.all():
vehicle = str(lt.vehicle)
result.append([lt.player.username, vehicle])
return JsonResponse({'data': result})
class ParticipationForm(forms.ModelForm):
class Meta:
model = Laptime
fields = ['vehicle',]
@login_required
def enlist(request, pk, vehicle_pk):
ssr = get_object_or_404(StaggeredStartRace, pk=pk)
vehicle = Vehicle.objects.get(pk=vehicle_pk)
multiplier = 1.0
if ssr.algorithm == 'PF':
multiplier = request.user.defaultspeedmultiplier
if ssr.algorithm == 'SA':
multiplier = max(1.0, request.user.defaultspeedmultiplier)
estimated_laptime = (vehicle.cc_millis_per_km *
ssr.track.route_length_km *
multiplier)
defaults = {'vehicle': vehicle,
'estimated_laptime': estimated_laptime}
participation, created = SSRParticipation.objects.get_or_create(
player=request.user, staggeredstartrace=ssr, defaults=defaults)
if not created:
participation.vehicle = vehicle
participation.estimated_laptime = estimated_laptime
participation.save()
ssr.update_json()
return JsonResponse({'result': 'OK'})
@login_required
def announce(request, pk):
ssr = get_object_or_404(StaggeredStartRace, pk=pk)
SSRParticipation.objects.get_or_create(
player=request.user, staggeredstartrace=ssr)
ssr.update_json()
return JsonResponse({'result': 'OK'})
@login_required
def check_for_newer_ssr(request, pk):
ssr = get_object_or_404(StaggeredStartRace, pk=pk)
newer = StaggeredStartRace.objects.filter(pk__gt=pk, host=ssr.host)
if newer:
return JsonResponse({'result': newer[0].get_absolute_url()})
return JsonResponse({'result': None})
@method_decorator(login_required, name='dispatch')
class StaggeredStartRaceStatus(DetailView):
model = StaggeredStartRace
def get(self, *a, **k):
obj = self.get_object()
players = []
for s in obj.ssrparticipation_set.all().order_by('player__username'):
if s.vehicle:
vehicle_name = s.vehicle.name
else:
vehicle_name = ''
players.append({'username': s.player.username,
'pk': s.player.pk,
'vehicle': vehicle_name,
'start_time': 0})
        return JsonResponse({'result': obj.status,
                             'players': players})
def get_players(self):
s = self.get_object()
return s.ssrparticipation_set.all()[0].player.username
ohlc/__main__.py | ubunatic/ohlc | 29 | 6630158 | <gh_stars>10-100
from ohlc.candles.app import main
main()
dltb/base/image.py | Petr-By/qtpyvis | 3 | 6630159 | """Definition of abstract classes for image handling.
The central data structure is :py:class:`Image`, a subclass of
:py:class:`Data`, specialized to work with images. It provides,
for example, properties like size and channels.
Relation to other `image` modules in the Deep Learning ToolBox:
* :py:mod:`dltb.util.image`: This defines general functions for image I/O and
basic image operations. That module should be standalone, not
(directly) requiring other parts of the toolbox (besides util) or
  third party modules (besides numpy). However, implementations for
the interfaces defined there are provided by third party modules,
which are automagically loaded if needed.
* :py:mod:`dltb.tool.image`: Extension of the :py:class:`Tool` API to provide
a class :py:class:`ImageTool` which can work on `Image` data objects.
  So that module obviously depends on :py:mod:`dltb.base.image` and
it may make use of functionality provided by :py:mod:`dltb.util.image`.
"""
# standard imports
from typing import Union, List, Tuple, Dict, Any, Optional, Iterable
from abc import abstractmethod, ABC
from collections import namedtuple
from enum import Enum
from pathlib import Path
import threading
import logging
import time
import math
# third-party imports
import numpy as np
# toolbox imports
from .observer import Observable
from .data import Data, DataDict, BatchDataItem
from .implementation import Implementable
from ..util.error import handle_exception
# logging
LOG = logging.getLogger(__name__)
# FIXME[todo]: create an interface to work with different image/data formats
# (as started in dltb.thirdparty.pil)
# * add a way to specify the default format for reading images
# - in dltb.util.image.imread(format='pil')
# - for Imagesources
# * add on the fly conversion for Data objects, e.g.
# data.pil should
# - check if property pil already exists
# - if not: invoke Image.as_pil(data)
# - store the result as property data.pil
# - return it
# * this method could be extended:
# - just store filename and load on demand
# - compute size on demand
#
# Imagelike is intended to be everything that can be used as
# an image.
#
# np.ndarray:
# The raw image data
# str:
# A URL.
Imagelike = Union[np.ndarray, str, Path]
Sizelike = Union[Tuple[int, int], List[int], str]
class Size(namedtuple('Size', ['width', 'height'])):
def __new__(cls, size, *args):
"""Allow to instantiate size from any `Sizeable` objects and
also from a pair of arguments.
"""
if isinstance(size, Size):
return size
if args:
return super().__new__(cls, size, *args)
if isinstance(size, str):
separator = next((sep for sep in size if sep in ",x"), None)
size = ((int(number) for number in size.split(separator))
if separator else int(size))
elif isinstance(size, float):
size = int(size)
if isinstance(size, int):
return super().__new__(cls, size, size)
return super().__new__(cls, *size)
def __eq__(self, size: Sizelike) -> bool:
"""Allow to compare `Size` to any `Sizeable` objects.
"""
return super().__eq__(Size(size))
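# A few ways the flexible constructor above can be used:
#
#     Size(224)           -> Size(width=224, height=224)
#     Size(64, 32)        -> Size(width=64, height=32)
#     Size((640, 480))    -> Size(width=640, height=480)
#     Size("640x480")     -> Size(width=640, height=480)
#     Size("640,480")     -> Size(width=640, height=480)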
Sizelike = Union[Sizelike, Size]
class Colorspace(Enum):
"""Enumeration of potential colorspace for representing images.
"""
RGB = 1
BGR = 2
HSV = 3
class Format:
# pylint: disable=too-few-public-methods
"""Data structure for representing image format. This includes
the datatype of the image, colorspace, and min and max values.
It may also include an image size.
"""
dtype = np.uint8
colorspace = Colorspace.RGB
_min_value = None
_max_value = None
size: Optional[Size] = None
@property
def min_value(self) -> Union[int, float]:
"""The minimal possible pixel value in an image.
"""
if self._min_value is not None:
return self._min_value
if issubclass(self.dtype, (int, np.integer)):
return 0
return 0.0
@property
def max_value(self) -> Union[int, float]:
"""The minimal possible pixel value in an image.
"""
if self._max_value is not None:
return self._max_value
if issubclass(self.dtype, (int, np.integer)):
return 255
return 1.0
class Image(DataDict):
"""A collection of image related functions.
"""
converters = {
'array': [
(np.ndarray, lambda array, copy: (array, copy)),
(Data, lambda data, copy: (data.array, copy)),
(BatchDataItem, lambda data, copy: (data.array, copy))
],
'image': [
(np.ndarray, Data)
]
}
@classmethod
def add_converter(cls, source: type, converter,
target: str = 'image') -> None:
"""Register a new image converter. An image converter is
a function, that can convert a given image into another
format.
Arguments
---------
source:
The input type of the converter, that is the type of
its first argument of the `convert` function.
convert:
The actual converter function. This function takes two
arguments: `image` is the image to convert and `bool` is
a flag indicating if the image data should be copied.
target:
The output format. This can be `image` (the converter
produces an instance of `Image`) or `array` (a numpy array),
or another string identifying a third party format, if
available.
"""
# FIXME[todo]: make this more flexible, use introspection,
# get rid off the copy parameter, deal with other arguments
if target not in cls.converters:
cls.converters[target] = [(source, converter)]
else:
cls.converters[target].append((source, converter))
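    # A hypothetical registration for a third-party type (PIL is only an
    # illustration here, not a dependency of this module):
    #
    #     Image.add_converter(PIL.Image.Image,
    #                         lambda img, copy: (np.asarray(img), False),
    #                         target='array')
    #
    # After such a call, Image.as_array() also accepts PIL images.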
@classmethod
def supported_formats(cls) -> Iterable[str]:
"""The names of supported image formats.
"""
return cls.converters.keys()
@classmethod
def as_array(cls, image: Imagelike, copy: bool = False,
dtype: Optional[type] = None,
colorspace: Colorspace = None) -> np.ndarray:
"""Get image-like object as numpy array. This may
act as the identity function in case `image` is already
an array, or it may extract the relevant property, or
it may even load an image from a filename.
Arguments
---------
image: Imagelike
An image like object to turn into an array.
copy: bool
A flag indicating if the data should be copied or
if the original data is to be returned (if possible).
dtype:
Numpy datatype, e.g., numpy.float32.
colorspace: Colorspace
The colorspace in which the pixels in the resulting
array are encoded. If no colorspace is given, or
if the colorspace of the input image Image is unknown,
no color conversion is performed.
"""
for source_class, converter in cls.converters['array']:
if isinstance(image, source_class):
LOG.debug("Using image converter for type %s (copy=%s)",
type(image), copy)
image, copy = converter(image, copy)
break
else:
if isinstance(image, Path):
image = str(image)
if isinstance(image, str):
# FIXME[hack]: local imports to avoid circular module
# dependencies ...
# pylint: disable=import-outside-toplevel
from dltb.util.image import imread
LOG.debug("Loading image '%s' using imread.", image)
image, copy = imread(image), False
else:
raise NotImplementedError(f"Conversion of "
f"{type(image).__module__}"
f".{type(image).__name__} to "
"numpy.ndarray is not implemented")
LOG.debug("Obtained image of shape %s, dtype=%s.",
image.shape, image.dtype)
if colorspace == Colorspace.RGB:
if len(image.shape) == 2: # grayscale image
rgb = np.empty(image.shape + (3,), dtype=image.dtype)
rgb[:, :, :] = image[:, :, np.newaxis]
image = rgb
copy = False
            elif len(image.shape) == 3 and image.shape[2] == 4:  # RGBA: drop alpha
image = image[:, :, :3]
if dtype is not None and dtype != image.dtype:
image = image.astype(dtype) # /256.
copy = False
if copy:
image = image.copy()
LOG.debug("Returning image of shape %s, dtype=%s.",
image.shape, image.dtype)
return image
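    # Typical calls of the conversion above (the file name is only an
    # illustration; loading from a path goes through dltb.util.image.imread):
    #
    #     array = Image.as_array('example.png')
    #     floats = Image.as_array(array, dtype=np.float32,
    #                             colorspace=Colorspace.RGB)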
@staticmethod
def as_data(image: Imagelike, copy: bool = False) -> 'Data':
"""Get image-like objec as :py:class:`Data` object.
"""
if isinstance(image, Data) and not copy:
return image
data = Image(image, copy=copy)
if isinstance(image, str):
data.add_attribute('url', image)
return data
@classmethod
def as_shape(cls, image: Imagelike) -> Tuple[int]:
if isinstance(image, np.ndarray):
return image.shape
if isinstance(image, Image):
return image.array.shape
raise TypeError(f"Cannot determine shape of {type(image)}")
def __new__(cls, image: Imagelike = None, array: np.ndarray = None,
copy: bool = False, **kwargs) -> None:
if isinstance(image, Image) and not copy:
return image # just reuse the given Image instance
return super().__new__(cls, image, array, copy, **kwargs)
def __init__(self, image: Imagelike = None, array: np.ndarray = None,
copy: bool = False, **kwargs) -> None:
# FIXME[todo]: it would be good to have the possibility to
# indicate desired attributes, e.g. 'array', 'pil', that
# should be filled during initialization.
if isinstance(image, Image) and not copy:
return # just reuse the given Image instance
try:
if image is not None:
array = self.as_array(image, copy=copy)
finally:
# make sure super().__init__() is called even if
# preparing the array fails. If ommitted, the object may
# be in an incomplete state, causing problems at destruction.
super().__init__(array=array, **kwargs)
if isinstance(image, str):
self.add_attribute('filename', image)
self.add_attribute('shape', array.shape)
def visualize(self, size=None) -> np.ndarray:
"""Provide a visualization of this image. The may be simply
the image (in case of a single image)
In case of a batch, it can be an image galery.
"""
if not self.is_batch:
return self.array
# self is a batch of images: create a matrix showing all images.
rows = int(math.sqrt(len(self)))
columns = math.ceil(len(self) / rows)
from ..util.image import imresize
if size is None:
size = (self[0].shape[1], self[0].shape[0])
matrix = np.zeros((size[1]*rows, size[0]*columns, 3),
dtype=self[0].array.dtype)
for idx, image in enumerate(self):
column = idx % columns
row = idx // columns
image = imresize(image.array, size)
if image.ndim == 2:
image = np.expand_dims(image, axis=2).repeat(3, axis=2)
matrix[row*size[1]:(row+1)*size[1],
column*size[0]:(column+1)*size[0]] = image
return matrix
def size(self) -> Size:
"""The size of this image.
"""
if self.has_attribute('array'):
return Size(*self.shape[1::-1])
class ImageAdapter(ABC):
"""If an object is an ImageAdapter, it can adapt images to
some internal representation. It has to implement the
:py:class:`image_to_internal` and :py:class:`internal_to_image`
methods. Such an object can then be extended to do specific
image processing.
The :py:class:`ImageAdapter` keeps a map of known
:py:class:`ImageExtension`. If a subclass of
:py:class:`ImageAdapter` also subclasses a base class of these
extensions it will be adapted to also subclass the corresponding
extension, e.g., a :py:class:`ImageAdapter` that is a `Tool` will
become an `ImageTool`, provided the mapping of `Tool` to
`ImageTool` has been registered with the `ImageAdapter` class.
Creating `ImageTool` as an :py:class:`ImageExtension` of
`base=Tool` will automatically do the registration.
"""
_image_extensions: Dict[type, type] = {}
def __init_subclass__(cls, **kwargs) -> None:
super().__init_subclass__(**kwargs)
for base, replacement in ImageAdapter._image_extensions.items():
if base in cls.__mro__ and replacement not in cls.__mro__:
new_bases = []
found = False
for base_class in cls.__bases__:
if base_class is base:
found = True
new_bases.append(replacement)
continue
if not found and issubclass(base_class, base):
new_bases.append(replacement)
found = True
new_bases.append(base_class)
LOG.debug("ImageAdapter.__init_subclass__(%s): %s -> %s",
cls, cls.__bases__, new_bases)
cls.__bases__ = tuple(new_bases)
def image_to_internal(self, image: Imagelike) -> np.ndarray:
"""
"""
# FIXME[hack]: batch handling
from dltb.base.data import Data
if isinstance(image, Data) and image.is_batch:
result = np.ndarray((len(image), 227, 227, 3))
for index, img in enumerate(image.array):
result[index] = self._image_to_internal(img)
return result
elif isinstance(image, list):
result = np.ndarray((len(image), 227, 227, 3))
for index, img in enumerate(image):
result[index] = self._image_to_internal(img)
return result
image = self._image_to_internal(image)
return image[np.newaxis]
@abstractmethod
def _image_to_internal(self, image: Imagelike) -> Any:
"to be implemented by subclasses"
@abstractmethod
def internal_to_image(self, data: Any) -> Imagelike:
"to be implemented by subclasses"
class ImageExtension(ImageAdapter, ABC):
# pylint: disable=abstract-method
"""An :py:class:`ImageExtension` extends some base class to be able to
    process images. For this it makes use of the :py:class:`ImageAdapter`
interface.
In addition to deriving from :py:class:`ImageAdapter`, the
:py:class:`ImageExtension` introduces some "behind the scene
magic": a class `ImageTool` that is declared as an `ImageExtension`
with base `Tool` is registered with the :py:class:`ImageAdapter`
class, so that any common subclass of :py:class:`ImageAdapter`
and `Tool` will automagically become an `ImageTool`.
"""
def __init_subclass__(cls, base: type = None, **kwargs) -> None:
# pylint: disable=arguments-differ
super().__init_subclass__(**kwargs)
if base is not None:
new_bases = [ImageAdapter, base]
for base_class in cls.__bases__:
if base_class is not ImageExtension:
new_bases.append(base_class)
cls.__bases__ = tuple(new_bases)
ImageAdapter._image_extensions[base] = cls
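# A sketch of the mechanism described in the two docstrings above (the names
# Tool and ImageTool are assumptions, standing in for any base/extension pair
# registered this way):
#
#     class ImageTool(ImageExtension, base=Tool):
#         ...
#
#     class Detector(ImageAdapter, Tool):
#         ...
#
# Because ImageTool was registered for base=Tool, ImageAdapter's
# __init_subclass__ rewrites Detector's bases so that it derives from
# ImageTool rather than directly from Tool.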
class ImageObservable(Observable, method='image_changed',
changes={'image_changed'}):
"""A base for classes that can create and change images.
"""
@property
def image(self) -> Imagelike:
"""Provide the current image.
"""
class ImageGenerator(ImageObservable):
# pylint: disable=too-few-public-methods
"""An image :py:class:`Generator` can generate images.
"""
# FIXME[todo]: spell this out
class ImageIO:
# pylint: disable=too-few-public-methods
"""An abstract interface to read, write and display images.
"""
class ImageReader(ImageIO, Implementable):
"""An :py:class:`ImageReader` can read images from file or URL.
The :py:meth:`read` method is the central method of this class.
"""
def __str__(self) -> str:
return type(self).__module__ + '.' + type(self).__name__
def read(self, filename: str, **kwargs) -> np.ndarray:
"""Read an image from a file or URL.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageReader, but does not implement "
"the read method.")
class ImageWriter(ImageIO, Implementable):
"""An :py:class:`ImageWriter` can write iamges to files or upload them
to a given URL. The :py:meth:`write` method is the central method
of this class.
"""
def write(self, filename: str, image: Imagelike, **kwargs) -> None:
"""Write an `image` to a file with the given `filename`.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageWriter, but does not implement "
"the write method.")
class ImageResizer(Implementable):
"""FIXME[todo]: there is also the network.resize module, which may be
incorporated!
Image resizing is implemented by various libraries, using slightly
incompatible interfaces. The idea of this class is to provide a
well defined resizing behaviour, that offers most of the functionality
found in the different libraries. Subclasses can be used to map
this interface to specific libraries.
Enlarging vs. Shrinking
-----------------------
Interpolation:
* Linear, cubic, ...
* Mean value:
Cropping
--------
* location: center, random, or fixed
* boundaries: if the crop size is larger than the image: either
fill boundaries with some value or return smaller image
Parameters
----------
* size:
scipy.misc.imresize:
size : int, float or tuple
- int - Percentage of current size.
- float - Fraction of current size.
- tuple - Size of the output image.
* zoom : float or sequence, optional
in scipy.ndimage.zoom:
"The zoom factor along the axes. If a float, zoom is the same
for each axis. If a sequence, zoom should contain one value
for each axis."
* downscale=2, float, optional
in skimage.transform.pyramid_reduce
"Downscale factor.
* preserve_range:
skimage.transform.pyramid_reduce:
"Whether to keep the original range of values. Otherwise, the
input image is converted according to the conventions of
img_as_float."
* interp='nearest'
in scipy.misc.imresize:
"Interpolation to use for re-sizing
('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic')."
* order: int, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"The order of the spline interpolation, default is 3. The
order has to be in the range 0-5."
0: Nearest-neighbor
1: Bi-linear (default)
2: Bi-quadratic
3: Bi-cubic
4: Bi-quartic
5: Bi-quintic
* mode: str, optional
in scipy.misc.imresize:
"The PIL image mode ('P', 'L', etc.) to convert arr
before resizing."
* mode: str, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest',
'reflect' or 'wrap'). Default is 'constant'"
- 'constant' (default): Pads with a constant value.
- 'reflect': Pads with the reflection of the vector mirrored
on the first and last values of the vector along each axis.
- 'nearest':
- 'wrap': Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end
values are used to pad the beginning.
* cval: scalar, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"Value used for points outside the boundaries of the input
if mode='constant'. Default is 0.0"
* prefilter: bool, optional
in scipy.ndimage.zoom:
"The parameter prefilter determines if the input is
pre-filtered with spline_filter before interpolation
(necessary for spline interpolation of order > 1). If False,
it is assumed that the input is already filtered. Default is
True."
* sigma: float, optional
in skimage.transform.pyramid_reduce:
"Sigma for Gaussian filter. Default is 2 * downscale / 6.0
which corresponds to a filter mask twice the size of the
scale factor that covers more than 99% of the Gaussian
distribution."
Libraries providing resizing functionality
------------------------------------------
Scikit-Image:
* skimage.transform.resize:
image_resized = resize(image, (image.shape[0]//4, image.shape[1]//4),
anti_aliasing=True)
Documentation:
https://scikit-image.org/docs/dev/api/skimage.transform.html
#skimage.transform.resize
* skimage.transform.rescale:
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
* skimage.transform.downscale_local_mean:
image_downscaled = downscale_local_mean(image, (4, 3))
https://scikit-image.org/docs/dev/api/skimage.transform.html
#skimage.transform.downscale_local_mean
Pillow:
* PIL.Image.resize:
OpenCV:
* cv2.resize:
cv2.resize(image,(width,height))
Mahotas:
* mahotas.imresize:
mahotas.imresize(img, nsize, order=3)
This function works in two ways: if nsize is a tuple or list of
integers, then the result will be of this size; otherwise, this
function behaves the same as mh.interpolate.zoom
* mahotas.interpolate.zoom
imutils:
* imutils.resize
Scipy (deprecated):
* scipy.misc.imresize:
The documentation of scipy.misc.imresize says that imresize is
deprecated! Use skimage.transform.resize instead. But it seems
skimage.transform.resize gives different results from
scipy.misc.imresize.
https://stackoverflow.com/questions/49374829/scipy-misc-imresize-deprecated-but-skimage-transform-resize-gives-different-resu
SciPy: scipy.misc.imresize is deprecated in SciPy 1.0.0,
and will be removed in 1.3.0. Use Pillow instead:
numpy.array(Image.fromarray(arr).resize())
* scipy.ndimage.interpolation.zoom:
* scipy.ndimage.zoom:
* skimage.transform.pyramid_reduce: Smooth and then downsample image.
"""
def resize(self, image: np.ndarray,
size: Size, **_kwargs) -> np.ndarray:
"""Resize an image to the given size.
Arguments
---------
image:
The image to be scaled.
size:
The target size.
"""
if type(self).scale is ImageResizer.scale:
raise NotImplementedError(f"{type(self)} claims to be an "
"ImageResizer, but implements neither "
"the resize nor the scale method.")
image_size = image.shape[:2]
scale = (size[0]/image_size[0], size[1]/image_size[1])
return self.scale(image, scale=scale)
def scale(self, image: np.ndarray,
scale: Union[float, Tuple[float, float]],
**kwargs) -> np.ndarray:
"""Scale an image by a given factor.
Arguments
---------
image:
The image to be scaled.
scale:
Either a single float value being the common
scale factor for horizontal and vertical direction, or
a pair of scale factors for these two axes.
"""
if type(self).resize is ImageResizer.resize:
raise NotImplementedError(f"{type(self)} claims to be an "
"ImageResizer, but implements neither "
"the scale nor the resize method.")
if isinstance(scale, float):
scale = (scale, scale)
image_size = image.shape[:2]
size = Size(int(image_size[0] * scale[0]),
int(image_size[1] * scale[1]))
return self.resize(image, size=size, **kwargs)
@staticmethod
def crop(image: Imagelike, size: Size, **_kwargs) -> np.ndarray:
"""Crop an :py:class:`Image` to a given size.
If no position is provided, a center crop will be performed.
"""
# FIXME[todo]: deal with sizes extending the original size
# FIXME[todo]: allow center/random/position crop
image = Image.as_array(image)
old_size = image.shape[:2]
center = old_size[0]//2, old_size[1]//2
point1 = center[0] - size[0]//2, center[1] - size[1]//2
point2 = point1[0] + size[0], point1[1] + size[1]
return image[point1[0]:point2[0], point1[1]:point2[1]]
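# A minimal sketch of how a subclass might map this interface to a concrete
# library (kept as a comment so this module does not acquire an optional
# dependency). The use of skimage.transform.resize and the class name
# SkimageResizer are illustrative assumptions, not part of this module.
#
# class SkimageResizer(ImageResizer):
#     def resize(self, image: np.ndarray, size: Size,
#                **kwargs) -> np.ndarray:
#         from skimage.transform import resize as sk_resize
#         # skimage expects the output shape as (rows, columns),
#         # that is (height, width)
#         return sk_resize(image, (size.height, size.width),
#                          preserve_range=True).astype(image.dtype)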
class ImageWarper(Implementable):
"""An :py:class:`ImageWarper` can warp an image by applying a
transformation that maps key points to reference positions,
as computed by :py:meth:`compute_transformation`.
"""
@staticmethod
def warp(image: Imagelike, transformation: np.ndarray,
size: Size) -> np.ndarray:
"""Warp an image by applying a transformation.
To be implemented by subclasses.
"""
@staticmethod
def compute_transformation(points: np.ndarray,
reference: np.ndarray) -> np.ndarray:
"""Obtain a transformation for aligning key points to
reference positions.
To be implemented by subclasses.
Arguments
---------
points:
A sequence of points to be mapped onto the reference points,
given as (x,y) coordinates
reference:
A sequence with the same number of points serving as reference
points to which `points` should be moved.
Result
------
transformation:
An affine transformation matrix. This is a 2x3 matrix,
allowing one to compute [x',y'] = matrix * [x,y,1].
Note
----
Affine transformations are more general than similarity
transformations, which can always be decomposed into a
combination of scaling, rotating, and translating. General
affine transformations cannot be decomposed in this way.
For a similarity transformation, the matrix has the following entries:
```
cos(theta) * s -sin(theta) * s t_x
sin(theta) * s cos(theta) * s t_y
```
with theta being the rotation angle, s the scaling factor and
t the translation.
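Example
-------
A worked illustration (values chosen for demonstration only): a pure
scaling by s=2 with no rotation (theta=0) and a translation of
(t_x, t_y) = (5, 10) corresponds to the matrix
```
2  0   5
0  2  10
```
which maps a point (x, y) to (2*x + 5, 2*y + 10).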
"""
@classmethod
def align(cls, image: Imagelike, points, reference,
size: Sizelike) -> np.ndarray:
"""Align an image by applying an (affine) transformation that maps
key points to reference positions.
Arguments
---------
image:
The image to align.
points:
A sequence of points to be mapped onto the reference points,
given as (x,y) coordinates
reference:
A sequence with the same number of points serving as reference
points to which `points` should be moved.
size:
The size of the resulting image.
Result
------
aligned:
The aligned image.
"""
transformation = cls.compute_transformation(points, reference)
return cls.warp(image, transformation, size)
class ImageOperator:
"""An :py:class:`ImageOperator` can be applied to an image to
obtain some transformation of that image.
"""
def __call__(self, image: np.ndarray) -> np.ndarray:
"""Perform the actual operation.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageOperator, but does not "
"implement the `__call__` method.")
def transform(self, source: str, target: str) -> None:
"""Transform a source file into a target file.
"""
# FIXME[concept]: this requires the util.image module!
# pylint: disable=import-outside-toplevel
from ..util.image import imread, imwrite
imwrite(target, self(imread(source)))
def transform_data(self, image: Image,
target: str, source: str = None) -> None:
"""Apply image operator to an :py:class:`Image` data object.
"""
image.add_attribute(target, value=self(image.get_attribute(source)))
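# A minimal sketch of a concrete operator (the class name Flip is an
# illustrative assumption, not part of this module):
#
# class Flip(ImageOperator):
#     """Flip an image upside down."""
#     def __call__(self, image: np.ndarray) -> np.ndarray:
#         # reverse the vertical (first) axis of the image array
#         return image[::-1]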
class ImageDisplay(ImageIO, Implementable, ImageGenerator.Observer):
"""An :py:class:`ImageDisplay` can display images. Typically, it will
use some graphical user interface to open a window in which the
image is displayed. It may also provide some additional controls
to adapt display properties.
Blocking and non-blocking display
---------------------------------
There are two ways in which an image can be displayed. In blocking
mode, the execution of the main program is paused while the image
is displayed and is only continued when the display is closed. In
non-blocking mode, the execution of the main program continues
while the image is displayed.
The blocking behaviour can be controlled by the `blocking`
argument. It can be set to `True` (running the GUI event loop in
the calling thread and thereby blocking it) or `False` (running
the GUI event loop in some other thread). It can also be set to
`None` (meaning that no GUI event loop is started; this is
similar to the non-blocking mode, but it will usually result
in an unresponsive display window if no additional actions are
undertaken; see the section on "GUI Event loop" below for more
information).
Ending the display
------------------
Different conditions can be set up to determine when the display
should end. The most natural one is to wait until the display
window is closed (using the standard controls of the window
system). Additionally, the display can be terminated when a key is
pressed on the keyboard or after a given amount of time.
If run in a multi-threaded setting, it is also possible to end
the display programmatically by calling :py:meth:`close`.
The next question is what should happen once the display has ended.
Again, the most natural way is to close the window. However,
if more images are going to be displayed, it may be more suitable
to leave the window on screen and just remove the image until the
next image is available.
GUI Event loop
--------------
An :py:class:`ImageDisplay` displays the image using some
graphical user interface (GUI). Such a GUI usually requires an
event loop to be run in order to stay responsive, that is, to react
to mouse and other actions, like resizing, closing and even
repainting the window. The event loop regularly checks if such
events have occurred and processes them. Running a display without
an event loop usually results in unpleasant behaviour and hence
should be avoided.
Nevertheless, running an event loop is not always straightforward.
Different GUI libraries use different concepts. For example, some
libraries require that event loops are run in the main thread of
the application, which cannot always be realized (for example, it
would not be possible to realize a non-blocking display in the
main thread). The :py:class:`ImageDisplay` provides different
means to deal with such problems.
Usage scenarios
---------------
Example 1: show an image in a window and block until the window is
closed:
>>> display = Display() # blocking=True (default)
>>> display.show(imagelike)
Example 2: show an image in a window without blocking (the event loop
for the window will be run in a separate thread):
>>> display = Display(blocking=False)
>>> display.show(imagelike)
Example 3: show an image in a window without blocking. No event loop
is started for the window and it is the caller's responsibility to
regularly call display.process_events() to keep the interface
responsive.
>>> display = Display(blocking=None)
>>> display.show(imagelike)
Example 4: show an image for a duration of five seconds.
After 5 seconds the display is closed.
>>> display = Display()
>>> display.show(imagelike, timeout=5.0)
Example 5: show multiple images, each for five seconds, but don't close
the window in between:
>>> with Display() as display:
>>> for image in images:
>>> display.show(image, timeout=5.0)
Example 6: presenter:
>>> def presenter(display, video):
>>> for frame in video:
>>> if display.closed:
>>> break
>>> display.show(frame)
>>>
>>> display = Display()
>>> display.present(presenter, (video,))
"""
_event_loop: Optional[threading.Thread]
def __init__(self, module: Union[str, List[str]] = None,
blocking: bool = True, **kwargs) -> None:
# pylint: disable=unused-argument
super().__init__(**kwargs)
# _opened: a flag indicating the current state of the display
# window: True = window is open (visible), False = window is closed
self._opened: bool = False
# _blocking: a flag indicating if the display should operate
# in blocking mode (True) or non-blocking mode (False).
self._blocking: bool = blocking
# _entered: a counter for tracing how often the context manager
# is used (usually it should only be used once!)
self._entered: int = 0
# _event_loop: some Thread object, referring to the thread running the
# event loop. If None, then currently no event loop is running.
self._event_loop = None
# _presentation: a Thread object running a presentation, initiated
# by the method `present`
self._presentation: Optional[threading.Thread] = None
@property
def blocking(self) -> bool:
"""Blocking behaviour of this image :py:class:`Display`. `True` means
that an event loop is run in the calling thread and execution
of the program is blocked while showing an image, `False`
means that the event loop is executed in a background thread
while the calling thread immediately returns. `None` means
that no event loop is started. The caller is responsible for
processing events by regularly calling either
:py:meth:`process_events` or :py:meth:`show` (which internally
calls :py:meth:`process_events`).
"""
return self._blocking
@blocking.setter
def blocking(self, blocking: bool) -> None:
if blocking is self._blocking:
return # nothing to do
if not self.closed:
raise RuntimeError("Cannot change blocking state of open Display.")
self._blocking = blocking
#
# context manager
#
def __enter__(self) -> 'ImageDisplay':
self._entered += 1
if self._entered > 1:
LOG.warning("Entering Display multiple times: %d", self._entered)
else:
LOG.debug("Entering Display")
self.open()
return self
def __exit__(self, _exception_type, _exception_value, _traceback) -> None:
LOG.debug("Exiting Display (%d)", self._entered)
self._entered -= 1
if self._entered == 0:
self.close()
#
# public interface
#
def show(self, image: Imagelike, blocking: bool = None, close: bool = None,
timeout: float = None, **kwargs) -> None:
"""Display the given image.
This method may optionally pause execution of the main program
to display the image, if the wait_for_key or timeout arguments
are given. If both are given, the first event that occurs
will stop pausing.
Arguments
---------
image: Imagelike
The image to display. This may be a single image or a
batch of images.
blocking: bool
A flag indicating if the image should be shown in blocking
mode (`True`) or non-blocking mode (`False`). If no value
is specified, the value of the property :py:attr:`blocking`
is used.
close: bool
A flag indicating if the display should be closed after
showing. Closing the display will also end all event
loops that are running. If no value is provided, the
display will be kept open if it was already open when
this method was called, and it will be closed if it
was closed before.
wait_for_key: bool
A flag indicating if the display should pause execution
and wait for a key press.
timeout: float
Time in seconds to pause execution.
"""
if self._presentation is not None:
blocking = None
else:
blocking = self._blocking if blocking is None else blocking
if close is None:
close = self.closed and (blocking is True)
# make sure the window is open
if self.closed:
if self._presentation is threading.current_thread():
raise RuntimeError("Presentation is trying to use closed "
"ImageDisplay.")
self.open()
# show the image
array = Image.as_array(image, dtype=np.uint8)
LOG.debug("Showing image of shape %s, blocking=%s, close=%s, "
"timeout=%s, event loop=%s, presentation=%s",
array.shape, blocking, close, timeout,
self.event_loop_is_running(), self._presentation is not None)
self._show(array, **kwargs)
# run the event loop
if blocking is True:
if not self.event_loop_is_running():
self._run_blocking_event_loop(timeout=timeout)
elif blocking is False:
if timeout is not None:
LOG.warning("Setting timeout (%f) has no effect "
"for non-blocking image Display", timeout)
if not self.event_loop_is_running():
self._run_nonblocking_event_loop()
elif blocking is None:
self._process_events()
# close the window if desired
if close:
if self._entered > 0:
LOG.warning("Closing image Display inside a context manager.")
self.close()
def present(self, presenter, args=(), kwargs={}) -> None:
# pylint: disable=dangerous-default-value
"""Run the given presenter in a background thread while
executing the GUI event loop in the calling thread (which
some GUI libraries require to be the main thread).
The presenter will get the display as its first argument,
and `args`, `kwargs` as additional arguments. The presenter
may update the display by calling the :py:meth:`show` method.
The presenter should observe the display's `closed` property
and finish presentation once it is set to `True`.
Arguments
---------
presenter:
A function expecting a display object as first argument
and `args`, and `kwargs` as additional arguments.
"""
def target() -> None:
# pylint: disable=broad-except
LOG.info("ImageDisplay[background]: calling presenter")
try:
presenter(self, *args, **kwargs)
except BaseException as exception:
LOG.error("Unhandled exception in presentation.")
handle_exception(exception)
finally:
self.close()
with self:
LOG.info("ImageDisplay[main]: Starting presentation")
self._presentation = threading.Thread(target=target)
self._presentation.start()
self._run_blocking_event_loop()
def open(self) -> None:
"""Open this :py:class:`ImageDisplay`.
"""
if not self._opened and self._presentation is None:
self._open()
self._opened = True
def close(self) -> None:
"""Close this :py:class:`ImageDisplay`. This should also stop
all background threads, like event loops or ongoing presentations.
"""
LOG.info("Closing ImageDisplay "
"(opened=%s, presentation=%s, event loop=%s)",
self._opened, self._presentation is not None,
self.event_loop_is_running())
if self._opened:
self._opened = False
self._close()
presentation = self._presentation
if presentation is not None:
# we have started a presentation in a background Thread and
# hence we will wait for this presentation to finish. In
# order for this to work smoothly, the presentation should
# regularly check the display.closed property and exit
# (before calling display.show) if that flag is True.
if presentation is not threading.current_thread():
presentation.join()
self._presentation = None
event_loop = self._event_loop
if isinstance(event_loop, threading.Thread):
if event_loop is not threading.current_thread():
event_loop.join()
self._event_loop = None
@property
def opened(self) -> bool:
"""Check if this image :py:class:`Display` is opened, meaning
the display window is shown and an event loop is running.
"""
return self._opened
@property
def closed(self) -> bool:
"""Check if this image :py:class:`Display` is closed, meaning
that no window is shown (and no event loop is running).
"""
return not self._opened
#
# ImageObserver
#
def image_changed(self, tool, change) -> None:
"""Implementation of the :py:class:`ImageObserver` interface.
The display will be updated if the image has changed.
"""
if change.image_changed:
self.show(tool.image)
#
# methods to be implemented by subclasses
#
def _open(self) -> None:
"""Open the display window. The function is only called if
no window is open yet.
"""
raise NotImplementedError(f"{type(self)} claims to be an ImageDisplay, "
"but does not implement an _open() method.")
def _show(self, image: np.ndarray, wait_for_key: bool = False,
timeout: float = None, **kwargs) -> None:
raise NotImplementedError(f"{type(self).__name__} claims to "
"be an ImageDisplay, but does not implement "
"the _show method.")
def _close(self) -> None:
raise NotImplementedError(f"{type(self)} claims to be an ImageDisplay, "
"but does not implement an _close() method.")
def _process_events(self) -> None:
raise NotImplementedError(f"{type(self)} claims to be an ImageDisplay, "
"but does not implement "
"_process_events().")
def _run_event_loop(self) -> None:
if self.blocking is True:
self._run_blocking_event_loop()
elif self.blocking is False:
self._run_nonblocking_event_loop()
def _dummy_event_loop(self, timeout: float = None) -> None:
# pylint: disable=broad-except
interval = 0.1
start = time.time()
try:
print("ImageDisplay: start dummy event loop. "
f"closed={self.closed}")
while (not self.closed and
(timeout is None or time.time() < start + timeout)):
self._process_events()
time.sleep(interval)
except BaseException as exception:
LOG.error("Unhandled exception in event loop")
handle_exception(exception)
finally:
LOG.info("ImageDisplay: ended dummy event loop (closed=%s).",
self.closed)
self._event_loop = None
self.close()
def _run_blocking_event_loop(self, timeout: float = None) -> None:
self._event_loop = threading.current_thread()
self._dummy_event_loop(timeout)
def _run_nonblocking_event_loop(self) -> None:
"""Start a dummy event loop. This event loop will run in the
background and regularly trigger event processing. This may be
slightly less responsive than running the official event loop,
but it has the advantage that it can be done from a background
thread, returning control of the main thread to the caller.
In other words: this function is intended to realize a non-blocking
image display with responsive image window.
FIXME[todo]: check how this behaves under heavy load (GPU computation)
and if in case of problems, resorting to a QThread would improve
the situation.
"""
if self.event_loop_is_running():
raise RuntimeError("Only one event loop is allowed.")
self._event_loop = \
threading.Thread(target=self._nonblocking_event_loop)
self._event_loop.start()
def _nonblocking_event_loop(self) -> None:
self._dummy_event_loop()
def event_loop_is_running(self) -> bool:
"""Check if an event loop is currently running.
"""
return self._event_loop is not None
# ------------------------------------------------------------------------
# FIXME[old/todo]: currently used by ./contrib/styletransfer.py ...
def run(self, tool):
"""Monitor the operation of a Processor. This will observe
the processor and update the display whenever new data
are available.
"""
self.observe(tool, interests=ImageGenerator.Change('image_changed'))
try:
print("Starting thread")
thread = threading.Thread(target=tool.loop)
thread.start()
# FIXME[old/todo]: run the main event loop of the GUI to get
# a responsive interface - this is probably framework
# dependent and should be realized in different subclasses
# before we can design a general API.
# Also we would need some stopping mechanism to end the
# display (by key press or buttons, but also programmatically)
# self._application.exec_()
print("Application main event loop finished")
except KeyboardInterrupt:
print("Keyboard interrupt.")
tool.stop()
thread.join()
print("Thread joined")
# FIXME[old/todo]: currently used by ./dltb/thirdparty/qt.py ...
@property
def active(self) -> bool:
"""Check if this :py:class:`ImageDisplay` is active.
"""
return True # FIXME[hack]
class Location:
"""A :py:class:`Location` identifies an area in a two-dimensional
space. A typical location is a bounding box (realized by the
subclass :py:class:`BoundingBox`), but this abstract definition
also allows for alternative ways to describe a location.
"""
def __init__(self, points) -> None:
pass
def __contains__(self, point) -> bool:
"""Checks if the given point lies in this :py:class:`Location`.
To be implemented by subclasses.
"""
def mark_image(self, image: Imagelike, color=(1, 0, 0)):
"""Mark this :py:class:`Location` in some given image.
Arguments
---------
image:
The image in which this :py:class:`Location` is to be marked.
color:
The color to be used for marking.
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for marking "
f"an image.")
def extract_from_image(self, image: Imagelike) -> np.ndarray:
"""Extract this :py:class:`Location` from a given image.
Arguments
---------
image:
The image from which the location is to be extracted.
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for extraction "
f"from an image.")
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale this :py:class:`Location` by the given factor.
All coordinates will be multiplied by this value.
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for scaling.")
class PointsBasedLocation:
"""A :py:class:`PointsBasedLocation` is a :py:class:`Location`
that can be described by points, like a polygon area, or more
simple: a bounding box.
Attributes
----------
_points: np.ndarray
An array of shape (n, 2), providing n points in form of (x, y)
coordinates.
"""
def __init__(self, points: np.ndarray) -> None:
super().__init__()
self._points = points
def __contains__(self, point) -> bool:
"""Check whether the given (x, y) point lies within the axis-aligned
bounding box spanned by the points of this location.
"""
return ((self._points[:, 0].min() <= point[0] <=
self._points[:, 0].max()) and
(self._points[:, 1].min() <= point[1] <=
self._points[:, 1].max()))
def __getitem__(self, idx):
return self._points[idx]
def mark_image(self, image: np.ndarray, color=(1, 0, 0)):
"""Mark this :py:class:`PointsBasedLocation` in an image.
"""
for point in self._points:
image[max(point[1]-1, 0):min(point[1]+1, image.shape[0]),
max(point[0]-1, 0):min(point[0]+1, image.shape[1])] = color
def extract_from_image(self, image: Imagelike) -> np.ndarray:
"""Extract this :py:class:`Location` from a given image.
Arguments
---------
image:
The image from which this :py:class:`PointsBasedLocation`
is to be extracted.
"""
image = Image.as_array(image)
height, width = image.shape[:2]
point1_x, point1_y = self._points.min(axis=0)
point2_x, point2_y = self._points.max(axis=0)
point1_x, point1_y = max(0, int(point1_x)), max(0, int(point1_y))
point2_x, point2_y = \
min(width, int(point2_x)), min(height, int(point2_y))
return image[point1_y:point2_y, point1_x:point2_x]
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale the :py:class:`Location`.
Arguments
---------
factor:
The scaling factor. This can either be a float, or a pair
of floats in which case the first number is the horizontal (x)
scaling factor and the second number is the vertical (y)
scaling factor.
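Example (illustrative values only):
>>> marks = Landmarks(np.array([[2.0, 4.0], [6.0, 8.0]]))
>>> marks.scale(0.5)
>>> marks.points
array([[1., 2.],
       [3., 4.]])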
"""
if reference == 'origin':
reference = np.array((0, 0))
elif reference == 'center':
reference = self._points.mean(axis=0)
else:
reference = np.asarray(reference)
self._points = (self._points - reference) * factor + reference
@property
def points(self) -> np.ndarray:
"""The points specifying this :py:class:`PointsBasedLocation`.
This is an array of shape (n, 2), providing n points in form of (x, y)
coordinates.
"""
return self._points
def __len__(self):
return len(self._points)
class Landmarks(PointsBasedLocation):
"""Landmarks are an ordered list of points.
"""
def __len__(self) -> int:
return 0 if self._points is None else len(self._points)
def __str__(self) -> str:
return f"Landmarks with {len(self)} points."
class BoundingBox(PointsBasedLocation):
# pylint: disable=invalid-name
"""A bounding box describes a rectangular area in an image.
"""
def __init__(self, x1=None, y1=None, x2=None, y2=None,
x=None, y=None, width=None, height=None) -> None:
super().__init__(np.ndarray((2, 2)))
if x1 is not None:
self.x1 = x1
elif x is not None:
self.x1 = x
if y1 is not None:
self.y1 = y1
elif y is not None:
self.y1 = y
if x2 is not None:
self.x2 = x2
elif width is not None:
self.width = width
if y2 is not None:
self.y2 = y2
elif height is not None:
self.height = height
@property
def x1(self):
"""The horizontal position of the left border of this
:py:class:`BoundingBox`.
"""
return self._points[0, 0]
@x1.setter
def x1(self, x1):
self._points[0, 0] = x1
@property
def y1(self):
"""The vertical position of the upper border of this
:py:class:`BoundingBox`.
"""
return self._points[0, 1]
@y1.setter
def y1(self, y1):
self._points[0, 1] = y1
@property
def x2(self):
"""The horizontal position of the right border of this
:py:class:`BoundingBox`.
"""
return self._points[1, 0]
@x2.setter
def x2(self, x2):
self._points[1, 0] = max(x2, self.x1) # Avoid negative width
@property
def y2(self):
"""The vertical position of the lower border of this
:py:class:`BoundingBox`.
"""
return self._points[1, 1]
@y2.setter
def y2(self, y2):
self._points[1, 1] = max(y2, self.y1) # Avoid negative height
@property
def x(self):
"""The horizontal position of the left border of this
:py:class:`BoundingBox`.
"""
return self.x1
@x.setter
def x(self, x):
self.x1 = x
@property
def y(self):
"""The vertical position of the upper border of this
:py:class:`BoundingBox`.
"""
return self.y1
@y.setter
def y(self, y):
self.y1 = y
@property
def width(self):
"""The width of the :py:class:`BoundingBox`.
"""
return self.x2 - self.x1
@width.setter
def width(self, width):
self.x2 = self.x1 + width
@property
def height(self):
"""The height of the :py:class:`BoundingBox`.
"""
return self.y2 - self.y1
@height.setter
def height(self, height):
self.y2 = self.y1 + height
@property
def size(self) -> Size:
"""The :py:class:`Size` of this :py:class:`BoundingBox`.
"""
return Size(self.width, self.height)
def mark_image(self, image: np.ndarray, color=None) -> None:
color = color or (0, 255, 0)
size = image.shape[1::-1]
thickness = max(1, max(size)//300)
t1 = thickness//2
t2 = (thickness+1)//2
x1 = max(int(self.x1), t2)
y1 = max(int(self.y1), t2)
x2 = min(int(self.x2), size[0]-t1)
y2 = min(int(self.y2), size[1]-t1)
# print(f"mark_image[{self}]: image size={size}"
# f"shape={image.shape}, {image.dtype}:"
# f"{image.min()}-{image.max()}, box:({x1}, {y1}) - ({x2}, {y2})")
for offset in range(-t2, t1):
image[(y1+offset, y2+offset), x1:x2] = color
image[y1:y2, (x1+offset, x2+offset)] = color
def crop(self, image: Imagelike, size: Optional[Size] = None) -> Imagelike:
"""Crop the bounding box from an image.
Arguments
---------
size:
The size of the resulting crop. If different from the size
of this :py:class:`BoundingBox`, the crop should be resized
to the requested size (resizing is not yet implemented, see
the FIXME below).
"""
image = Image.as_array(image)
if size is None:
size = self.size
img_height, img_width, img_channels = image.shape
result = np.ndarray((size.height, size.width, img_channels),
image.dtype)
x1_source, x1_target = max(0, self.x1), max(-self.x1, 0)
y1_source, y1_target = max(0, self.y1), max(-self.y1, 0)
x2_source, x2_target = min(img_width, self.x2), \
min(size.width - (self.x2 - img_width), size.width)
y2_source, y2_target = min(img_height, self.y2), \
min(size.height - (self.y2 - img_height), size.height)
result[y1_target: y2_target, x1_target:x2_target] = \
image[y1_source: y2_source, x1_source:x2_source]
if size != self.size:
pass # FIXME[todo]
return result
def extract_from_image(self, image: Imagelike, padding: bool = True,
copy: bool = None) -> np.ndarray:
"""Extract the region described by the bounding box from an image.
"""
image = Image.as_array(image)
image_size = image.shape[1::-1]
channels = 1 if image.ndim < 3 else image.shape[2]
x1, x2 = int(self.x1), int(self.x2)
y1, y2 = int(self.y1), int(self.y2)
invalid = (x1 < 0 or x2 > image_size[0] or
y1 < 0 or y2 > image_size[1])
if invalid and padding:
copy = True
else:
# no padding: resize bounding box to become valid
x1, x2 = max(x1, 0), min(x2, image_size[0])
y1, y2 = max(y1, 0), min(y2, image_size[1])
invalid = False
width, height = x2 - x1, y2 - y1
if copy:
shape = (height, width) + ((channels, ) if channels > 1 else ())
box = np.zeros(shape, dtype=image.dtype)
slice_box0 = slice(max(-y1, 0), height-max(y2-image_size[1], 0))
slice_box1 = slice(max(-x1, 0), width-max(x2-image_size[0], 0))
slice_image0 = slice(max(y1, 0), min(y2, image_size[1]))
slice_image1 = slice(max(x1, 0), min(x2, image_size[0]))
LOG.debug("Extracting[%s]: image[%s, %s] -> box[%s, %s]", self,
slice_image0, slice_image1, slice_box0, slice_box1)
box[slice_box0, slice_box1] = image[slice_image0, slice_image1]
else:
box = image[y1:y2, x1:x2]
return box
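# A minimal usage sketch (illustrative values only): extracting a 2x2 patch
# from a 4x4 image.
#
# image = np.zeros((4, 4), dtype=np.uint8)
# box = BoundingBox(x1=1, y1=1, x2=3, y2=3)
# patch = box.extract_from_image(image)  # patch.shape == (2, 2)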
def __str__(self) -> str:
"""String representation of this :py:class:`BoundingBox`.
"""
# return f"({self.x1},{self.y1})-({self.x2},{self.y2})"
# return (f"BoundingBox at ({self.x}, {self.y})"
# f" of size {self.width} x {self.height}")
return (f"BoundingBox from ({self.x1}, {self.y1})"
f" to ({self.x2}, {self.y2})")
def __add__(self, other: 'BoundingBox') -> 'BoundingBox':
"""Adding two bounding boxes means to create a new bounding box
that bounds both of them.
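Example (illustrative values only):
>>> box_a = BoundingBox(x1=0, y1=0, x2=10, y2=10)
>>> box_b = BoundingBox(x1=5, y1=5, x2=20, y2=15)
>>> print(box_a + box_b)
BoundingBox from (0.0, 0.0) to (20.0, 15.0)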
"""
return BoundingBox(x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2))
def __mul__(self, other: 'BoundingBox') -> 'BoundingBox':
"""Multiplying two bounding boxes means to form the intersection.
"""
return BoundingBox(x1=max(self.x1, other.x1),
y1=max(self.y1, other.y1),
x2=min(self.x2, other.x2),
y2=min(self.y2, other.y2))
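# A minimal usage sketch (illustrative values only): intersecting two
# overlapping boxes yields their common area.
#
# box_a = BoundingBox(x1=0, y1=0, x2=10, y2=10)
# box_b = BoundingBox(x1=5, y1=5, x2=20, y2=15)
# print(box_a * box_b)  # BoundingBox from (5.0, 5.0) to (10.0, 10.0)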
def area(self):
"""Compute the area of this :py:class:`BoundingBox`.
"""
return self.width * self.height
@property
def center(self) -> Tuple[float, float]:
"""The center of this bounding box as an (x,y) pair.
"""
return ((self.x1 + self.x2)/2, (self.y1 + self.y2)/2)
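# A minimal usage sketch of the BoundingBox interface (illustrative
# values only):
#
# box = BoundingBox(x=10, y=20, width=30, height=40)
# print(box)         # BoundingBox from (10.0, 20.0) to (40.0, 60.0)
# print(box.area())  # 1200.0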
class Region:
"""A region in an image, optionally annotated with attributes.
Attributes
----------
_location:
The location of the region. This can be a :py:class:`BoundingBox`
or any other description of a location (a contour, etc.).
_attributes: dict
A dictionary with further attributes describing the region,
e.g., a label.
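Example (illustrative values only):
>>> region = Region(BoundingBox(x=0, y=0, width=10, height=10),
...                 label='face', confidence=0.9)
>>> region.label
'face'
>>> len(region)
2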
"""
_location: Location
_attributes = None
color_min_confidence: np.ndarray = np.asarray((255., 0., 0.)) # red
color_max_confidence: np.ndarray = np.asarray((0., 255., 0.)) # green
def __init__(self, location: Location, **attributes):
self._location = location
self._attributes = attributes
def __str__(self) -> str:
return f"{self._location} with {len(self._attributes)} attributes"
def __contains__(self, point) -> bool:
return point in self._location
def __getattr__(self, name: str) -> Any:
if name in self._attributes:
return self._attributes[name]
raise AttributeError(f"Region has no attribute '{name}'. Valid "
f"attributes are: {self._attributes.keys()}")
def __len__(self) -> int:
return len(self._attributes)
@property
def location(self):
"""The :py:class:`Location` describing this :py:class:`Region`.
"""
return self._location
def mark_image(self, image: Imagelike, color: Tuple = None):
"""Mark this :py:class:`Region` in a given image.
Arguments
---------
image:
The image into which the region is to be marked.
color:
The color to be used for marking.
"""
# FIXME[concept]: how to proceed for images that can not (easily)
# be modified in place (e.g. filename/URL) -> should we rather
# return the marked image?
if color is None and 'confidence' in self._attributes:
confidence = max(0, min(1.0, self._attributes['confidence']))
mark_color = ((1-confidence) * self.color_min_confidence +
confidence * self.color_max_confidence)
color = tuple(mark_color.astype(np.uint8))
image = Image.as_array(image)
self._location.mark_image(image, color=color)
def extract_from_image(self, image: Imagelike, **kwargs) -> np.ndarray:
"""Extract this :py:class:`Region` from a given image.
Arguments
---------
image:
The image from which the region is to be extracted.
Result
------
patch:
A numpy array (`dtype=np.uint8`) containing the extracted
region.
"""
return self._location.extract_from_image(image, **kwargs)
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale this region by a given factor.
Arguments
---------
factor:
The scaling factor. This can either be a float, or a pair
of floats in which case the first number is the horizontal (x)
scaling factor and the second number is the vertical (y)
scaling factor.
reference:
The reference point. The default is `'origin'`, meaning
all coordinates are scaled with respect to the origin.
Another special value is `'center'`, meaning that
the center of the region should be taken as reference
point.
"""
if self._location is not None:
self._location.scale(factor, reference)
| """Defintion of abstract classes for image handling.
The central data structure is :py:class:`Image`, a subclass of
:py:class:`Data`, specialized to work with images. It provides,
for example, properties like size and channels.
Relation to other `image` modules in the Deep Learning ToolBox:
* :py:mod:`dltb.util.image`: This defines general functions for image I/O and
basic image operations. That module should be standalone, not
(directly) requiring other parts of the toolbox (besides util) or
third party modules (besides numpy). However, implementation for
the interfaces defined there are provided by third party modules,
which are automagically loaded if needed.
* :py:mod:`dltb.tool.image`: Extension of the :py:class:`Tool` API to provide
a class :py:class:`ImageTool` which can work on `Image` data objects.
So that module obviously depends on :py:mod:``dltb.base.image` and
it may make use of functionality provided by :py:mod:`dltb.util.image`.
"""
# standard imports
from typing import Union, List, Tuple, Dict, Any, Optional, Iterable
from abc import abstractmethod, ABC
from collections import namedtuple
from enum import Enum
from pathlib import Path
import threading
import logging
import time
import math
# third-party imports
import numpy as np
# toolbox imports
from .observer import Observable
from .data import Data, DataDict, BatchDataItem
from .implementation import Implementable
from ..util.error import handle_exception
# logging
LOG = logging.getLogger(__name__)
# FIXME[todo]: create an interface to work with different image/data formats
# (as started in dltb.thirdparty.pil)
# * add a way to specify the default format for reading images
# - in dltb.util.image.imread(format='pil')
# - for Imagesources
# * add on the fly conversion for Data objects, e.g.
# data.pil should
# - check if property pil already exists
# - if not: invoke Image.as_pil(data)
# - store the result as property data.pil
# - return it
# * this method could be extended:
# - just store filename and load on demand
# - compute size on demand
#
# Imagelike is intended to be everything that can be used as
# an image.
#
# np.ndarray:
# The raw image data
# str:
# A URL.
Imagelike = Union[np.ndarray, str, Path]
Sizelike = Union[Tuple[int, int], List[int], str]
class Size(namedtuple('Size', ['width', 'height'])):
def __new__(cls, size, *args):
"""Allow to instantiate size from any `Sizeable` objects and
also from a pair of arguments.
"""
if isinstance(size, Size):
return size
if args:
return super().__new__(cls, size, *args)
if isinstance(size, str):
separator = next((sep for sep in size if sep in ",x"), None)
size = ((int(number) for number in size.split(separator))
if separator else int(size))
elif isinstance(size, float):
size = int(size)
if isinstance(size, int):
return super().__new__(cls, size, size)
return super().__new__(cls, *size)
def __eq__(self, size: Sizelike) -> bool:
"""Allow to compare `Size` to any `Sizeable` objects.
"""
return super().__eq__(Size(size))
Sizelike = Union[Sizelike, Size]
class Colorspace(Enum):
"""Enumeration of potential colorspace for representing images.
"""
RGB = 1
BGR = 2
HSV = 3
class Format:
# pylint: disable=too-few-public-methods
"""Data structure for representing image format. This includes
the datatype of the image, colorspace, and min and max values.
It may also include an image size.
"""
dtype = np.uint8
colorspace = Colorspace.RGB
_min_value = None
_max_value = None
size: Optional[Size] = None
@property
def min_value(self) -> Union[int, float]:
"""The minimal possible pixel value in an image.
"""
if self._min_value is not None:
return self._min_value
if issubclass(self.dtype, (int, np.integer)):
return 0
return 0.0
@property
def max_value(self) -> Union[int, float]:
"""The minimal possible pixel value in an image.
"""
if self._max_value is not None:
return self._max_value
if issubclass(self.dtype, (int, np.integer)):
return 255
return 1.0
class Image(DataDict):
"""A collection of image related functions.
"""
converters = {
'array': [
(np.ndarray, lambda array, copy: (array, copy)),
(Data, lambda data, copy: (data.array, copy)),
(BatchDataItem, lambda data, copy: (data.array, copy))
],
'image': [
(np.ndarray, Data)
]
}
@classmethod
def add_converter(cls, source: type, converter,
target: str = 'image') -> None:
"""Register a new image converter. An image converter is
a function, that can convert a given image into another
format.
Arguments
---------
source:
The input type of the converter, that is the type of
its first argument of the `convert` function.
convert:
The actual converter function. This function takes two
arguments: `image` is the image to convert and `bool` is
a flag indicating if the image data should be copied.
target:
The output format. This can be `image` (the converter
produces an instance of `Image`) or `array` (a numpy array),
or another string identifying a third party format, if
available.
"""
# FIXME[todo]: make this more flexible, use introspection,
# get rid off the copy parameter, deal with other arguments
if target not in cls.converters:
cls.converters[target] = [(source, converter)]
else:
cls.converters[target].append((source, converter))
@classmethod
def supported_formats(cls) -> Iterable[str]:
"""The names of supported image formats.
"""
return cls.converters.keys()
@classmethod
def as_array(cls, image: Imagelike, copy: bool = False,
dtype: Optional[type] = None,
colorspace: Colorspace = None) -> np.ndarray:
"""Get image-like object as numpy array. This may
act as the identity function in case `image` is already
an array, or it may extract the relevant property, or
it may even load an image from a filename.
Arguments
---------
image: Imagelike
An image like object to turn into an array.
copy: bool
A flag indicating if the data should be copied or
if the original data is to be returned (if possible).
dtype:
Numpy datatype, e.g., numpy.float32.
colorspace: Colorspace
The colorspace in which the pixels in the resulting
array are encoded. If no colorspace is given, or
if the colorspace of the input image Image is unknown,
no color conversion is performed.
"""
for source_class, converter in cls.converters['array']:
if isinstance(image, source_class):
LOG.debug("Using image converter for type %s (copy=%s)",
type(image), copy)
image, copy = converter(image, copy)
break
else:
if isinstance(image, Path):
image = str(image)
if isinstance(image, str):
# FIXME[hack]: local imports to avoid circular module
# dependencies ...
# pylint: disable=import-outside-toplevel
from dltb.util.image import imread
LOG.debug("Loading image '%s' using imread.", image)
image, copy = imread(image), False
else:
raise NotImplementedError(f"Conversion of "
f"{type(image).__module__}"
f".{type(image).__name__} to "
"numpy.ndarray is not implemented")
LOG.debug("Obtained image of shape %s, dtype=%s.",
image.shape, image.dtype)
if colorspace == Colorspace.RGB:
if len(image.shape) == 2: # grayscale image
rgb = np.empty(image.shape + (3,), dtype=image.dtype)
rgb[:, :, :] = image[:, :, np.newaxis]
image = rgb
copy = False
elif len(image.shape) == 3 and image.shape[2] == 4: # RGBD
image = image[:, :, :3]
if dtype is not None and dtype != image.dtype:
image = image.astype(dtype) # /256.
copy = False
if copy:
image = image.copy()
LOG.debug("Returning image of shape %s, dtype=%s.",
image.shape, image.dtype)
return image
@staticmethod
def as_data(image: Imagelike, copy: bool = False) -> 'Data':
"""Get image-like objec as :py:class:`Data` object.
"""
if isinstance(image, Data) and not copy:
return image
data = Image(image, copy=copy)
if isinstance(image, str):
data.add_attribute('url', image)
return data
@classmethod
def as_shape(cls, image: Imagelike) -> Tuple[int]:
if isinstance(image, np.ndarray):
return image.shape
if isinstance(image, Image):
return image.array.shape
raise TypeError(f"Cannot determine shape of {type(image)}")
def __new__(cls, image: Imagelike = None, array: np.ndarray = None,
copy: bool = False, **kwargs) -> None:
if isinstance(image, Image) and not copy:
return image # just reuse the given Image instance
return super().__new__(cls, image, array, copy, **kwargs)
def __init__(self, image: Imagelike = None, array: np.ndarray = None,
copy: bool = False, **kwargs) -> None:
# FIXME[todo]: it would be good to have the possibility to
# indicate desired attributes, e.g. 'array', 'pil', that
# should be filled during initialization.
if isinstance(image, Image) and not copy:
return # just reuse the given Image instance
try:
if image is not None:
array = self.as_array(image, copy=copy)
finally:
# make sure super().__init__() is called even if
# preparing the array fails. If ommitted, the object may
# be in an incomplete state, causing problems at destruction.
super().__init__(array=array, **kwargs)
if isinstance(image, str):
self.add_attribute('filename', image)
self.add_attribute('shape', array.shape)
def visualize(self, size=None) -> np.ndarray:
"""Provide a visualization of this image. The may be simply
the image (in case of a single image)
In case of a batch, it can be an image galery.
"""
if not self.is_batch:
return self.array
# self is a batch of images: create a matrix showing all images.
rows = int(math.sqrt(len(self)))
columns = math.ceil(len(self) / rows)
from ..util.image import imresize
if size is None:
size = (self[0].shape[1], self[0].shape[0])
matrix = np.zeros((size[1]*rows, size[0]*columns, 3),
dtype=self[0].array.dtype)
for idx, image in enumerate(self):
column = idx % columns
row = idx // columns
image = imresize(image.array, size)
if image.ndim == 2:
image = np.expand_dims(image, axis=2).repeat(3, axis=2)
matrix[row*size[1]:(row+1)*size[1],
column*size[0]:(column+1)*size[0]] = image
return matrix
def size(self) -> Size:
"""The size of this image.
"""
if self.has_attribute('array'):
return Size(*self.shape[1::-1])
class ImageAdapter(ABC):
"""If an object is an ImageAdapter, it can adapt images to
some internal representation. It has to implement the
:py:class:`image_to_internal` and :py:class:`internal_to_image`
methods. Such an object can then be extended to do specific
image processing.
The :py:class:`ImageAdapter` keeps a map of known
:py:class:`ImageExtension`. If a subclass of
:py:class:`ImageAdapter` also subclasses a base class of these
extensions it will be adapted to also subclass the corresponding
extension, e.g., a :py:class:`ImageAdapter` that is a `Tool` will
become an `ImageTool`, provided the mapping of `Tool` to
`ImageTool` has been registered with the `ImageAdapter` class.
Creating `ImageTool` as an :py:class:`ImageExtension` of
`base=Tool` will automatically do the registration.
"""
_image_extensions: Dict[type, type] = {}
def __init_subclass__(cls, **kwargs) -> None:
super().__init_subclass__(**kwargs)
for base, replacement in ImageAdapter._image_extensions.items():
if base in cls.__mro__ and replacement not in cls.__mro__:
new_bases = []
found = False
for base_class in cls.__bases__:
if base_class is base:
found = True
new_bases.append(replacement)
continue
if not found and issubclass(base_class, base):
new_bases.append(replacement)
found = True
new_bases.append(base_class)
LOG.debug("ImageAdapter.__init_subclass__(%s): %s -> %s",
cls, cls.__bases__, new_bases)
cls.__bases__ = tuple(new_bases)
def image_to_internal(self, image: Imagelike) -> np.ndarray:
"""
"""
# FIXME[hack]: batch handling
from dltb.base.data import Data
if isinstance(image, Data) and image.is_batch:
result = np.ndarray((len(image), 227, 227, 3))
for index, img in enumerate(image.array):
result[index] = self._image_to_internal(img)
return result
elif isinstance(image, list):
result = np.ndarray((len(image), 227, 227, 3))
for index, img in enumerate(image):
result[index] = self._image_to_internal(img)
return result
image = self._image_to_internal(image)
return image[np.newaxis]
@abstractmethod
def _image_to_internal(self, image: Imagelike) -> Any:
"to be implemented by subclasses"
@abstractmethod
def internal_to_image(self, data: Any) -> Imagelike:
"to be implemented by subclasses"
class ImageExtension(ImageAdapter, ABC):
# pylint: disable=abstract-method
"""An :py:class:`ImageExtension` extends some base class to be able to
process images. In that it makes use of the :py:class:`ImageAdapter`
interface.
In addition to deriving from :py:class:`ImageAdapter`, the
:py:class:`ImageExtension` introduces some "behind the scene
magic": a class `ImageTool` that is declared as an `ImageExtension`
with base `Tool` is registered with the :py:class:`ImageAdapter`
class, so that any common subclass of :py:class:`ImageAdapter`
and `Tool` will automagically become an `ImageTool`.
"""
def __init_subclass__(cls, base: type = None, **kwargs) -> None:
# pylint: disable=arguments-differ
super().__init_subclass__(**kwargs)
if base is not None:
new_bases = [ImageAdapter, base]
for base_class in cls.__bases__:
if base_class is not ImageExtension:
new_bases.append(base_class)
cls.__bases__ = tuple(new_bases)
ImageAdapter._image_extensions[base] = cls
class ImageObservable(Observable, method='image_changed',
changes={'image_changed'}):
"""A base for classes that can create and change images.
"""
@property
def image(self) -> Imagelike:
"""Provide the current image.
"""
class ImageGenerator(ImageObservable):
# pylint: disable=too-few-public-methods
"""An image :py:class:`Generator` can generate images.
"""
# FIXME[todo]: spell this out
class ImageIO:
# pylint: disable=too-few-public-methods
"""An abstract interface to read, write and display images.
"""
class ImageReader(ImageIO, Implementable):
"""An :py:class:`ImageReader` can read images from file or URL.
The :py:meth:`read` method is the central method of this class.
"""
def __str__(self) -> str:
return type(self).__module__ + '.' + type(self).__name__
def read(self, filename: str, **kwargs) -> np.ndarray:
"""Read an image from a file or URL.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageReader, but does not implement "
"the read method.")
class ImageWriter(ImageIO, Implementable):
"""An :py:class:`ImageWriter` can write iamges to files or upload them
to a given URL. The :py:meth:`write` method is the central method
of this class.
"""
def write(self, filename: str, image: Imagelike, **kwargs) -> None:
"""Write an `image` to a file with the given `filename`.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageWriter, but does not implement "
"the write method.")
class ImageResizer(Implementable):
"""FIXME[todo]: there is also the network.resize module, which may be
incorporated!
Image resizing is implemented by various libraries, using slightly
incompatible interfaces. The idea of this class is to provide a
well defined resizing behaviour, that offers most of the functionality
found in the different libraries. Subclasses can be used to map
this interface to specific libraries.
Enlarging vs. Shrinking
-----------------------
Interpolation:
* Linear, cubic, ...
* Mean value:
Cropping
--------
* location: center, random, or fixed
* boundaries: if the crop size is larger than the image: either
fill boundaries with some value or return smaller image
Parameters
----------
* size:
scipy.misc.imresize:
size : int, float or tuple
- int - Percentage of current size.
- float - Fraction of current size.
- tuple - Size of the output image.
* zoom : float or sequence, optional
in scipy.ndimage.zoom:
"The zoom factor along the axes. If a float, zoom is the same
for each axis. If a sequence, zoom should contain one value
for each axis."
* downscale=2, float, optional
in skimage.transform.pyramid_reduce
"Downscale factor.
* preserve_range:
skimage.transform.pyramid_reduce:
"Whether to keep the original range of values. Otherwise, the
input image is converted according to the conventions of
img_as_float."
* interp='nearest'
in scipy.misc.imresize:
"Interpolation to use for re-sizing
('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic')."
* order: int, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"The order of the spline interpolation, default is 3. The
order has to be in the range 0-5."
0: Nearest-neighbor
1: Bi-linear (default)
2: Bi-quadratic
3: Bi-cubic
4: Bi-quartic
5: Bi-quintic
* mode: str, optional
in scipy.misc.imresize:
"The PIL image mode ('P', 'L', etc.) to convert arr
before resizing."
* mode: str, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest',
'reflect' or 'wrap'). Default is 'constant'"
- 'constant' (default): Pads with a constant value.
- 'reflect': Pads with the reflection of the vector mirrored
on the first and last values of the vector along each axis.
- 'nearest':
- 'wrap': Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end
values are used to pad the beginning.
* cval: scalar, optional
in scipy.ndimage.zoom, skimage.transform.pyramid_reduce:
"Value used for points outside the boundaries of the input
if mode='constant'. Default is 0.0"
* prefilter: bool, optional
in scipy.ndimage.zoom:
"The parameter prefilter determines if the input is
pre-filtered with spline_filter before interpolation
(necessary for spline interpolation of order > 1). If False,
it is assumed that the input is already filtered. Default is
True."
* sigma: float, optional
in skimage.transform.pyramid_reduce:
"Sigma for Gaussian filter. Default is 2 * downscale / 6.0
which corresponds to a filter mask twice the size of the
scale factor that covers more than 99% of the Gaussian
distribution."
Libraries providing resizing functionality
------------------------------------------
Scikit-Image:
* skimage.transform.resize:
image_resized = resize(image, (image.shape[0]//4, image.shape[1]//4),
anti_aliasing=True)
Documentation:
https://scikit-image.org/docs/dev/api/skimage.transform.html
#skimage.transform.resize
* skimage.transform.rescale:
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
* skimage.transform.downscale_local_mean:
image_downscaled = downscale_local_mean(image, (4, 3))
https://scikit-image.org/docs/dev/api/skimage.transform.html
#skimage.transform.downscale_local_mean
Pillow:
* PIL.Image.resize:
OpenCV:
* cv2.resize:
cv2.resize(image,(width,height))
Mahotas:
* mahotas.imresize:
mahotas.imresize(img, nsize, order=3)
This function works in two ways: if nsize is a tuple or list of
integers, then the result will be of this size; otherwise, this
function behaves the same as mh.interpolate.zoom
* mahotas.interpolate.zoom
imutils:
* imutils.resize
Scipy (deprecated):
* scipy.misc.imresize:
The documentation of scipy.misc.imresize says that imresize is
deprecated! Use skimage.transform.resize instead. But it seems
skimage.transform.resize gives different results from
scipy.misc.imresize.
https://stackoverflow.com/questions/49374829/scipy-misc-imresize-deprecated-but-skimage-transform-resize-gives-different-resu
SciPy: scipy.misc.imresize is deprecated in SciPy 1.0.0,
and will be removed in 1.3.0. Use Pillow instead:
numpy.array(Image.fromarray(arr).resize())
* scipy.ndimage.interpolation.zoom:
* scipy.ndimage.zoom:
* skimage.transform.pyramid_reduce: Smooth and then downsample image.
"""
def resize(self, image: np.ndarray,
size: Size, **_kwargs) -> np.ndarray:
"""Resize an image to the given size.
Arguments
---------
image:
The image to be scaled.
size:
The target size.
"""
if type(self).scale is ImageResizer.scale:
raise NotImplementedError(f"{type(self)} claims to be an "
"ImageResizer, but does not implement "
"the resize method.")
image_size = image.shape[:2]
scale = (size[0]/image_size[0], size[1]/image_size[1])
return self.scale(image, scale=scale)
def scale(self, image: np.ndarray,
scale: Union[float, Tuple[float, float]],
**kwargs) -> np.ndarray:
"""Scale an image image by a given factor.
Arguments
---------
image:
The image to be scaled.
scale:
Either a single float value being the common
scale factor for horizontal and vertical direction, or
a pair of scale factors for these two axes.
"""
if type(self).resize is ImageResizer.resize:
raise NotImplementedError(f"{type(self)} claims to be an "
"ImageResizer, but does not implement "
"the scale method.")
if isinstance(scale, float):
scale = (scale, scale)
image_size = image.shape[:2]
size = Size(int(image_size[0] * scale[0]),
int(image_size[1] * scale[1]))
return self.resize(image, size=size, **kwargs)
@staticmethod
def crop(image: Imagelike, size: Size, **_kwargs) -> np.ndarray:
"""Crop an :py:class:`Image` to a given size.
If no position is provided, a center crop will be performed.
"""
# FIXME[todo]: deal with sizes extending the original size
# FIXME[todo]: allow center/random/position crop
image = Image.as_array(image)
old_size = image.shape[:2]
center = old_size[0]//2, old_size[1]//2
point1 = center[0] - size[0]//2, center[1] - size[1]//2
point2 = point1[0] + size[0], point1[1] + size[1]
return image[point1[0]:point2[0], point1[1]:point2[1]]
class ImageWarper(Implementable):
"""
"""
@staticmethod
def warp(image: Imagelike, transformation: np.ndarray,
size: Size) -> np.ndarray:
"""Warp an image by applying a transformation.
To be implemented by subclasses.
"""
@staticmethod
def compute_transformation(points: np.ndarray,
reference: np.ndarray) -> np.ndarray:
"""Obtain a tranformation for aligning key points to
reference positions
To be implemented by subclasses.
Arguments
---------
points:
A sequence of points to be mapped onto the reference points,
given as (x,y) coordinates
reference:
A sequence with the same number of points serving as reference
points to which `points` should be moved.
Result
------
transformation:
A affine transformation matrix. This is a 2x3 matrix,
allowing to compute [x',y'] = matrix * [x,y,1].
Note
----
Affine transformations are more general than similarity
transformations, which can always be decomposed into a
combination of scaling, rotating, and translating. General
affine tansformations can not be decomposed in this way.
The affine transformation matrix contains the following entries:
```
cos(theta) * s -sin(theta) * s t_x
sin(theta) * s cos(theta) * s t_y
```
with theta being the rotation angle, s the scaling factor and
t the translation.
"""
@classmethod
def align(cls, image: Imagelike, points, reference,
size: Sizelike) -> np.ndarray:
"""Align an image by applying an (affine) transformation that maps
key points to reference positions.
Arguments
---------
image:
The image to align.
points:
A sequence of points to be mapped onto the reference points,
given as (x,y) coordinates
reference:
A sequence with the same number of points serving as reference
points to which `points` should be moved.
size:
The size of the resulting image.
Result
------
aligned:
The aligned image.
"""
        transformation = cls.compute_transformation(points, reference)
        return cls.warp(image, transformation, size)
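# Illustrative sketch (added; not part of the original module): how a concrete
# ImageWarper implementation is meant to be used for key-point alignment.
# The point coordinates and output size below are hypothetical values.
def _example_warper_usage(warper: ImageWarper, image: Imagelike) -> np.ndarray:
    """Align an image so that two detected key points end up at fixed
    reference coordinates in a 112x112 output image."""
    detected = np.asarray([[38.0, 52.0], [74.0, 51.0]])    # (x, y) key points
    reference = np.asarray([[38.3, 51.7], [73.5, 51.5]])   # target positions
    # equivalent to warper.align(image, detected, reference, (112, 112)):
    transformation = warper.compute_transformation(detected, reference)
    return warper.warp(image, transformation, Size(112, 112))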
class ImageOperator:
"""An :py:class:`ImageOperator` can be applied to an image to
obtain some transformation of that image.
"""
def __call__(self, image: np.ndarray) -> np.ndarray:
"""Perform the actual operation.
"""
raise NotImplementedError(f"{self.__class__.__name__} claims to "
"be an ImageOperator, but does not "
"implement the `__call__` method.")
def transform(self, source: str, target: str) -> None:
"""Transform a source file into a target file.
"""
# FIXME[concept]: this requires the util.image module!
# pylint: disable=import-outside-toplevel
from ..util.image import imread, imwrite
imwrite(target, self(imread(source)))
def transform_data(self, image: Image,
target: str, source: str = None) -> None:
"""Apply image operator to an :py:class:`Image` data object.
"""
image.add_attribute(target, value=self(image.get_attribute(source)))
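# Illustrative sketch (added; not part of the original module): a minimal
# concrete ImageOperator.  Subclasses only need to provide __call__; the
# inherited transform()/transform_data() helpers then work unchanged.
class _ExampleFlipOperator(ImageOperator):
    """Flip an image horizontally (mirror along the vertical axis)."""
    def __call__(self, image: np.ndarray) -> np.ndarray:
        # reverse the column axis; works for grayscale and color arrays
        return image[:, ::-1]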
class ImageDisplay(ImageIO, Implementable, ImageGenerator.Observer):
"""An :py:class:`ImageDisplay` can display images. Typically, it will
use some graphical user interface to open a window in which the
image is displayed. It may also provide some additional controls
    to adapt display properties.
Blocking and non-blocking display
---------------------------------
There are two ways how an image can be displayed. In blocking
mode the execution of the main program is paused while the image
is displayed and is only continued when the display is closed. In
    non-blocking mode, the execution of the main program is
continued while the image is displayed.
The blocking behaviour can be controlled by the `blocking`
argument. It can be set to `True` (running the GUI event loop in
the calling thread and thereby blocking it) or `False` (running
the GUI event loop in some other thread). It can also be set to
`None` (meaning that no GUI event loop is started, which is
similar to the non-blocking mode, however it will usually result
    in an unresponsive display window if no additional actions are
undertaken; see the section on "GUI Event loop" below for more
information).
Ending the display
------------------
Different conditions can be set up to determine when the display
should end. The most natural one is to wait until the display
window is closed (using the standard controls of the window
system). Additionally, the display can be terminated when a key is
pressed on the keyboard or after a given amount of time.
If run in a multi-threaded setting, it is also possible to end
    the display programmatically, calling :py:meth:`close`.
The next question is: what should happen once the display ended?
Again the most natural way is to close the window. However,
if more images are going to be displayed it may be more suitable
    to leave the window on screen and just remove the image until the
next image is available.
GUI Event loop
--------------
An :py:class:`ImageDisplay` displays the image using some
    graphical user interface (GUI). Such a GUI usually requires
    running an event loop to stay responsive, that is, to react to mouse
    and other actions, like resizing, closing, and even repainting the
    window. The event loop regularly checks whether such events have
    occurred and processes them. Running a display without an event
loop usually results in unpleasant behaviour and hence should be
avoided.
    Nevertheless, running an event loop is not always straightforward.
Different GUI libraries use different concepts. For example, some
libraries require that event loops are run in the main thread of
the application, which can not always be realized (for example, it
would not be possible to realize a non-blocking display in the
main thread). The :py:class:`ImageDisplay` provides different
means to deal with such problems.
Usage scenarios
---------------
Example 1: show an image in a window and block until the window is
closed:
>>> display = Display() # blocking=True (default)
>>> display.show(imagelike)
Example 2: show an image in a window without blocking (the event loop
for the window will be run in a separate thread):
>>> display = Display(blocking=False)
>>> display.show(imagelike)
Example 3: show an image in a window without blocking. No event loop
is started for the window and it is the caller's responsibility to
regularly call display.process_events() to keep the interface
responsive.
>>> display = Display(blocking=None)
>>> display.show(imagelike)
    Example 4: show an image for a duration of five seconds.
    After five seconds the display is closed.
>>> display = Display()
>>> display.show(imagelike, timeout=5.0)
Example 5: show multiple images, each for five seconds, but don't close
the window in between:
>>> with Display() as display:
>>> for image in images:
>>> display.show(image, timeout=5.0)
Example 6: presenter:
>>> def presenter(display, video):
    >>>     for frame in video:
>>> if display.closed:
>>> break
>>> display.show(frame)
>>>
>>> display = Display()
>>> display.present(presenter, (video,))
"""
_event_loop: Optional[threading.Thread]
def __init__(self, module: Union[str, List[str]] = None,
blocking: bool = True, **kwargs) -> None:
# pylint: disable=unused-argument
super().__init__(**kwargs)
# _opened: a flag indicating the current state of the display
# window: True = window is open (visible), False = window is closed
self._opened: bool = False
# _blocking: a flag indicating if the display should operate
# in blocking mode (True) or non-blocking mode (False).
self._blocking: bool = blocking
        # _entered: a counter for tracing how often the context manager
# is used (usually it should only be used once!)
self._entered: int = 0
# _event_loop: some Thread object, referring to the thread running the
# event loop. If None, then currently no event loop is running.
self._event_loop = None
# _presentation: a Thread object running a presentation, initiated
# by the method `present`
self._presentation: Optional[threading.Thread] = None
@property
def blocking(self) -> bool:
"""Blocking behaviour of this image :py:class:`Display`. `True` means
that an event loop is run in the calling thread and execution
of the program is blocked while showing an image, `False`
means that the event loop is executed in a background thread
while the calling thread immediately returns. `None` means
that no event loop is started. The caller is responsible for
        processing events, by regularly calling either
:py:meth:`process_events` or :py:meth:`show` (which internally
calls :py:meth:`process_events`).
"""
return self._blocking
@blocking.setter
def blocking(self, blocking: bool) -> None:
if blocking is self._blocking:
return # nothing to do
if not self.closed:
raise RuntimeError("Cannot change blocking state of open Display.")
self._blocking = blocking
#
# context manager
#
def __enter__(self) -> 'ImageDisplay':
self._entered += 1
if self._entered > 1:
LOG.warning("Entering Display multiple times: %d", self._entered)
else:
LOG.debug("Entering Display")
self.open()
return self
def __exit__(self, _exception_type, _exception_value, _traceback) -> None:
LOG.debug("Exiting Display (%d)", self._entered)
self._entered -= 1
if self._entered == 0:
self.close()
#
# public interface
#
def show(self, image: Imagelike, blocking: bool = None, close: bool = None,
timeout: float = None, **kwargs) -> None:
"""Display the given image.
This method may optionally pause execution of the main program
to display the image, if the wait_for_key or timeout arguments
are given. If both are given, the first event that occurs
will stop pausing.
Arguments
---------
image: Imagelike
The image to display. This may be a single image or a
batch of images.
blocking: bool
A flag indicating if the image should be shown in blocking
mode (`True`) or non-blocking mode (`False`). If no value
is specified, the value of the property :py:prop:`blocking`
is used.
close: bool
A flag indicating if the display should be closed after
showing. Closing the display will also end all event
loops that are running. If no value is provided, the
display will be kept open, if it was already open when
this method is called, and it will be closed in case it
was closed before.
wait_for_key: bool
A flag indicating if the display should pause execution
            and wait for a key press.
timeout: float
Time in seconds to pause execution.
"""
if self._presentation is not None:
blocking = None
else:
blocking = self._blocking if blocking is None else blocking
if close is None:
close = self.closed and (blocking is True)
# make sure the window is open
if self.closed:
if self._presentation is threading.current_thread():
raise RuntimeError("Presentation is trying to use closed "
"ImageDisplay.")
self.open()
# show the image
array = Image.as_array(image, dtype=np.uint8)
LOG.debug("Showing image of shape %s, blocking=%s, close=%s, "
"timout=%s, event loop=%s, presentation=%s",
array.shape, blocking, close, timeout,
self.event_loop_is_running(), self._presentation is not None)
self._show(array, **kwargs)
# run the event loop
if blocking is True:
            if not self.event_loop_is_running():
self._run_blocking_event_loop(timeout=timeout)
elif blocking is False:
if timeout is not None:
LOG.warning("Setting timeout (%f) has no effect "
" for non-blocking image Display", timeout)
if not self.event_loop_is_running():
self._run_nonblocking_event_loop()
elif blocking is None:
self._process_events()
# close the window if desired
if close:
if self._entered > 0:
LOG.warning("Closing image Display inside a context manager.")
self.close()
def present(self, presenter, args=(), kwargs={}) -> None:
# pylint: disable=dangerous-default-value
"""Run the given presenter in a background thread while
executing the GUI event loop in the calling thread (which
        some GUI libraries require to be the main thread).
The presenter will get the display as its first argument,
and `args`, `kwargs` as additional arguments. The presenter
may update the display by calling the :py:meth:`show` method.
The presenter should observe the display's `closed` property
and finish presentation once it is set to `True`.
Arguments
---------
presenter:
A function expecting a display object as first argument
and `args`, and `kwargs` as additional arguments.
"""
def target() -> None:
# pylint: disable=broad-except
LOG.info("ImageDisplay[background]: calling presenter")
try:
presenter(self, *args, **kwargs)
except BaseException as exception:
LOG.error("Unhandled exception in presentation.")
handle_exception(exception)
finally:
self.close()
with self:
LOG.info("ImageDisplay[main]: Starting presentation")
self._presentation = threading.Thread(target=target)
self._presentation.start()
self._run_blocking_event_loop()
def open(self) -> None:
"""Open this :py:class:`ImageDisplay`.
"""
if not self._opened and self._presentation is None:
self._open()
self._opened = True
def close(self) -> None:
"""Close this :py:class:`ImageDisplay`. This should also stop
        all background threads, like event loops or ongoing presentations.
"""
LOG.info("Closing ImageDisplay "
"(opened=%s, presentation=%s, event loop=%s)",
self._opened, self._presentation is not None,
self.event_loop_is_running())
if self._opened:
self._opened = False
self._close()
presentation = self._presentation
if presentation is not None:
# we have started a presentation in a background Thread and
# hence we will wait that this presentation finishes. In
# order for this to work smoothly, the presentation should
# regularly check the display.closed property and exit
# (before calling display.show) if that flag is True.
if presentation is not threading.current_thread():
presentation.join()
self._presentation = None
event_loop = self._event_loop
if isinstance(event_loop, threading.Thread):
if event_loop is not threading.current_thread():
event_loop.join()
self._event_loop = None
@property
def opened(self) -> bool:
"""Check if this image :py:class:`Display` is opened, meaning
the display window is shown and an event loop is running.
"""
return self._opened
@property
def closed(self) -> bool:
"""Check if this image :py:class:`Display` is closed, meaning
that no window is shown (and no event loop is running).
"""
return not self._opened
#
# ImageObserver
#
def image_changed(self, tool, change) -> None:
"""Implementation of the :py:class:`ImageObserver` interface.
The display will be updated if the image has changed.
"""
if change.image_changed:
self.show(tool.image)
#
# methods to be implemented by subclasses
#
def _open(self) -> None:
"""Open the display window. The function is only called if
no window is open yet.
"""
raise NotImplementedError(f"{type(self)} claims to be a ImageDisplay, "
"but does not implement an _open() method.")
def _show(self, image: np.ndarray, wait_for_key: bool = False,
timeout: float = None, **kwargs) -> None:
raise NotImplementedError(f"{type(self).__name__} claims to "
"be an ImageDisplay, but does not implement "
"the _show method.")
def _close(self) -> None:
raise NotImplementedError(f"{type(self)} claims to be a ImageDisplay, "
"but does not implement an _close() method.")
def _process_events(self) -> None:
raise NotImplementedError(f"{type(self)} claims to be a ImageDisplay, "
"but does not implement "
"_process_events().")
def _run_event_loop(self) -> None:
if self.blocking is True:
self._run_blocking_event_loop()
elif self.blocking is False:
self._run_nonblocking_event_loop()
def _dummy_event_loop(self, timeout: float = None) -> None:
# pylint: disable=broad-except
interval = 0.1
start = time.time()
try:
print("ImageDisplay: start dummy event loop. "
f"closed={self.closed}")
while (not self.closed and
(timeout is None or time.time() < start + timeout)):
self._process_events()
time.sleep(interval)
except BaseException as exception:
LOG.error("Unhandled exception in event loop")
handle_exception(exception)
finally:
LOG.info("ImageDisplay: ended dummy event loop (closed=%s).",
self.closed)
self._event_loop = None
self.close()
def _run_blocking_event_loop(self, timeout: float = None) -> None:
self._event_loop = threading.current_thread()
self._dummy_event_loop(timeout)
def _run_nonblocking_event_loop(self) -> None:
"""Start a dummy event loop. This event loop will run in the
background and regularly trigger event processing. This may be
slightly less responsive than running the official event loop,
but it has the advantage that this can be done from a background
Thread, allowing to return the main thread to the caller.
In other words: this function is intended to realize a non-blocking
image display with responsive image window.
FIXME[todo]: check how this behaves under heavy load (GPU computation)
and if in case of problems, resorting to a QThread would improve
the situation.
"""
if self.event_loop_is_running():
raise RuntimeError("Only one event loop is allowed.")
self._event_loop = \
threading.Thread(target=self._nonblocking_event_loop)
self._event_loop.start()
def _nonblocking_event_loop(self) -> None:
self._dummy_event_loop()
def event_loop_is_running(self) -> bool:
"""Check if an event loop is currently running.
"""
return self._event_loop is not None
# ------------------------------------------------------------------------
# FIXME[old/todo]: currently used by ./contrib/styletransfer.py ...
def run(self, tool):
"""Monitor the operation of a Processor. This will observe
the processor and update the display whenever new data
are available.
"""
self.observe(tool, interests=ImageGenerator.Change('image_changed'))
try:
print("Starting thread")
thread = threading.Thread(target=tool.loop)
thread.start()
# FIXME[old/todo]: run the main event loop of the GUI to get
# a responsive interface - this is probably framework
# dependent and should be realized in different subclasses
# before we can design a general API.
# Also we would need some stopping mechanism to end the
# display (by key press or buttons, but also programmatically)
# self._application.exec_()
print("Application main event loop finished")
except KeyboardInterrupt:
print("Keyboard interrupt.")
tool.stop()
thread.join()
print("Thread joined")
# FIXME[old/todo]: currently used by ./dltb/thirdparty/qt.py/dltb/thirdparty/qt.py ...
@property
def active(self) -> bool:
"""Check if this :py:class:`ImageDisplay` is active.
"""
return True # FIXME[hack]
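# Illustrative sketch (added; not part of the original module): presenting a
# sequence of frames with a concrete ImageDisplay implementation, following
# usage examples 5 and 6 from the class docstring.  `frames` stands for any
# iterable of Imagelike objects.
def _example_present_frames(display: ImageDisplay, frames) -> None:
    """Show each frame for two seconds without closing the window
    between frames."""
    with display:
        for frame in frames:
            if display.closed:
                break  # the user closed the window
            display.show(frame, timeout=2.0)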
class Location:
"""A :py:class:`Location` identifies an area in a two-dimensional
space. A typical location is a bounding box (realized by the
subclass :py:class:`BoundingBox`), but this abstract definition
also allows for alternative ways to describe a location.
"""
def __init__(self, points) -> None:
pass
def __contains__(self, point) -> bool:
"""Checks if the given point lies in this :py:class:`Location`.
To be implemented by subclasses.
"""
def mark_image(self, image: Imagelike, color=(1, 0, 0)):
"""Mark this :py:class:`Location` in some given image.
Arguments
---------
image:
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for marking "
f"an image.")
def extract_from_image(self, image: Imagelike) -> np.ndarray:
"""Extract this :py:class:`Location` from a given image.
Arguments
---------
image:
The image from which the location is to be extracted.
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for extraction "
f"from an image.")
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale this :py:class:`location` by the given factor.
All coordinates will be multiplied by this value.
"""
raise NotImplementedError(f"Location {self.__class__.__name__} "
f"does not provide a method for scaling.")
class PointsBasedLocation(Location):
"""A :py:class:`PointsBasedLocation` is a :py:class:`Location`
that can be described by points, like a polygon area, or more
simple: a bounding box.
Attributes
----------
_points: np.ndarray
An array of shape (n, 2), providing n points in form of (x, y)
coordinates.
"""
def __init__(self, points: np.ndarray) -> None:
        super().__init__(points)
self._points = points
def __contains__(self, point) -> bool:
return ((self._points[:, 0].min() <= point[0] <=
self._points[:, 0].max()) and
(self._points[:, 1].min() <= point[1] <=
self._points[:, 1].max()))
def __getitem__(self, idx):
return self._points[idx]
def mark_image(self, image: np.ndarray, color=(1, 0, 0)):
"""Mark this :py:class:`PointsBasedLocation` in an image.
"""
for point in self._points:
image[max(point[1]-1, 0):min(point[1]+1, image.shape[0]),
max(point[0]-1, 0):min(point[0]+1, image.shape[1])] = color
def extract_from_image(self, image: Imagelike) -> np.ndarray:
"""Extract this :py:class:`Location` from a given image.
Arguments
---------
image:
The image from which this :py:class:`PointsBasedLocation`
is to be extracted.
"""
image = Image.as_array(image)
height, width = image.shape[:2]
point1_x, point1_y = self._points.min(axis=0)
point2_x, point2_y = self._points.max(axis=0)
point1_x, point1_y = max(0, int(point1_x)), max(0, int(point1_y))
point2_x, point2_y = \
min(width, int(point2_x)), min(height, int(point2_y))
return image[point1_y:point2_y, point1_x:point2_x]
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale the :py:class:`Location`.
Arguments
---------
factor:
The scaling factor. This can either be a float, or a pair
of floats in which case the first number is the horizontal (x)
            scaling factor and the second number is the vertical (y)
scaling factor.
"""
if reference == 'origin':
            reference = np.asarray((0.0, 0.0))
elif reference == 'center':
reference = self._points.mean(axis=0)
else:
reference = np.asarray(reference)
self._points = (self._points - reference) * factor + reference
@property
def points(self) -> np.ndarray:
"""The points specifying this :py:class:`PointsBasedLocation`.
This is an array of shape (n, 2), providing n points in form of (x, y)
coordinates.
"""
return self._points
def __len__(self):
return len(self._points)
class Landmarks(PointsBasedLocation):
"""Landmarks are an ordered list of points.
"""
def __len__(self) -> int:
return 0 if self._points is None else len(self._points)
def __str__(self) -> str:
return f"Landmarks with {len(self)} points."
class BoundingBox(PointsBasedLocation):
# pylint: disable=invalid-name
"""A bounding box describes a rectangular arae in an image.
"""
def __init__(self, x1=None, y1=None, x2=None, y2=None,
x=None, y=None, width=None, height=None) -> None:
super().__init__(np.ndarray((2, 2)))
if x1 is not None:
self.x1 = x1
elif x is not None:
self.x1 = x
if y1 is not None:
self.y1 = y1
elif y is not None:
self.y1 = y
if x2 is not None:
self.x2 = x2
elif width is not None:
self.width = width
if y2 is not None:
self.y2 = y2
elif height is not None:
self.height = height
@property
def x1(self):
"""The horizontal position of the left border of this
:py:class:`BoundingBox`.
"""
return self._points[0, 0]
@x1.setter
def x1(self, x1):
self._points[0, 0] = x1
@property
def y1(self):
"""The vertical position of the upper border of this
:py:class:`BoundingBox`.
"""
return self._points[0, 1]
@y1.setter
def y1(self, y1):
self._points[0, 1] = y1
@property
def x2(self):
"""The horizontal position of the right border of this
:py:class:`BoundingBox`.
"""
return self._points[1, 0]
@x2.setter
def x2(self, x2):
self._points[1, 0] = max(x2, self.x1) # Avoid negative width
@property
def y2(self):
"""The vertical position of the lower border of this
:py:class:`BoundingBox`.
"""
return self._points[1, 1]
@y2.setter
def y2(self, y2):
self._points[1, 1] = max(y2, self.y1) # Avoid negative height
@property
def x(self):
"""The horizontal position of the left border of this
:py:class:`BoundingBox`.
"""
return self.x1
@x.setter
def x(self, x):
self.x1 = x
@property
def y(self):
"""The vertical position of the upper border of this
:py:class:`BoundingBox`.
"""
return self.y1
@y.setter
def y(self, y):
self.y1 = y
@property
def width(self):
"""The width of the :py:class:`BoundingBox`.
"""
return self.x2 - self.x1
@width.setter
def width(self, width):
self.x2 = self.x1 + width
@property
def height(self):
"""The height of the :py:class:`BoundingBox`.
"""
return self.y2 - self.y1
@height.setter
def height(self, height):
self.y2 = self.y1 + height
@property
def size(self) -> Size:
"""The :py:class:`Size` of this :py:class:`BoundingBox`.
"""
return Size(self.width, self.height)
def mark_image(self, image: np.ndarray, color=None) -> None:
color = color or (0, 255, 0)
size = image.shape[1::-1]
thickness = max(1, max(size)//300)
t1 = thickness//2
t2 = (thickness+1)//2
x1 = max(int(self.x1), t2)
y1 = max(int(self.y1), t2)
x2 = min(int(self.x2), size[0]-t1)
y2 = min(int(self.y2), size[1]-t1)
# print(f"mark_image[{self}]: image size={size}"
# f"shape={image.shape}, {image.dtype}:"
# f"{image.min()}-{image.max()}, box:({x1}, {y1}) - ({x2}, {y2})")
for offset in range(-t2, t1):
image[(y1+offset, y2+offset), x1:x2] = color
image[y1:y2, (x1+offset, x2+offset)] = color
def crop(self, image: Imagelike, size: Optional[Size] = None) -> Imagelike:
"""Crop the bounding box from an image.
Arguments
---------
size:
            The size of the resulting crop. If different from the size
            of this :py:class:`BoundingBox`, the box content is copied
            into a result array of the requested size without rescaling
            (rescaling is not implemented yet).
"""
image = Image.as_array(image)
if size is None:
size = self.size
        img_height, img_width, img_channels = image.shape
result = np.ndarray((size.height, size.width, img_channels),
image.dtype)
x1_source, x1_target = max(0, self.x1), max(-self.x1, 0)
y1_source, y1_target = max(0, self.y1), max(-self.y1, 0)
x2_source, x2_target = min(img_width, self.x2), \
min(size.width - (self.x2 - img_width), size.width)
y2_source, y2_target = min(img_height, self.y2), \
min(size.height - (self.y2 - img_height), size.height)
result[y1_target: y2_target, x1_target:x2_target] = \
image[y1_source: y2_source, x1_source:x2_source]
if size != self.size:
pass # FIXME[todo]
return result
def extract_from_image(self, image: Imagelike, padding: bool = True,
copy: bool = None) -> np.ndarray:
"""Extract the region described by the bounding box from an image.
"""
image = Image.as_array(image)
image_size = image.shape[1::-1]
channels = 1 if image.ndim < 3 else image.shape[2]
x1, x2 = int(self.x1), int(self.x2)
y1, y2 = int(self.y1), int(self.y2)
invalid = (x1 < 0 or x2 > image_size[0] or
y1 < 0 or y2 > image_size[1])
if invalid and padding:
copy = True
else:
# no padding: resize bounding box to become valid
x1, x2 = max(x1, 0), min(x2, image_size[0])
y1, y2 = max(y1, 0), min(y2, image_size[1])
invalid = False
width, height = x2 - x1, y2 - y1
if copy:
shape = (height, width) + ((channels, ) if channels > 1 else ())
box = np.zeros(shape, dtype=image.dtype)
slice_box0 = slice(max(-y1, 0), height-max(y2-image_size[1], 0))
slice_box1 = slice(max(-x1, 0), width-max(x2-image_size[0], 0))
slice_image0 = slice(max(y1, 0), min(y2, image_size[1]))
slice_image1 = slice(max(x1, 0), min(x2, image_size[0]))
LOG.debug("Extracting[%s]: image[%s, %s] -> box[%s, %s]", self,
slice_image0, slice_image1, slice_box0, slice_box1)
box[slice_box0, slice_box1] = image[slice_image0, slice_image1]
else:
box = image[y1:y2, x1:x2]
return box
def __str__(self) -> str:
"""String representation of this :py:class:`BoundingBox`.
"""
# return f"({self.x1},{self.y1})-({self.x2},{self.y2})"
# return (f"BoundingBox at ({self.x}, {self.y})"
# f" of size {self.width} x {self.height}")
return (f"BoundingBox from ({self.x1}, {self.y1})"
f" to ({self.x2}, {self.y2})")
def __add__(self, other: 'BoundingBox') -> 'BoundingBox':
"""Adding two bounding boxes means to create a new bounding box
that bounds both of them.
"""
return BoundingBox(x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2))
def __mul__(self, other: 'BoundingBox') -> 'BoundingBox':
"""Multiplying two bounding boxes means to form the intersection.
"""
return BoundingBox(x1=max(self.x1, other.x1),
y1=max(self.y1, other.y1),
x2=min(self.x2, other.x2),
y2=min(self.y2, other.y2))
def area(self):
"""Compute the area of this :py:class:`BoundingBox`.
"""
return self.width * self.height
@property
def center(self) -> Tuple[float, float]:
"""The center of this bounding box as an (x,y) pair.
"""
return ((self.x1 + self.x2)/2, (self.y1 + self.y2)/2)
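# Illustrative sketch (added; not part of the original module): the operators
# defined above compose naturally into an intersection-over-union helper,
# a common measure for comparing two detections.
def _example_iou(box_a: BoundingBox, box_b: BoundingBox) -> float:
    """Compute the intersection over union of two bounding boxes."""
    intersection = (box_a * box_b).area()                # overlapping region
    union = box_a.area() + box_b.area() - intersection   # combined coverage
    return intersection / union if union > 0 else 0.0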
class Region:
"""A region in an image, optionally annotated with attributes.
Attributes
----------
_location:
The location of the region. This can be a :py:class:`BoundingBox`
or any other description of a location (a contour, etc.).
_attributes: dict
A dictionary with further attributes describing the region,
e.g., a label.
"""
_location: Location
    _attributes: dict = None
color_min_confidence: np.ndarray = np.asarray((255., 0., 0.)) # red
color_max_confidence: np.ndarray = np.asarray((0., 255., 0.)) # green
def __init__(self, location: Location, **attributes):
self._location = location
self._attributes = attributes
def __str__(self) -> str:
return f"{self._location} with {len(self._attributes)} attributes"
def __contains__(self, point) -> bool:
return point in self._location
def __getattr__(self, name: str) -> Any:
if name in self._attributes:
return self._attributes[name]
raise AttributeError(f"Region has no attribute '{name}'. Valid "
f"attributes are: {self._attributes.keys()}")
def __len__(self) -> int:
return len(self._attributes)
@property
def location(self):
"""The :py:class:`Location` describing this :py:class:`Region`.
"""
return self._location
def mark_image(self, image: Imagelike, color: Tuple = None):
"""Mark this :py:class:`region` in a given image.
Arguments
---------
image:
The image into which the region is to be marked.
color:
The color to be used for marking.
"""
# FIXME[concept]: how to proceed for images that can not (easily)
# be modified in place (e.g. filename/URL) -> should we rather
# return the marked image?
if color is None and 'confidence' in self._attributes:
confidence = max(0, min(1.0, self._attributes['confidence']))
mark_color = ((1-confidence) * self.color_min_confidence +
confidence * self.color_max_confidence)
color = tuple(mark_color.astype(np.uint8))
image = Image.as_array(image)
self._location.mark_image(image, color=color)
def extract_from_image(self, image: Imagelike, **kwargs) -> np.ndarray:
"""Extract this :py:class:`Region` from a given image.
Arguments
---------
image:
            The image from which the region is to be extracted.
Result
------
patch:
A numpy array (`dtype=np.uint8`) containing the extracted
region.
"""
return self._location.extract_from_image(image, **kwargs)
def scale(self, factor: Union[float, Tuple[float, float]],
reference: str = 'origin') -> None:
"""Scale this region by a given factor.
Arguments
---------
factor:
The scaling factor. This can either be a float, or a pair
of floats in which case the first number is the horizontal (x)
            scaling factor and the second number is the vertical (y)
scaling factor.
reference:
The reference point. The default is `'origin'`, meaning
all coordinates are scaled with respect to the origin.
Another special value is `'center'`, meaning that
the center of the region should be taken as reference
point.
"""
if self._location is not None:
            self._location.scale(factor, reference=reference)
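# Illustrative sketch (added; not part of the original module): typical use of
# Region objects, e.g., as returned by a detector -- marking them in an image
# and collecting the corresponding patches.
def _example_process_regions(image: Imagelike, regions) -> list:
    """Mark each region in the image and collect the extracted patches."""
    array = Image.as_array(image)
    patches = []
    for region in regions:
        region.mark_image(array)                     # draw into the array
        patches.append(region.extract_from_image(array))
    return patches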
| en | 0.78523 | Defintion of abstract classes for image handling. The central data structure is :py:class:`Image`, a subclass of :py:class:`Data`, specialized to work with images. It provides, for example, properties like size and channels. Relation to other `image` modules in the Deep Learning ToolBox: * :py:mod:`dltb.util.image`: This defines general functions for image I/O and basic image operations. That module should be standalone, not (directly) requiring other parts of the toolbox (besides util) or third party modules (besides numpy). However, implementation for the interfaces defined there are provided by third party modules, which are automagically loaded if needed. * :py:mod:`dltb.tool.image`: Extension of the :py:class:`Tool` API to provide a class :py:class:`ImageTool` which can work on `Image` data objects. So that module obviously depends on :py:mod:``dltb.base.image` and it may make use of functionality provided by :py:mod:`dltb.util.image`. # standard imports # third-party imports # toolbox imports # logging # FIXME[todo]: create an interface to work with different image/data formats # (as started in dltb.thirdparty.pil) # * add a way to specify the default format for reading images # - in dltb.util.image.imread(format='pil') # - for Imagesources # * add on the fly conversion for Data objects, e.g. # data.pil should # - check if property pil already exists # - if not: invoke Image.as_pil(data) # - store the result as property data.pil # - return it # * this method could be extended: # - just store filename and load on demand # - compute size on demand # # Imagelike is intended to be everything that can be used as # an image. # # np.ndarray: # The raw image data # str: # A URL. Allow to instantiate size from any `Sizeable` objects and also from a pair of arguments. Allow to compare `Size` to any `Sizeable` objects. Enumeration of potential colorspace for representing images. # pylint: disable=too-few-public-methods Data structure for representing image format. This includes the datatype of the image, colorspace, and min and max values. It may also include an image size. The minimal possible pixel value in an image. The minimal possible pixel value in an image. A collection of image related functions. Register a new image converter. An image converter is a function, that can convert a given image into another format. Arguments --------- source: The input type of the converter, that is the type of its first argument of the `convert` function. convert: The actual converter function. This function takes two arguments: `image` is the image to convert and `bool` is a flag indicating if the image data should be copied. target: The output format. This can be `image` (the converter produces an instance of `Image`) or `array` (a numpy array), or another string identifying a third party format, if available. # FIXME[todo]: make this more flexible, use introspection, # get rid off the copy parameter, deal with other arguments The names of supported image formats. Get image-like object as numpy array. This may act as the identity function in case `image` is already an array, or it may extract the relevant property, or it may even load an image from a filename. Arguments --------- image: Imagelike An image like object to turn into an array. copy: bool A flag indicating if the data should be copied or if the original data is to be returned (if possible). dtype: Numpy datatype, e.g., numpy.float32. 
colorspace: Colorspace The colorspace in which the pixels in the resulting array are encoded. If no colorspace is given, or if the colorspace of the input image Image is unknown, no color conversion is performed. # FIXME[hack]: local imports to avoid circular module # dependencies ... # pylint: disable=import-outside-toplevel # grayscale image # RGBD # /256. Get image-like objec as :py:class:`Data` object. # just reuse the given Image instance # FIXME[todo]: it would be good to have the possibility to # indicate desired attributes, e.g. 'array', 'pil', that # should be filled during initialization. # just reuse the given Image instance # make sure super().__init__() is called even if # preparing the array fails. If ommitted, the object may # be in an incomplete state, causing problems at destruction. Provide a visualization of this image. The may be simply the image (in case of a single image) In case of a batch, it can be an image galery. # self is a batch of images: create a matrix showing all images. The size of this image. If an object is an ImageAdapter, it can adapt images to some internal representation. It has to implement the :py:class:`image_to_internal` and :py:class:`internal_to_image` methods. Such an object can then be extended to do specific image processing. The :py:class:`ImageAdapter` keeps a map of known :py:class:`ImageExtension`. If a subclass of :py:class:`ImageAdapter` also subclasses a base class of these extensions it will be adapted to also subclass the corresponding extension, e.g., a :py:class:`ImageAdapter` that is a `Tool` will become an `ImageTool`, provided the mapping of `Tool` to `ImageTool` has been registered with the `ImageAdapter` class. Creating `ImageTool` as an :py:class:`ImageExtension` of `base=Tool` will automatically do the registration. # FIXME[hack]: batch handling # pylint: disable=abstract-method An :py:class:`ImageExtension` extends some base class to be able to process images. In that it makes use of the :py:class:`ImageAdapter` interface. In addition to deriving from :py:class:`ImageAdapter`, the :py:class:`ImageExtension` introduces some "behind the scene magic": a class `ImageTool` that is declared as an `ImageExtension` with base `Tool` is registered with the :py:class:`ImageAdapter` class, so that any common subclass of :py:class:`ImageAdapter` and `Tool` will automagically become an `ImageTool`. # pylint: disable=arguments-differ A base for classes that can create and change images. Provide the current image. # pylint: disable=too-few-public-methods An image :py:class:`Generator` can generate images. # FIXME[todo]: spell this out # pylint: disable=too-few-public-methods An abstract interface to read, write and display images. An :py:class:`ImageReader` can read images from file or URL. The :py:meth:`read` method is the central method of this class. Read an image from a file or URL. An :py:class:`ImageWriter` can write iamges to files or upload them to a given URL. The :py:meth:`write` method is the central method of this class. Write an `image` to a file with the given `filename`. FIXME[todo]: there is also the network.resize module, which may be incorporated! Image resizing is implemented by various libraries, using slightly incompatible interfaces. The idea of this class is to provide a well defined resizing behaviour, that offers most of the functionality found in the different libraries. Subclasses can be used to map this interface to specific libraries. Enlarging vs. 
Shrinking ----------------------- Interpolation: * Linear, cubic, ... * Mean value: Cropping -------- * location: center, random, or fixed * boundaries: if the crop size is larger than the image: either fill boundaries with some value or return smaller image Parameters ---------- * size: scipy.misc.imresize: size : int, float or tuple - int - Percentage of current size. - float - Fraction of current size. - tuple - Size of the output image. * zoom : float or sequence, optional in scipy.ndimage.zoom: "The zoom factor along the axes. If a float, zoom is the same for each axis. If a sequence, zoom should contain one value for each axis." * downscale=2, float, optional in skimage.transform.pyramid_reduce "Downscale factor. * preserve_range: skimage.transform.pyramid_reduce: "Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float." * interp='nearest' in scipy.misc.imresize: "Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic')." * order: int, optional in scipy.ndimage.zoom, skimage.transform.pyramid_reduce: "The order of the spline interpolation, default is 3. The order has to be in the range 0-5." 0: Nearest-neighbor 1: Bi-linear (default) 2: Bi-quadratic 3: Bi-cubic 4: Bi-quartic 5: Bi-quintic * mode: str, optional in scipy.misc.imresize: "The PIL image mode ('P', 'L', etc.) to convert arr before resizing." * mode: str, optional in scipy.ndimage.zoom, skimage.transform.pyramid_reduce: "Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'" - 'constant' (default): Pads with a constant value. - 'reflect': Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - 'nearest': - 'wrap': Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. * cval: scalar, optional in scipy.ndimage.zoom, skimage.transform.pyramid_reduce: "Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0" * prefilter: bool, optional in scipy.ndimage.zoom: "The parameter prefilter determines if the input is pre-filtered with spline_filter before interpolation (necessary for spline interpolation of order > 1). If False, it is assumed that the input is already filtered. Default is True." * sigma: float, optional in skimage.transform.pyramid_reduce: "Sigma for Gaussian filter. Default is 2 * downscale / 6.0 which corresponds to a filter mask twice the size of the scale factor that covers more than 99% of the Gaussian distribution." 
Libraries providing resizing functionality ------------------------------------------ Scikit-Image: * skimage.transform.resize: image_resized = resize(image, (image.shape[0]//4, image.shape[1]//4), anti_aliasing=True) Documentation: https://scikit-image.org/docs/dev/api/skimage.transform.html #skimage.transform.resize * skimage.transform.rescale: image_rescaled = rescale(image, 0.25, anti_aliasing=False) * skimage.transform.downscale_local_mean: image_downscaled = downscale_local_mean(image, (4, 3)) https://scikit-image.org/docs/dev/api/skimage.transform.html #skimage.transform.downscale_local_mean Pillow: * PIL.Image.resize: OpenCV: * cv2.resize: cv2.resize(image,(width,height)) Mahotas: * mahotas.imresize: mahotas.imresize(img, nsize, order=3) This function works in two ways: if nsize is a tuple or list of integers, then the result will be of this size; otherwise, this function behaves the same as mh.interpolate.zoom * mahotas.interpolate.zoom imutils: * imutils.resize Scipy (deprecated): * scipy.misc.imresize: The documentation of scipy.misc.imresize says that imresize is deprecated! Use skimage.transform.resize instead. But it seems skimage.transform.resize gives different results from scipy.misc.imresize. https://stackoverflow.com/questions/49374829/scipy-misc-imresize-deprecated-but-skimage-transform-resize-gives-different-resu SciPy: scipy.misc.imresize is deprecated in SciPy 1.0.0, and will be removed in 1.3.0. Use Pillow instead: numpy.array(Image.fromarray(arr).resize()) * scipy.ndimage.interpolation.zoom: * scipy.ndimage.zoom: * skimage.transform.pyramid_reduce: Smooth and then downsample image. Resize an image to the given size. Arguments --------- image: The image to be scaled. size: The target size. Scale an image image by a given factor. Arguments --------- image: The image to be scaled. scale: Either a single float value being the common scale factor for horizontal and vertical direction, or a pair of scale factors for these two axes. Crop an :py:class:`Image` to a given size. If no position is provided, a center crop will be performed. # FIXME[todo]: deal with sizes extending the original size # FIXME[todo]: allow center/random/position crop Warp an image by applying a transformation. To be implemented by subclasses. Obtain a tranformation for aligning key points to reference positions To be implemented by subclasses. Arguments --------- points: A sequence of points to be mapped onto the reference points, given as (x,y) coordinates reference: A sequence with the same number of points serving as reference points to which `points` should be moved. Result ------ transformation: A affine transformation matrix. This is a 2x3 matrix, allowing to compute [x',y'] = matrix * [x,y,1]. Note ---- Affine transformations are more general than similarity transformations, which can always be decomposed into a combination of scaling, rotating, and translating. General affine tansformations can not be decomposed in this way. The affine transformation matrix contains the following entries: ``` cos(theta) * s -sin(theta) * s t_x sin(theta) * s cos(theta) * s t_y ``` with theta being the rotation angle, s the scaling factor and t the translation. Align an image by applying an (affine) transformation that maps key points to reference positions. Arguments --------- image: The image to align. 
points: A sequence of points to be mapped onto the reference points, given as (x,y) coordinates reference: A sequence with the same number of points serving as reference points to which `points` should be moved. size: The size of the resulting image. Result ------ aligned: The aligned image. An :py:class:`ImageOperator` can be applied to an image to obtain some transformation of that image. Perform the actual operation. Transform a source file into a target file. # FIXME[concept]: this requires the util.image module! # pylint: disable=import-outside-toplevel Apply image operator to an :py:class:`Image` data object. An :py:class:`ImageDisplay` can display images. Typically, it will use some graphical user interface to open a window in which the image is displayed. It may also provide some additional controls to addapt display properties. Blocking and non-blocking display --------------------------------- There are two ways how an image can be displayed. In blocking mode the execution of the main program is paused while the image is displayed and is only continued when the display is closed. In non-blocking mode, the the execution of the main program is continued while the image is displayed. The blocking behaviour can be controlled by the `blocking` argument. It can be set to `True` (running the GUI event loop in the calling thread and thereby blocking it) or `False` (running the GUI event loop in some other thread). It can also be set to `None` (meaning that no GUI event loop is started, which is similar to the non-blocking mode, however it will usually result in an inresponsive display window if no additional actions are undertaken; see the section on "GUI Event loop" below for more information). Ending the display ------------------ Different conditions can be set up to determine when the display should end. The most natural one is to wait until the display window is closed (using the standard controls of the window system). Additionally, the display can be terminated when a key is pressed on the keyboard or after a given amount of time. If run in a multi-threaded setting, it is also possible to end the display programatically, calling :py:meth:`close`. The next question is: what should happen once the display ended? Again the most natural way is to close the window. However, if more images are going to be displayed it may be more suitable to leave the window on screen an just remove the image, until the next image is available. GUI Event loop -------------- An :py:class:`ImageDisplay` displays the image using some graphical user interface (GUI). Such a GUI usually requires to run an event loop to stay responsive, that is to react to mouse and other actions, like resizing, closing and even repainting the window. The event loop regularly checks if such events have occured and processes them. Running a display without an event loop usually results in unpleasant behaviour and hence should be avoided. Nevertheless, running an event loop is not always straight forward. Different GUI libraries use different concepts. For example, some libraries require that event loops are run in the main thread of the application, which can not always be realized (for example, it would not be possible to realize a non-blocking display in the main thread). The :py:class:`ImageDisplay` provides different means to deal with such problems. 
Usage scenarios --------------- Example 1: show an image in a window and block until the window is closed: >>> display = Display() # blocking=True (default) >>> display.show(imagelike) Example 2: show an image in a window without blocking (the event loop for the window will be run in a separate thread): >>> display = Display(blocking=False) >>> display.show(imagelike) Example 3: show an image in a window without blocking. No event loop is started for the window and it is the caller's responsibility to regularly call display.process_events() to keep the interface responsive. >>> display = Display(blocking=None) >>> display.show(imagelike) Example 4: show an image for five seconds duration. After 5 seconds the display is closed. >>> display = Display() >>> display.show(imagelike, timeout=5.0) Example 5: show multiple images, each for five seconds, but don't close the window in between: >>> with Display() as display: >>> for image in images: >>> display.show(image, timeout=5.0) Example 6: presenter: >>> def presenter(display, video): >>> while frame in video: >>> if display.closed: >>> break >>> display.show(frame) >>> >>> display = Display() >>> display.present(presenter, (video,)) # pylint: disable=unused-argument # _opened: a flag indicating the current state of the display # window: True = window is open (visible), False = window is closed # _blocking: a flag indicating if the display should operate # in blocking mode (True) or non-blocking mode (False). # _entered: a counter to for tracing how often the context manager # is used (usually it should only be used once!) # _event_loop: some Thread object, referring to the thread running the # event loop. If None, then currently no event loop is running. # _presentation: a Thread object running a presentation, initiated # by the method `present` Blocking behaviour of this image :py:class:`Display`. `True` means that an event loop is run in the calling thread and execution of the program is blocked while showing an image, `False` means that the event loop is executed in a background thread while the calling thread immediately returns. `None` means that no event loop is started. The caller is responsible for processing events, by regurlarly calling either :py:meth:`process_events` or :py:meth:`show` (which internally calls :py:meth:`process_events`). # nothing to do # # context manager # # # public interface # Display the given image. This method may optionally pause execution of the main program to display the image, if the wait_for_key or timeout arguments are given. If both are given, the first event that occurs will stop pausing. Arguments --------- image: Imagelike The image to display. This may be a single image or a batch of images. blocking: bool A flag indicating if the image should be shown in blocking mode (`True`) or non-blocking mode (`False`). If no value is specified, the value of the property :py:prop:`blocking` is used. close: bool A flag indicating if the display should be closed after showing. Closing the display will also end all event loops that are running. If no value is provided, the display will be kept open, if it was already open when this method is called, and it will be closed in case it was closed before. wait_for_key: bool A flag indicating if the display should pause execution and wait or a key press. timeout: float Time in seconds to pause execution. 
# make sure the window is open # show the image # run the event loop # close the window if desired # pylint: disable=dangerous-default-value Run the given presenter in a background thread while executing the GUI event loop in the calling thread (which by some GUI library is supposed to be the main thread). The presenter will get the display as its first argument, and `args`, `kwargs` as additional arguments. The presenter may update the display by calling the :py:meth:`show` method. The presenter should observe the display's `closed` property and finish presentation once it is set to `True`. Arguments --------- presenter: A function expecting a display object as first argument and `args`, and `kwargs` as additional arguments. # pylint: disable=broad-except Open this :py:class:`ImageDisplay`. Close this :py:class:`ImageDisplay`. This should also stop all background threads, like event loops or ongoing presentatons # we have started a presentation in a background Thread and # hence we will wait that this presentation finishes. In # order for this to work smoothly, the presentation should # regularly check the display.closed property and exit # (before calling display.show) if that flag is True. Check if this image :py:class:`Display` is opened, meaning the display window is shown and an event loop is running. Check if this image :py:class:`Display` is closed, meaning that no window is shown (and no event loop is running). # # ImageObserver # Implementation of the :py:class:`ImageObserver` interface. The display will be updated if the image has changed. # # methods to be implemented by subclasses # Open the display window. The function is only called if no window is open yet. # pylint: disable=broad-except Start a dummy event loop. This event loop will run in the background and regularly trigger event processing. This may be slightly less responsive than running the official event loop, but it has the advantage that this can be done from a background Thread, allowing to return the main thread to the caller. In other words: this function is intended to realize a non-blocking image display with responsive image window. FIXME[todo]: check how this behaves under heavy load (GPU computation) and if in case of problems, resorting to a QThread would improve the situation. Check if an event loop is currently running. # ------------------------------------------------------------------------ # FIXME[old/todo]: currently used by ./contrib/styletransfer.py ... Monitor the operation of a Processor. This will observe the processor and update the display whenever new data are available. # FIXME[old/todo]: run the main event loop of the GUI to get # a responsive interface - this is probably framework # dependent and should be realized in different subclasses # before we can design a general API. # Also we would need some stopping mechanism to end the # display (by key press or buttons, but also programmatically) # self._application.exec_() # FIXME[old/todo]: currently used by ./dltb/thirdparty/qt.py/dltb/thirdparty/qt.py ... Check if this :py:class:`ImageDisplay` is active. # FIXME[hack] A :py:class:`Location` identifies an area in a two-dimensional space. A typical location is a bounding box (realized by the subclass :py:class:`BoundingBox`), but this abstract definition also allows for alternative ways to describe a location. Checks if the given point lies in this :py:class:`Location`. To be implemented by subclasses. Mark this :py:class:`Location` in some given image. 
Arguments --------- image: Extract this :py:class:`Location` from a given image. Arguments --------- image: The image from which the location is to be extracted. Scale this :py:class:`location` by the given factor. All coordinates will be multiplied by this value. A :py:class:`PointsBasedLocation` is a :py:class:`Location` that can be described by points, like a polygon area, or more simple: a bounding box. Attributes ---------- _points: np.ndarray An array of shape (n, 2), providing n points in form of (x, y) coordinates. Mark this :py:class:`PointsBasedLocation` in an image. Extract this :py:class:`Location` from a given image. Arguments --------- image: The image from which this :py:class:`PointsBasedLocation` is to be extracted. Scale the :py:class:`Location`. Arguments --------- factor: The scaling factor. This can either be a float, or a pair of floats in which case the first number is the horizontal (x) scaling factor and the second numger is the vertical (y) scaling factor. The points specifying this :py:class:`PointsBasedLocation`. This is an array of shape (n, 2), providing n points in form of (x, y) coordinates. Landmarks are an ordered list of points. # pylint: disable=invalid-name A bounding box describes a rectangular arae in an image. The horizontal position of the left border of this :py:class:`BoundingBox`. The vertical position of the upper border of this :py:class:`BoundingBox`. The horizontal position of the right border of this :py:class:`BoundingBox`. # Avoid negative width The vertical position of the lower border of this :py:class:`BoundingBox`. # Avoid negative height The horizontal position of the left border of this :py:class:`BoundingBox`. The vertical position of the upper border of this :py:class:`BoundingBox`. The width of the :py:class:`BoundingBox`. The height of the :py:class:`BoundingBox`. The :py:class:`Size` of this :py:class:`BoundingBox`. # print(f"mark_image[{self}]: image size={size}" # f"shape={image.shape}, {image.dtype}:" # f"{image.min()}-{image.max()}, box:({x1}, {y1}) - ({x2}, {y2})") Crop the bounding box from an image. Arguments --------- size: The size of the resulting crop. If different from the size of this :py:class:`BoundingBox`, the # FIXME[todo] Extract the region described by the bounding box from an image. # no padding: resize bounding box to become valid String representation of this :py:class:`BoundingBox`. # return f"({self.x1},{self.y1})-({self.x2},{self.y2})" # return (f"BoundingBox at ({self.x}, {self.y})" # f" of size {self.width} x {self.height}") Adding two bounding boxes means to create a new bounding box that bounds both of them. Multiplying two bounding boxes means to form the intersection. Compute the area of this :py:class:`BoundingBox`. The center of this bounding box as an (x,y) pair. A region in an image, optionally annotated with attributes. Attributes ---------- _location: The location of the region. This can be a :py:class:`BoundingBox` or any other description of a location (a contour, etc.). _attributes: dict A dictionary with further attributes describing the region, e.g., a label. # red # green The :py:class:`Location` describing this :py:class:`Region`. Mark this :py:class:`region` in a given image. Arguments --------- image: The image into which the region is to be marked. color: The color to be used for marking. # FIXME[concept]: how to proceed for images that can not (easily) # be modified in place (e.g. filename/URL) -> should we rather # return the marked image? 
Extract this :py:class:`Region` from a given image. Arguments --------- image: The image from the the region is to be extracted. Result ------ patch: A numpy array (`dtype=np.uint8`) containing the extracted region. Scale this region by a given factor. Arguments --------- factor: The scaling factor. This can either be a float, or a pair of floats in which case the first number is the horizontal (x) scaling factor and the second numger is the vertical (y) scaling factor. reference: The reference point. The default is `'origin'`, meaning all coordinates are scaled with respect to the origin. Another special value is `'center'`, meaning that the center of the region should be taken as reference point. | 3.316097 | 3 |
TraitsUI/examples/HDF5_tree_demo_using_h5py.py | marshallmcdonnell/interactive_plotting | 0 | 6630160 | """This demo shows how to use Traits TreeEditors with PyTables to walk the
hierarchy of an HDF5 file. This only picks out arrays and groups, but could
easily be extended to other structures, like tables.
In the demo, the path to the selected item is printed whenever the selection
changes. In order to run, a path to an existing HDF5 database must be given
at the bottom of this file.
"""
from __future__ import print_function
from traits.api import HasTraits, Str, List, Instance
from traitsui.api import TreeEditor, TreeNode, View, Item, Group
import sys
import tables as tb
import h5py
# View for objects that aren't edited
no_view = View()
# HDF5 Nodes in the tree
class Hdf5ArrayNode(HasTraits):
name = Str('<unknown>')
path = Str('<unknown>')
parent_path = Str('<unknown>')
class Hdf5GroupNode(HasTraits):
name = Str('<unknown>')
path = Str('<unknown>')
parent_path = Str('<unknown>')
# Can't have recursive traits? Really?
#groups = List( Hdf5GroupNode )
groups = List
arrays = List(Hdf5ArrayNode)
groups_and_arrays = List
class Hdf5FileNode(HasTraits):
name = Str('<unknown>')
path = Str('/')
groups = List(Hdf5GroupNode)
arrays = List(Hdf5ArrayNode)
groups_and_arrays = List
# Recursively build tree; there is probably a better way of doing this.
def _get_sub_arrays(group, h5file):
"""Return a list of all arrays immediately below a group in an HDF5 file."""
l = []
for array in h5file.iter_nodes(group, classname='Array'):
a = Hdf5ArrayNode(
name=array._v_name,
path=array._v_pathname,
parent_path=array._v_parent._v_pathname,
)
l.append(a)
return l
def _get_sub_groups(group, h5file):
"""Return a list of all groups and arrays immediately below a group in an HDF5 file."""
l = []
for subgroup in h5file.iter_nodes(group, classname='Group'):
g = Hdf5GroupNode(
name=subgroup._v_name,
path=subgroup._v_pathname,
parent_path=subgroup._v_parent._v_pathname,
)
subarrays = _get_sub_arrays(subgroup, h5file)
if subarrays != []:
g.arrays = subarrays
subgroups = _get_sub_groups(subgroup, h5file)
if subgroups != []:
g.groups = subgroups
g.groups_and_arrays = []
g.groups_and_arrays.extend(subgroups)
g.groups_and_arrays.extend(subarrays)
l.append(g)
return l
def _new_get_sub_arrays(group, h5file):
"""Return a list of all arrays immediately below a group in an HDF5 file."""
l = []
for array in h5file.iter_nodes(group, classname='Array'):
a = Hdf5ArrayNode(
name=array._v_name,
path=array._v_pathname,
parent_path=array._v_parent._v_pathname,
)
l.append(a)
return l
def _new_get_sub_groups(group, h5file):
"""Return a list of all groups and arrays immediately below a group in an HDF5 file."""
l = []
for key, val in dict(group).iteritems():
det = group[key+'/instrument/detector/detector_positions']
print(key, det.value)
exit()
for subgroup in h5file.iter_nodes(group, classname='Group'):
g = Hdf5GroupNode(
name=subgroup._v_name,
path=subgroup._v_pathname,
parent_path=subgroup._v_parent._v_pathname,
)
subarrays = _get_sub_arrays(subgroup, h5file)
if subarrays != []:
g.arrays = subarrays
subgroups = _get_sub_groups(subgroup, h5file)
if subgroups != []:
g.groups = subgroups
g.groups_and_arrays = []
g.groups_and_arrays.extend(subgroups)
g.groups_and_arrays.extend(subarrays)
l.append(g)
return l
def _hdf5_tree(filename):
"""Return a list of all groups and arrays below the root group of an HDF5 file."""
try_pytables = False
if try_pytables:
h5file = tb.open_file(filename, 'r')
print("\nPyTables\n-------")
print(h5file.root)
file_tree = Hdf5FileNode(
name=filename,
groups=_get_sub_groups(h5file.root, h5file),
arrays=_get_sub_arrays(h5file.root, h5file),
)
file_tree.groups_and_arrays = []
file_tree.groups_and_arrays.extend(file_tree.groups)
file_tree.groups_and_arrays.extend(file_tree.arrays)
h5file.close()
# h5py attempt
try_h5py = True
if try_h5py:
h5py_file = h5py.File(filename, 'r')
print("\nh5py \n-------")
print(h5py_file.parent)
print(h5py_file.name)
new_file_tree = Hdf5FileNode(
name=filename,
groups=_new_get_sub_groups(h5py_file.parent, h5py_file),
arrays=_new_get_sub_arrays(h5py_file.parent, h5py_file)
)
return file_tree
# Get a tree editor
def _hdf5_tree_editor(selected=''):
"""Return a TreeEditor specifically for HDF5 file trees."""
return TreeEditor(
nodes=[
TreeNode(
node_for=[Hdf5FileNode],
auto_open=True,
children='groups_and_arrays',
label='name',
view=no_view,
),
TreeNode(
node_for=[Hdf5GroupNode],
auto_open=False,
children='groups_and_arrays',
label='name',
view=no_view,
),
TreeNode(
node_for=[Hdf5ArrayNode],
auto_open=False,
children='',
label='name',
view=no_view,
),
],
editable=False,
selected=selected,
)
if __name__ == '__main__':
from traits.api import Any
class ATree(HasTraits):
h5_tree = Instance(Hdf5FileNode)
node = Any
traits_view = View(
Group(
Item('h5_tree',
editor=_hdf5_tree_editor(selected='node'),
resizable=True
),
orientation='vertical',
),
title='HDF5 Tree Example',
buttons=['Undo', 'OK', 'Cancel'],
resizable=True,
width=.3,
height=.3
)
def _node_changed(self):
print(self.node.path)
if len(sys.argv) == 2:
a_tree = ATree(h5_tree=_hdf5_tree(sys.argv[1]))
a_tree.configure_traits()
else:
print("ERROR: Wrong number of arguements.")
print("Usage: python HDF5_tree_demo.py <hdf5 file>")
# a_tree.edit_traits()
| """This demo shows how to use Traits TreeEditors with PyTables to walk the
hierarchy of an HDF5 file. This only picks out arrays and groups, but could
easily be extended to other structures, like tables.
In the demo, the path to the selected item is printed whenever the selection
changes. In order to run, a path to an existing HDF5 database must be given
at the bottom of this file.
"""
from __future__ import print_function
from traits.api import HasTraits, Str, List, Instance
from traitsui.api import TreeEditor, TreeNode, View, Item, Group
import sys
import tables as tb
import h5py
# View for objects that aren't edited
no_view = View()
# HDF5 Nodes in the tree
class Hdf5ArrayNode(HasTraits):
name = Str('<unknown>')
path = Str('<unknown>')
parent_path = Str('<unknown>')
class Hdf5GroupNode(HasTraits):
name = Str('<unknown>')
path = Str('<unknown>')
parent_path = Str('<unknown>')
# Can't have recursive traits? Really?
#groups = List( Hdf5GroupNode )
groups = List
arrays = List(Hdf5ArrayNode)
groups_and_arrays = List
class Hdf5FileNode(HasTraits):
name = Str('<unknown>')
path = Str('/')
groups = List(Hdf5GroupNode)
arrays = List(Hdf5ArrayNode)
groups_and_arrays = List
# Recursively build tree; there is probably a better way of doing this.
def _get_sub_arrays(group, h5file):
"""Return a list of all arrays immediately below a group in an HDF5 file."""
l = []
for array in h5file.iter_nodes(group, classname='Array'):
a = Hdf5ArrayNode(
name=array._v_name,
path=array._v_pathname,
parent_path=array._v_parent._v_pathname,
)
l.append(a)
return l
def _get_sub_groups(group, h5file):
"""Return a list of all groups and arrays immediately below a group in an HDF5 file."""
l = []
for subgroup in h5file.iter_nodes(group, classname='Group'):
g = Hdf5GroupNode(
name=subgroup._v_name,
path=subgroup._v_pathname,
parent_path=subgroup._v_parent._v_pathname,
)
subarrays = _get_sub_arrays(subgroup, h5file)
if subarrays != []:
g.arrays = subarrays
subgroups = _get_sub_groups(subgroup, h5file)
if subgroups != []:
g.groups = subgroups
g.groups_and_arrays = []
g.groups_and_arrays.extend(subgroups)
g.groups_and_arrays.extend(subarrays)
l.append(g)
return l
def _new_get_sub_arrays(group, h5file):
"""Return a list of all arrays immediately below a group in an HDF5 file."""
l = []
for array in h5file.iter_nodes(group, classname='Array'):
a = Hdf5ArrayNode(
name=array._v_name,
path=array._v_pathname,
parent_path=array._v_parent._v_pathname,
)
l.append(a)
return l
def _new_get_sub_groups(group, h5file):
"""Return a list of all groups and arrays immediately below a group in an HDF5 file."""
l = []
for key, val in dict(group).iteritems():
det = group[key+'/instrument/detector/detector_positions']
print(key, det.value)
exit()
for subgroup in h5file.iter_nodes(group, classname='Group'):
g = Hdf5GroupNode(
name=subgroup._v_name,
path=subgroup._v_pathname,
parent_path=subgroup._v_parent._v_pathname,
)
subarrays = _get_sub_arrays(subgroup, h5file)
if subarrays != []:
g.arrays = subarrays
subgroups = _get_sub_groups(subgroup, h5file)
if subgroups != []:
g.groups = subgroups
g.groups_and_arrays = []
g.groups_and_arrays.extend(subgroups)
g.groups_and_arrays.extend(subarrays)
l.append(g)
return l
def _hdf5_tree(filename):
"""Return a list of all groups and arrays below the root group of an HDF5 file."""
try_pytables = False
if try_pytables:
h5file = tb.open_file(filename, 'r')
print("\nPyTables\n-------")
print(h5file.root)
file_tree = Hdf5FileNode(
name=filename,
groups=_get_sub_groups(h5file.root, h5file),
arrays=_get_sub_arrays(h5file.root, h5file),
)
file_tree.groups_and_arrays = []
file_tree.groups_and_arrays.extend(file_tree.groups)
file_tree.groups_and_arrays.extend(file_tree.arrays)
h5file.close()
# h5py attempt
try_h5py = True
if try_h5py:
h5py_file = h5py.File(filename, 'r')
print("\nh5py \n-------")
print(h5py_file.parent)
print(h5py_file.name)
new_file_tree = Hdf5FileNode(
name=filename,
groups=_new_get_sub_groups(h5py_file.parent, h5py_file),
arrays=_new_get_sub_arrays(h5py_file.parent, h5py_file)
)
return file_tree
# Get a tree editor
def _hdf5_tree_editor(selected=''):
"""Return a TreeEditor specifically for HDF5 file trees."""
return TreeEditor(
nodes=[
TreeNode(
node_for=[Hdf5FileNode],
auto_open=True,
children='groups_and_arrays',
label='name',
view=no_view,
),
TreeNode(
node_for=[Hdf5GroupNode],
auto_open=False,
children='groups_and_arrays',
label='name',
view=no_view,
),
TreeNode(
node_for=[Hdf5ArrayNode],
auto_open=False,
children='',
label='name',
view=no_view,
),
],
editable=False,
selected=selected,
)
if __name__ == '__main__':
from traits.api import Any
class ATree(HasTraits):
h5_tree = Instance(Hdf5FileNode)
node = Any
traits_view = View(
Group(
Item('h5_tree',
editor=_hdf5_tree_editor(selected='node'),
resizable=True
),
orientation='vertical',
),
title='HDF5 Tree Example',
buttons=['Undo', 'OK', 'Cancel'],
resizable=True,
width=.3,
height=.3
)
def _node_changed(self):
print(self.node.path)
if len(sys.argv) == 2:
a_tree = ATree(h5_tree=_hdf5_tree(sys.argv[1]))
a_tree.configure_traits()
else:
print("ERROR: Wrong number of arguements.")
print("Usage: python HDF5_tree_demo.py <hdf5 file>")
# a_tree.edit_traits()
| en | 0.85452 | This demo shows how to use Traits TreeEditors with PyTables to walk the heirarchy of an HDF5 file. This only picks out arrays and groups, but could easily be extended to other structures, like tables. In the demo, the path to the selected item is printed whenever the selection changes. In order to run, a path to an existing HDF5 database must be given at the bottom of this file. # View for objects that aren't edited # HDF5 Nodes in the tree # Can't have recursive traits? Really? #groups = List( Hdf5GroupNode ) # Recurssively build tree, there is probably a better way of doing this. Return a list of all arrays immediately below a group in an HDF5 file. Return a list of all groups and arrays immediately below a group in an HDF5 file. Return a list of all arrays immediately below a group in an HDF5 file. Return a list of all groups and arrays immediately below a group in an HDF5 file. Return a list of all groups and arrays below the root group of an HDF5 file. # h5py attempt # Get a tree editor Return a TreeEditor specifically for HDF5 file trees. # a_tree.edit_traits() | 2.888122 | 3 |
BioSTEAM 2.x.x/biorefineries/actag/_units.py | yoelcortes/Bioindustrial-Complex | 2 | 6630161 | # -*- coding: utf-8 -*-
"""
"""
import biosteam as bst
from thermosteam import PRxn, Rxn
__all__ = ('OleinCrystallizer', 'Fermentation')
class OleinCrystallizer(bst.BatchCrystallizer):
def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
T, crystal_TAG_purity=0.95, melt_AcTAG_purity=0.90,
order=None):
bst.BatchCrystallizer.__init__(self, ID, ins, outs, thermo,
tau=5, V=1e6, T=T)
self.melt_AcTAG_purity = melt_AcTAG_purity
self.crystal_TAG_purity = crystal_TAG_purity
@property
def Hnet(self):
feed = self.ins[0]
effluent = self.outs[0]
if 's' in feed.phases:
H_in = - sum([i.Hfus * j for i,j in zip(self.chemicals, feed['s'].mol) if i.Hfus])
else:
H_in = 0.
solids = effluent['s']
H_out = - sum([i.Hfus * j for i,j in zip(self.chemicals, solids.mol) if i.Hfus])
return H_out - H_in
def _run(self):
outlet = self.outs[0]
outlet.phases = ('s', 'l')
crystal_TAG_purity = self.crystal_TAG_purity
melt_AcTAG_purity = self.melt_AcTAG_purity
feed = self.ins[0]
TAG, AcTAG = feed.imass['TAG', 'AcTAG'].value
total = TAG + AcTAG
minimum_melt_purity = AcTAG / total
minimum_crystal_purity = TAG / total
outlet.empty()
if crystal_TAG_purity < minimum_crystal_purity:
outlet.imol['s'] = feed.mol
elif melt_AcTAG_purity < minimum_melt_purity:
outlet.imol['l'] = feed.mol
else: # Lever rule
crystal_AcTAG_purity = (1. - crystal_TAG_purity)
melt_fraction = (minimum_melt_purity - crystal_AcTAG_purity) / (melt_AcTAG_purity - crystal_AcTAG_purity)
melt = melt_fraction * total
AcTAG_melt = melt * melt_AcTAG_purity
            TAG_melt = melt - AcTAG_melt
outlet.imass['l', ('AcTAG', 'TAG')] = [AcTAG_melt, TAG_melt]
outlet.imol['s'] = feed.mol - outlet.imol['l']
outlet.T = self.T
class Fermentation(bst.BatchBioreactor):
line = 'Fermentation'
def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
tau, N=None, V=None, T=305.15, P=101325., Nmin=2, Nmax=36):
bst.BatchBioreactor.__init__(self, ID, ins, outs, thermo,
tau=tau, N=N, V=V, T=T, P=P, Nmin=Nmin, Nmax=Nmax)
self._load_components()
chemicals = self.chemicals
self.hydrolysis_reaction = Rxn('Sucrose + Water -> 2Glucose', 'Sucrose', 1.00, chemicals)
self.fermentation_reaction = PRxn([
Rxn('Glucose -> 2.04 Water + 1.67 CO2 + 0.106 AcetylDiOlein', 'Glucose', 0.156, chemicals),
Rxn('Glucose -> 2.1 Water + 1.72 CO2 + 0.075 TriOlein', 'Glucose', 0.165, chemicals),
Rxn('Glucose -> Cells', 'Glucose', 0.10, chemicals, basis='wt').copy(basis='mol'),
])
self.CSL_to_constituents = Rxn(
'CSL -> 0.5 H2O + 0.25 LacticAcid + 0.25 Protein', 'CSL', 1.0000, chemicals, basis='wt',
)
def _run(self):
vent, effluent = self.outs
effluent.mix_from(self.ins)
self.CSL_to_constituents(effluent)
self.hydrolysis_reaction.force_reaction(effluent)
effluent.mol[effluent.mol < 0.] = 0.
self.fermentation_reaction(effluent)
vent.empty()
vent.receive_vent(effluent)
| # -*- coding: utf-8 -*-
"""
"""
import biosteam as bst
from thermosteam import PRxn, Rxn
__all__ = ('OleinCrystallizer', 'Fermentation')
class OleinCrystallizer(bst.BatchCrystallizer):
def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
T, crystal_TAG_purity=0.95, melt_AcTAG_purity=0.90,
order=None):
bst.BatchCrystallizer.__init__(self, ID, ins, outs, thermo,
tau=5, V=1e6, T=T)
self.melt_AcTAG_purity = melt_AcTAG_purity
self.crystal_TAG_purity = crystal_TAG_purity
@property
def Hnet(self):
feed = self.ins[0]
effluent = self.outs[0]
if 's' in feed.phases:
H_in = - sum([i.Hfus * j for i,j in zip(self.chemicals, feed['s'].mol) if i.Hfus])
else:
H_in = 0.
solids = effluent['s']
H_out = - sum([i.Hfus * j for i,j in zip(self.chemicals, solids.mol) if i.Hfus])
return H_out - H_in
def _run(self):
outlet = self.outs[0]
outlet.phases = ('s', 'l')
crystal_TAG_purity = self.crystal_TAG_purity
melt_AcTAG_purity = self.melt_AcTAG_purity
feed = self.ins[0]
TAG, AcTAG = feed.imass['TAG', 'AcTAG'].value
total = TAG + AcTAG
minimum_melt_purity = AcTAG / total
minimum_crystal_purity = TAG / total
outlet.empty()
if crystal_TAG_purity < minimum_crystal_purity:
outlet.imol['s'] = feed.mol
elif melt_AcTAG_purity < minimum_melt_purity:
outlet.imol['l'] = feed.mol
else: # Lever rule
crystal_AcTAG_purity = (1. - crystal_TAG_purity)
melt_fraction = (minimum_melt_purity - crystal_AcTAG_purity) / (melt_AcTAG_purity - crystal_AcTAG_purity)
melt = melt_fraction * total
AcTAG_melt = melt * melt_AcTAG_purity
            TAG_melt = melt - AcTAG_melt
outlet.imass['l', ('AcTAG', 'TAG')] = [AcTAG_melt, TAG_melt]
outlet.imol['s'] = feed.mol - outlet.imol['l']
outlet.T = self.T
class Fermentation(bst.BatchBioreactor):
line = 'Fermentation'
def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
tau, N=None, V=None, T=305.15, P=101325., Nmin=2, Nmax=36):
bst.BatchBioreactor.__init__(self, ID, ins, outs, thermo,
tau=tau, N=N, V=V, T=T, P=P, Nmin=Nmin, Nmax=Nmax)
self._load_components()
chemicals = self.chemicals
self.hydrolysis_reaction = Rxn('Sucrose + Water -> 2Glucose', 'Sucrose', 1.00, chemicals)
self.fermentation_reaction = PRxn([
Rxn('Glucose -> 2.04 Water + 1.67 CO2 + 0.106 AcetylDiOlein', 'Glucose', 0.156, chemicals),
Rxn('Glucose -> 2.1 Water + 1.72 CO2 + 0.075 TriOlein', 'Glucose', 0.165, chemicals),
Rxn('Glucose -> Cells', 'Glucose', 0.10, chemicals, basis='wt').copy(basis='mol'),
])
self.CSL_to_constituents = Rxn(
'CSL -> 0.5 H2O + 0.25 LacticAcid + 0.25 Protein', 'CSL', 1.0000, chemicals, basis='wt',
)
def _run(self):
vent, effluent = self.outs
effluent.mix_from(self.ins)
self.CSL_to_constituents(effluent)
self.hydrolysis_reaction.force_reaction(effluent)
effluent.mol[effluent.mol < 0.] = 0.
self.fermentation_reaction(effluent)
vent.empty()
vent.receive_vent(effluent)
| en | 0.835336 | # -*- coding: utf-8 -*- # Lever rule | 2.159739 | 2 |
server.py | maneeshd/expensifier | 2 | 6630162 | <filename>server.py
"""
Author: <NAME> <<EMAIL>>
Server for expensifier using Flask + Flask-Compress middleware
"""
from os import urandom
from os.path import join, realpath, dirname
from flask import Flask, send_file
from flask_compress import Compress
# Directories for the app
CUR_DIR = realpath(dirname(__file__))
HOME = join(CUR_DIR, "dist")
# App creation & configuration
app = Flask(__name__, static_folder=HOME, static_url_path="")
app.config["SECRET_KEY"] = urandom(16).hex()
app.config["COMPRESS_MIMETYPES"] = [
"text/html",
"text/css",
"text/xml",
"application/json",
"application/javascript",
"image/jpg",
"image/png",
"image/x-icon",
]
Compress(app)
@app.route("/")
@app.route("/contact")
def index():
return send_file(join(HOME, "index.html"), mimetype="text/html")
if __name__ == "__main__":
# Development Server, Run using Gunicorn in production
app.run("127.0.0.1", 8080, debug=True)
| <filename>server.py
"""
Author: <NAME> <<EMAIL>>
Server for expensifier using Flask + Flask-Compress middleware
"""
from os import urandom
from os.path import join, realpath, dirname
from flask import Flask, send_file
from flask_compress import Compress
# Directories for the app
CUR_DIR = realpath(dirname(__file__))
HOME = join(CUR_DIR, "dist")
# App creation & configuration
app = Flask(__name__, static_folder=HOME, static_url_path="")
app.config["SECRET_KEY"] = urandom(16).hex()
app.config["COMPRESS_MIMETYPES"] = [
"text/html",
"text/css",
"text/xml",
"application/json",
"application/javascript",
"image/jpg",
"image/png",
"image/x-icon",
]
Compress(app)
@app.route("/")
@app.route("/contact")
def index():
return send_file(join(HOME, "index.html"), mimetype="text/html")
if __name__ == "__main__":
# Development Server, Run using Gunicorn in production
app.run("127.0.0.1", 8080, debug=True)
| en | 0.644251 | Author: <NAME> <<EMAIL>> Server for expensifier using Flask + Flask-Compress middleware # Directories for the app # App creation & configuration # Development Server, Run using Gunicorn in production | 2.509157 | 3 |
typewriter.py | john-pettigrew/noisy_typewriter | 0 | 6630163 | #!/usr/bin/env python3
import time
from Xlib.display import Display
import os
from subprocess import Popen
def play_sound(file):
Popen(['mplayer', file])
disp = Display()
empty_keys = [0] * 32
return_key = ([0] * 4) + [16] + ([0] * 27)
last_keys = [0] * 32
while 1:
keys = disp.query_keymap()
if keys != last_keys and keys != empty_keys:
if keys == return_key:
play_sound('return_sound.wav')
else:
play_sound('key_sound.wav')
last_keys = keys
time.sleep(0.05)
| #!/usr/bin/env python3
import time
from Xlib.display import Display
import os
from subprocess import Popen
def play_sound(file):
Popen(['mplayer', file])
disp = Display()
empty_keys = [0] * 32
return_key = ([0] * 4) + [16] + ([0] * 27)
last_keys = [0] * 32
while 1:
keys = disp.query_keymap()
if keys != last_keys and keys != empty_keys:
if keys == return_key:
play_sound('return_sound.wav')
else:
play_sound('key_sound.wav')
last_keys = keys
time.sleep(0.05)
| fr | 0.221828 | #!/usr/bin/env python3 | 2.755546 | 3 |
parslr/__main__.py | maximmenshikov/parslr | 0 | 6630164 | import sys
import os
from parslr.Parslr import Parslr
from parslr.parslr_args import prepare_parser
args = prepare_parser().parse_args()
p = Parslr(args.antlr, args.tmp_path)
ret_val = p.generate_parser(args.grammar)
if ret_val != 0:
print("Failed to generate parser")
sys.exit(ret_val)
ret_val = p.compile()
if ret_val != 0:
print("Failed to compile parser")
sys.exit(ret_val)
grammar = os.path.splitext(os.path.basename(args.grammar))[0]
if os.path.isdir(args.input):
# A directory with cases
sys.exit(p.run_test_rig_on_dir(
grammar, args.rule, args.input, args.output))
else:
# Just one file
sys.exit(len(p.run_test_rig(grammar, args.rule, args.input)))
| import sys
import os
from parslr.Parslr import Parslr
from parslr.parslr_args import prepare_parser
args = prepare_parser().parse_args()
p = Parslr(args.antlr, args.tmp_path)
ret_val = p.generate_parser(args.grammar)
if ret_val != 0:
print("Failed to generate parser")
sys.exit(ret_val)
ret_val = p.compile()
if ret_val != 0:
print("Failed to compile parser")
sys.exit(ret_val)
grammar = os.path.splitext(os.path.basename(args.grammar))[0]
if os.path.isdir(args.input):
# A directory with cases
sys.exit(p.run_test_rig_on_dir(
grammar, args.rule, args.input, args.output))
else:
# Just one file
sys.exit(len(p.run_test_rig(grammar, args.rule, args.input)))
| en | 0.863063 | # A directory with cases # Just one file | 2.523848 | 3 |
rioxarray/rioxarray.py | TomAugspurger/rioxarray | 0 | 6630165 | # -- coding: utf-8 --
"""
This module is an extension for xarray to provide rasterio capabilities
to xarray datasets/dataarrays.
Credits: The `reproject` functionality was adopted from https://github.com/opendatacube/datacube-core # noqa
Source file:
- https://github.com/opendatacube/datacube-core/blob/084c84d78cb6e1326c7fbbe79c5b5d0bef37c078/datacube/api/geo_xarray.py # noqa
datacube is licensed under the Apache License, Version 2.0:
- https://github.com/opendatacube/datacube-core/blob/1d345f08a10a13c316f81100936b0ad8b1a374eb/LICENSE # noqa
"""
import copy
import math
import warnings
from typing import Iterable
from uuid import uuid4
import numpy as np
import pyproj
import rasterio.warp
import rasterio.windows
import xarray
from affine import Affine
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio.features import geometry_mask
from scipy.interpolate import griddata
from rioxarray.crs import crs_to_wkt
from rioxarray.exceptions import (
DimensionError,
DimensionMissingCoordinateError,
InvalidDimensionOrder,
MissingCRS,
NoDataInBounds,
OneDimensionalRaster,
RioXarrayError,
TooManyDimensions,
)
FILL_VALUE_NAMES = ("_FillValue", "missing_value", "fill_value", "nodata")
UNWANTED_RIO_ATTRS = ("nodatavals", "crs", "is_tiled", "res")
DEFAULT_GRID_MAP = "spatial_ref"
def affine_to_coords(affine, width, height, x_dim="x", y_dim="y"):
"""Generate 1d pixel centered coordinates from affine.
Based on code from the xarray rasterio backend.
Parameters
----------
affine: :obj:`affine.Affine`
The affine of the grid.
width: int
The width of the grid.
height: int
The height of the grid.
x_dim: str, optional
The name of the X dimension. Default is 'x'.
y_dim: str, optional
The name of the Y dimension. Default is 'y'.
Returns
-------
dict: x and y coordinate arrays.
"""
x_coords, _ = affine * (np.arange(width) + 0.5, np.zeros(width) + 0.5)
_, y_coords = affine * (np.zeros(height) + 0.5, np.arange(height) + 0.5)
return {y_dim: y_coords, x_dim: x_coords}
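# Illustrative sketch, kept entirely in comments so importing this module stays
# side-effect free. The Affine values below are assumptions for a hypothetical
# 3x2 grid with 10-unit pixels anchored at (100, 500):
#
#   from affine import Affine
#   coords = affine_to_coords(
#       Affine(10.0, 0.0, 100.0, 0.0, -10.0, 500.0), width=3, height=2
#   )
#   # coords["x"] -> [105., 115., 125.]   (pixel centers, not edges)
#   # coords["y"] -> [495., 485.]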
def _generate_attrs(src_data_array, dst_nodata):
# add original attributes
new_attrs = copy.deepcopy(src_data_array.attrs)
# remove all nodata information
for unwanted_attr in FILL_VALUE_NAMES + UNWANTED_RIO_ATTRS:
new_attrs.pop(unwanted_attr, None)
# add nodata information
fill_value = (
src_data_array.rio.nodata
if src_data_array.rio.nodata is not None
else dst_nodata
)
if src_data_array.rio.encoded_nodata is None and fill_value is not None:
new_attrs["_FillValue"] = fill_value
# add raster spatial information
new_attrs["grid_mapping"] = src_data_array.rio.grid_mapping
return new_attrs
def add_xy_grid_meta(coords, crs=None):
raise RuntimeError(
"add_xy_grid_meta has been removed. Use rio.write_coordinate_system instead.",
)
def add_spatial_ref(in_ds, dst_crs, grid_mapping_name):
raise RuntimeError("add_spatial_ref has been removed. Use rio.write_crs instead.")
def _add_attrs_proj(new_data_array, src_data_array):
"""Make sure attributes and projection correct"""
# make sure dimension information is preserved
if new_data_array.rio._x_dim is None:
new_data_array.rio._x_dim = src_data_array.rio.x_dim
if new_data_array.rio._y_dim is None:
new_data_array.rio._y_dim = src_data_array.rio.y_dim
# make sure attributes preserved
new_attrs = _generate_attrs(src_data_array, None)
# remove fill value if it already exists in the encoding
# this is for data arrays pulling the encoding from a
# source data array instead of being generated anew.
if "_FillValue" in new_data_array.encoding:
new_attrs.pop("_FillValue", None)
new_data_array.rio.set_attrs(new_attrs, inplace=True)
# make sure projection added
new_data_array.rio.write_crs(src_data_array.rio.crs, inplace=True)
new_data_array.rio.write_coordinate_system(inplace=True)
new_data_array.rio.write_transform(inplace=True)
# make sure encoding added
new_data_array.encoding = src_data_array.encoding.copy()
return new_data_array
def _warp_spatial_coords(data_array, affine, width, height):
"""get spatial coords in new transform"""
new_spatial_coords = affine_to_coords(affine, width, height)
return {
"x": xarray.IndexVariable("x", new_spatial_coords["x"]),
"y": xarray.IndexVariable("y", new_spatial_coords["y"]),
}
def _get_nonspatial_coords(src_data_array):
coords = {}
for coord in set(src_data_array.coords) - {
src_data_array.rio.x_dim,
src_data_array.rio.y_dim,
DEFAULT_GRID_MAP,
}:
if src_data_array[coord].dims:
coords[coord] = xarray.IndexVariable(
src_data_array[coord].dims,
src_data_array[coord].values,
src_data_array[coord].attrs,
)
else:
coords[coord] = xarray.Variable(
src_data_array[coord].dims,
src_data_array[coord].values,
src_data_array[coord].attrs,
)
return coords
def _make_coords(src_data_array, dst_affine, dst_width, dst_height):
"""Generate the coordinates of the new projected `xarray.DataArray`"""
coords = _get_nonspatial_coords(src_data_array)
new_coords = _warp_spatial_coords(src_data_array, dst_affine, dst_width, dst_height)
new_coords.update(coords)
return new_coords
def _make_dst_affine(
src_data_array, src_crs, dst_crs, dst_resolution=None, dst_shape=None
):
"""Determine the affine of the new projected `xarray.DataArray`"""
src_bounds = src_data_array.rio.bounds()
src_height, src_width = src_data_array.rio.shape
dst_height, dst_width = dst_shape if dst_shape is not None else (None, None)
if isinstance(dst_resolution, Iterable):
dst_resolution = tuple(abs(res_val) for res_val in dst_resolution)
elif dst_resolution is not None:
dst_resolution = abs(dst_resolution)
resolution_or_width_height = {
k: v
for k, v in [
("resolution", dst_resolution),
("dst_height", dst_height),
("dst_width", dst_width),
]
if v is not None
}
dst_affine, dst_width, dst_height = rasterio.warp.calculate_default_transform(
src_crs,
dst_crs,
src_width,
src_height,
*src_bounds,
**resolution_or_width_height,
)
return dst_affine, dst_width, dst_height
def _write_metatata_to_raster(raster_handle, xarray_dataset, tags):
"""
Write the metadata stored in the xarray object to raster metadata
"""
tags = xarray_dataset.attrs if tags is None else {**xarray_dataset.attrs, **tags}
# write scales and offsets
try:
raster_handle.scales = tags["scales"]
except KeyError:
try:
raster_handle.scales = (tags["scale_factor"],) * raster_handle.count
except KeyError:
pass
try:
raster_handle.offsets = tags["offsets"]
except KeyError:
try:
raster_handle.offsets = (tags["add_offset"],) * raster_handle.count
except KeyError:
pass
# filter out attributes that should be written in a different location
skip_tags = (
UNWANTED_RIO_ATTRS
+ FILL_VALUE_NAMES
+ ("transform", "scales", "scale_factor", "add_offset", "offsets")
)
# this is for when multiple values are used
# in this case, it will be stored in the raster description
if not isinstance(tags.get("long_name"), str):
skip_tags += ("long_name",)
tags = {key: value for key, value in tags.items() if key not in skip_tags}
raster_handle.update_tags(**tags)
# write band name information
long_name = xarray_dataset.attrs.get("long_name")
if isinstance(long_name, (tuple, list)):
if len(long_name) != raster_handle.count:
raise RioXarrayError(
"Number of names in the 'long_name' attribute does not equal "
"the number of bands."
)
for iii, band_description in enumerate(long_name):
raster_handle.set_band_description(iii + 1, band_description)
else:
band_description = long_name or xarray_dataset.name
if band_description:
for iii in range(raster_handle.count):
raster_handle.set_band_description(iii + 1, band_description)
def _get_data_var_message(obj):
"""
Get message for named data variables.
"""
try:
return f" Data variable: {obj.name}" if obj.name else ""
except AttributeError:
return ""
def _ensure_nodata_dtype(original_nodata, new_dtype):
"""
Convert the nodata to the new datatype and raise warning
if the value of the nodata value changed.
"""
original_nodata = float(original_nodata)
nodata = np.dtype(new_dtype).type(original_nodata)
if not np.isnan(nodata) and original_nodata != nodata:
warnings.warn(
f"The nodata value ({original_nodata}) has been automatically "
f"changed to ({nodata}) to match the dtype of the data."
)
return nodata
class XRasterBase(object):
"""This is the base class for the GIS extensions for xarray"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._x_dim = None
self._y_dim = None
# Determine the spatial dimensions of the `xarray.DataArray`
if "x" in self._obj.dims and "y" in self._obj.dims:
self._x_dim = "x"
self._y_dim = "y"
elif "longitude" in self._obj.dims and "latitude" in self._obj.dims:
self._x_dim = "longitude"
self._y_dim = "latitude"
else:
# look for coordinates with CF attributes
for coord in self._obj.coords:
# make sure to only look in 1D coordinates
                # that have the same dimension name as the coordinate
if self._obj.coords[coord].dims != (coord,):
continue
elif (self._obj.coords[coord].attrs.get("axis", "").upper() == "X") or (
self._obj.coords[coord].attrs.get("standard_name", "").lower()
in ("longitude", "projection_x_coordinate")
):
self._x_dim = coord
elif (self._obj.coords[coord].attrs.get("axis", "").upper() == "Y") or (
self._obj.coords[coord].attrs.get("standard_name", "").lower()
in ("latitude", "projection_y_coordinate")
):
self._y_dim = coord
# properties
self._count = None
self._height = None
self._width = None
self._crs = None
@property
def crs(self):
""":obj:`rasterio.crs.CRS`:
Retrieve projection from :obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
if self._crs is not None:
return None if self._crs is False else self._crs
# look in grid_mapping
try:
self.set_crs(
pyproj.CRS.from_cf(self._obj.coords[self.grid_mapping].attrs),
inplace=True,
)
except (KeyError, pyproj.exceptions.CRSError):
try:
# look in attrs for 'crs'
self.set_crs(self._obj.attrs["crs"], inplace=True)
except KeyError:
self._crs = False
return None
return self._crs
def _get_obj(self, inplace):
"""
Get the object to modify.
Parameters
----------
inplace: bool
If True, returns self.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
if inplace:
return self._obj
obj_copy = self._obj.copy(deep=True)
# preserve attribute information
obj_copy.rio._x_dim = self._x_dim
obj_copy.rio._y_dim = self._y_dim
obj_copy.rio._width = self._width
obj_copy.rio._height = self._height
obj_copy.rio._crs = self._crs
return obj_copy
def set_crs(self, input_crs, inplace=True):
"""
Set the CRS value for the Dataset/DataArray without modifying
the dataset/data array.
Parameters
----------
input_crs: object
Anything accepted by `rasterio.crs.CRS.from_user_input`.
inplace: bool, optional
            If True, it will write to the existing dataset. Default is True.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Dataset with crs attribute.
"""
crs = CRS.from_wkt(crs_to_wkt(input_crs))
obj = self._get_obj(inplace=inplace)
obj.rio._crs = crs
return obj
@property
def grid_mapping(self):
"""
str: The CF grid_mapping attribute. 'spatial_ref' is the default.
"""
try:
return self._obj.attrs["grid_mapping"]
except KeyError:
pass
grid_mapping = DEFAULT_GRID_MAP
# search the dataset for the grid mapping name
if hasattr(self._obj, "data_vars"):
grid_mappings = set()
for var in self._obj.data_vars:
try:
self._obj[var].rio.x_dim
self._obj[var].rio.y_dim
except DimensionError:
continue
try:
grid_mapping = self._obj[var].attrs["grid_mapping"]
grid_mappings.add(grid_mapping)
except KeyError:
pass
if len(grid_mappings) > 1:
raise RioXarrayError("Multiple grid mappings exist.")
return grid_mapping
def write_grid_mapping(self, grid_mapping_name=DEFAULT_GRID_MAP, inplace=False):
"""
Write the CF grid_mapping attribute.
Parameters
----------
grid_mapping_name: str, optional
Name of the grid_mapping coordinate.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with CF compliant CRS information.
"""
data_obj = self._get_obj(inplace=inplace)
if hasattr(data_obj, "data_vars"):
for var in data_obj.data_vars:
try:
x_dim = data_obj[var].rio.x_dim
y_dim = data_obj[var].rio.y_dim
except DimensionError:
continue
data_obj[var].rio.update_attrs(
dict(grid_mapping=grid_mapping_name), inplace=True
).rio.set_spatial_dims(x_dim=x_dim, y_dim=y_dim, inplace=True)
return data_obj.rio.update_attrs(
dict(grid_mapping=grid_mapping_name), inplace=True
)
def write_crs(self, input_crs=None, grid_mapping_name=None, inplace=False):
"""
Write the CRS to the dataset in a CF compliant manner.
Parameters
----------
input_crs: object
Anything accepted by `rasterio.crs.CRS.from_user_input`.
grid_mapping_name: str, optional
Name of the grid_mapping coordinate to store the CRS information in.
Default is the grid_mapping name of the dataset.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with CF compliant CRS information.
"""
if input_crs is not None:
data_obj = self.set_crs(input_crs, inplace=inplace)
else:
data_obj = self._get_obj(inplace=inplace)
# get original transform
transform = self._cached_transform()
        # remove old grid mapping coordinate if it exists
grid_mapping_name = (
self.grid_mapping if grid_mapping_name is None else grid_mapping_name
)
try:
del data_obj.coords[grid_mapping_name]
except KeyError:
pass
if data_obj.rio.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
)
# add grid mapping coordinate
data_obj.coords[grid_mapping_name] = xarray.Variable((), 0)
grid_map_attrs = pyproj.CRS.from_user_input(data_obj.rio.crs).to_cf()
# spatial_ref is for compatibility with GDAL
crs_wkt = crs_to_wkt(data_obj.rio.crs)
grid_map_attrs["spatial_ref"] = crs_wkt
grid_map_attrs["crs_wkt"] = crs_wkt
if transform is not None:
grid_map_attrs["GeoTransform"] = " ".join(
[str(item) for item in transform.to_gdal()]
)
data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True)
return data_obj.rio.write_grid_mapping(
grid_mapping_name=grid_mapping_name, inplace=True
)
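    # Illustrative sketch (comments only; ``xds`` and the EPSG code are assumed,
    # not defined in this module). Writing CF-style CRS metadata typically looks
    # like:
    #
    #   xds = xds.rio.write_crs("EPSG:4326")
    #   xds.spatial_ref.attrs["crs_wkt"]   # WKT stored on the grid mapping coord
    #   xds.rio.crs                        # parsed back out as a rasterio CRS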
def estimate_utm_crs(self, datum_name="WGS 84"):
"""Returns the estimated UTM CRS based on the bounds of the dataset.
.. versionadded:: 0.2
.. note:: Requires pyproj 3+
Parameters
----------
datum_name : str, optional
The name of the datum to use in the query. Default is WGS 84.
Returns
-------
rasterio.crs.CRS
"""
try:
from pyproj.aoi import AreaOfInterest
from pyproj.database import query_utm_crs_info
except ImportError:
raise RuntimeError("pyproj 3+ required for estimate_utm_crs.")
if self.crs is None:
raise RuntimeError("crs must be set to estimate UTM CRS.")
# ensure using geographic coordinates
if self.crs.is_geographic:
minx, miny, maxx, maxy = self.bounds(recalc=True)
else:
minx, miny, maxx, maxy = self.transform_bounds("EPSG:4326", recalc=True)
x_center = np.mean([minx, maxx])
y_center = np.mean([miny, maxy])
utm_crs_list = query_utm_crs_info(
datum_name=datum_name,
area_of_interest=AreaOfInterest(
west_lon_degree=x_center,
south_lat_degree=y_center,
east_lon_degree=x_center,
north_lat_degree=y_center,
),
)
try:
return CRS.from_epsg(utm_crs_list[0].code)
except IndexError:
raise RuntimeError("Unable to determine UTM CRS")
def _cached_transform(self):
"""
Get the transform from:
        1. The GeoTransform metadata property in the grid mapping
2. The transform attribute.
"""
try:
# look in grid_mapping
return Affine.from_gdal(
*np.fromstring(
self._obj.coords[self.grid_mapping].attrs["GeoTransform"], sep=" "
)
)
except KeyError:
try:
return Affine(*self._obj.attrs["transform"][:6])
except KeyError:
pass
return None
def write_transform(self, transform=None, grid_mapping_name=None, inplace=False):
"""
.. versionadded:: 0.0.30
Write the GeoTransform to the dataset where GDAL can read it in.
https://gdal.org/drivers/raster/netcdf.html#georeference
Parameters
----------
transform: affine.Affine, optional
The transform of the dataset. If not provided, it will be calculated.
grid_mapping_name: str, optional
Name of the grid_mapping coordinate to store the transform information in.
Default is the grid_mapping name of the dataset.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with Geo Transform written.
"""
transform = transform or self.transform(recalc=True)
data_obj = self._get_obj(inplace=inplace)
# delete the old attribute to prevent confusion
data_obj.attrs.pop("transform", None)
grid_mapping_name = (
self.grid_mapping if grid_mapping_name is None else grid_mapping_name
)
try:
grid_map_attrs = data_obj.coords[grid_mapping_name].attrs.copy()
except KeyError:
data_obj.coords[grid_mapping_name] = xarray.Variable((), 0)
grid_map_attrs = data_obj.coords[grid_mapping_name].attrs.copy()
grid_map_attrs["GeoTransform"] = " ".join(
[str(item) for item in transform.to_gdal()]
)
data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True)
return data_obj.rio.write_grid_mapping(
grid_mapping_name=grid_mapping_name, inplace=True
)
def transform(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
If True, it will re-calculate the transform instead of using
the cached transform.
Returns
-------
        :obj:`affine.Affine`:
The affine of the :obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
try:
src_left, _, _, src_top = self.bounds(recalc=recalc)
src_resolution_x, src_resolution_y = self.resolution(recalc=recalc)
except (DimensionMissingCoordinateError, DimensionError):
return Affine.identity()
return Affine.translation(src_left, src_top) * Affine.scale(
src_resolution_x, src_resolution_y
)
def write_coordinate_system(self, inplace=False):
"""
Write the coordinate system CF metadata.
.. versionadded:: 0.0.30
Parameters
----------
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The dataset with the CF coordinate system attributes added.
"""
data_obj = self._get_obj(inplace=inplace)
# add metadata to x,y coordinates
is_projected = data_obj.rio.crs and data_obj.rio.crs.is_projected
is_geographic = data_obj.rio.crs and data_obj.rio.crs.is_geographic
x_coord_attrs = dict(data_obj.coords[self.x_dim].attrs)
x_coord_attrs["axis"] = "X"
y_coord_attrs = dict(data_obj.coords[self.y_dim].attrs)
y_coord_attrs["axis"] = "Y"
if is_projected:
units = None
if hasattr(data_obj.rio.crs, "linear_units_factor"):
unit_factor = data_obj.rio.crs.linear_units_factor[-1]
if unit_factor != 1:
units = f"{unit_factor} metre"
else:
units = "metre"
# X metadata
x_coord_attrs["long_name"] = "x coordinate of projection"
x_coord_attrs["standard_name"] = "projection_x_coordinate"
if units:
x_coord_attrs["units"] = units
# Y metadata
y_coord_attrs["long_name"] = "y coordinate of projection"
y_coord_attrs["standard_name"] = "projection_y_coordinate"
if units:
y_coord_attrs["units"] = units
elif is_geographic:
# X metadata
x_coord_attrs["long_name"] = "longitude"
x_coord_attrs["standard_name"] = "longitude"
x_coord_attrs["units"] = "degrees_east"
# Y metadata
y_coord_attrs["long_name"] = "latitude"
y_coord_attrs["standard_name"] = "latitude"
y_coord_attrs["units"] = "degrees_north"
data_obj.coords[self.y_dim].attrs = y_coord_attrs
data_obj.coords[self.x_dim].attrs = x_coord_attrs
return data_obj
def set_attrs(self, new_attrs, inplace=False):
"""
Set the attributes of the dataset/dataarray and reset
rioxarray properties to re-search for them.
Parameters
----------
new_attrs: dict
A dictionary of new attributes.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with new attributes.
"""
data_obj = self._get_obj(inplace=inplace)
# set the attributes
data_obj.attrs = new_attrs
# reset rioxarray properties depending
# on attributes to be generated
data_obj.rio._nodata = None
data_obj.rio._crs = None
return data_obj
def update_attrs(self, new_attrs, inplace=False):
"""
Update the attributes of the dataset/dataarray and reset
rioxarray properties to re-search for them.
Parameters
----------
new_attrs: dict
A dictionary of new attributes to update with.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with updated attributes.
"""
data_attrs = dict(self._obj.attrs)
data_attrs.update(**new_attrs)
return self.set_attrs(data_attrs, inplace=inplace)
def set_spatial_dims(self, x_dim, y_dim, inplace=True):
"""
This sets the spatial dimensions of the dataset.
Parameters
----------
x_dim: str
The name of the x dimension.
y_dim: str
The name of the y dimension.
inplace: bool, optional
            If True, it will modify the object in place.
Otherwise it will return a modified copy.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Dataset with spatial dimensions set.
"""
def set_dims(obj, in_x_dim, in_y_dim):
if in_x_dim in obj.dims:
obj.rio._x_dim = x_dim
else:
raise DimensionError(
f"x dimension ({x_dim}) not found.{_get_data_var_message(obj)}"
)
if y_dim in obj.dims:
obj.rio._y_dim = y_dim
else:
raise DimensionError(
f"y dimension ({x_dim}) not found.{_get_data_var_message(obj)}"
)
data_obj = self._get_obj(inplace=inplace)
set_dims(data_obj, x_dim, y_dim)
return data_obj
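    # Illustrative sketch (comments only; the dimension names "lon"/"lat" are
    # assumptions). Datasets whose spatial dimensions are not auto-detected can
    # be tagged explicitly:
    #
    #   xds = xds.rio.set_spatial_dims(x_dim="lon", y_dim="lat")
    #   xds.rio.width, xds.rio.height   # now resolvable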
@property
def x_dim(self):
"""str: The dimension for the X-axis."""
if self._x_dim is not None:
return self._x_dim
raise DimensionError(
"x dimension not found. 'set_spatial_dims()' can address this."
f"{_get_data_var_message(self._obj)}"
)
@property
def y_dim(self):
"""str: The dimension for the Y-axis."""
if self._y_dim is not None:
return self._y_dim
raise DimensionError(
"x dimension not found. 'set_spatial_dims()' can address this."
f"{_get_data_var_message(self._obj)}"
)
@property
def width(self):
"""int: Returns the width of the dataset (x dimension size)"""
if self._width is not None:
return self._width
self._width = self._obj[self.x_dim].size
return self._width
@property
def height(self):
"""int: Returns the height of the dataset (y dimension size)"""
if self._height is not None:
return self._height
self._height = self._obj[self.y_dim].size
return self._height
@property
def shape(self):
"""tuple(int, int): Returns the shape (height, width)"""
return (self.height, self.width)
def _check_dimensions(self):
"""
        This function validates that the dimensions are 2D/3D and
        that they are in the proper order.
Returns
-------
        str or None: Name of the extra dimension.
"""
extra_dims = list(set(list(self._obj.dims)) - set([self.x_dim, self.y_dim]))
if len(extra_dims) > 1:
raise TooManyDimensions(
"Only 2D and 3D data arrays supported."
f"{_get_data_var_message(self._obj)}"
)
elif extra_dims and self._obj.dims != (extra_dims[0], self.y_dim, self.x_dim):
raise InvalidDimensionOrder(
"Invalid dimension order. Expected order: {0}. "
"You can use `DataArray.transpose{0}`"
" to reorder your dimensions.".format(
(extra_dims[0], self.y_dim, self.x_dim)
)
+ f"{_get_data_var_message(self._obj)}"
)
elif not extra_dims and self._obj.dims != (self.y_dim, self.x_dim):
raise InvalidDimensionOrder(
"Invalid dimension order. Expected order: {0}"
"You can use `DataArray.transpose{0}` "
"to reorder your dimensions.".format((self.y_dim, self.x_dim))
+ f"{_get_data_var_message(self._obj)}"
)
return extra_dims[0] if extra_dims else None
@property
def count(self):
"""int: Returns the band count (z dimension size)"""
if self._count is not None:
return self._count
extra_dim = self._check_dimensions()
self._count = 1
if extra_dim is not None:
self._count = self._obj[extra_dim].size
return self._count
def _internal_bounds(self):
"""Determine the internal bounds of the `xarray.DataArray`"""
if self.x_dim not in self._obj.coords:
raise DimensionMissingCoordinateError(f"{self.x_dim} missing coordinates.")
elif self.y_dim not in self._obj.coords:
raise DimensionMissingCoordinateError(f"{self.y_dim} missing coordinates.")
try:
left = float(self._obj[self.x_dim][0])
right = float(self._obj[self.x_dim][-1])
top = float(self._obj[self.y_dim][0])
bottom = float(self._obj[self.y_dim][-1])
except IndexError:
raise NoDataInBounds(
"Unable to determine bounds from coordinates."
f"{_get_data_var_message(self._obj)}"
)
return left, bottom, right, top
def resolution(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
Will force the resolution to be recalculated instead of using the
transform attribute.
Returns
-------
x_resolution, y_resolution: float
The resolution of the `xarray.DataArray` | `xarray.Dataset`
"""
transform = self._cached_transform()
if (
not recalc or self.width == 1 or self.height == 1
) and transform is not None:
resolution_x = transform.a
resolution_y = transform.e
return resolution_x, resolution_y
# if the coordinates of the spatial dimensions are missing
# use the cached transform resolution
try:
left, bottom, right, top = self._internal_bounds()
except DimensionMissingCoordinateError:
if transform is None:
raise
resolution_x = transform.a
resolution_y = transform.e
return resolution_x, resolution_y
if self.width == 1 or self.height == 1:
raise OneDimensionalRaster(
"Only 1 dimenional array found. Cannot calculate the resolution."
f"{_get_data_var_message(self._obj)}"
)
resolution_x = (right - left) / (self.width - 1)
resolution_y = (bottom - top) / (self.height - 1)
return resolution_x, resolution_y
def bounds(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
Will force the bounds to be recalculated instead of using the
transform attribute.
Returns
-------
left, bottom, right, top: float
Outermost coordinates of the `xarray.DataArray` | `xarray.Dataset`.
"""
resolution_x, resolution_y = self.resolution(recalc=recalc)
try:
# attempt to get bounds from xarray coordinate values
left, bottom, right, top = self._internal_bounds()
left -= resolution_x / 2.0
right += resolution_x / 2.0
top -= resolution_y / 2.0
bottom += resolution_y / 2.0
except DimensionMissingCoordinateError:
transform = self._cached_transform()
left = transform.c
top = transform.f
right = left + resolution_x * self.width
bottom = top + resolution_y * self.height
return left, bottom, right, top
def isel_window(self, window):
"""
        Use a rasterio.windows.Window to select a subset of the data.
.. warning:: Float indices are converted to integers.
Parameters
----------
        window: :class:`rasterio.windows.Window`
The window of the dataset to read.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The data in the window.
"""
(row_start, row_stop), (col_start, col_stop) = window.toranges()
row_start = math.ceil(row_start) if row_start < 0 else math.floor(row_start)
row_stop = math.floor(row_stop) if row_stop < 0 else math.ceil(row_stop)
col_start = math.ceil(col_start) if col_start < 0 else math.floor(col_start)
col_stop = math.floor(col_stop) if col_stop < 0 else math.ceil(col_stop)
row_slice = slice(int(row_start), int(row_stop))
col_slice = slice(int(col_start), int(col_stop))
return (
self._obj.isel({self.y_dim: row_slice, self.x_dim: col_slice})
.copy() # this is to prevent sharing coordinates with the original dataset
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.write_transform(
transform=rasterio.windows.transform(
rasterio.windows.Window.from_slices(
rows=row_slice,
cols=col_slice,
width=self.width,
height=self.height,
),
self.transform(recalc=True),
),
inplace=True,
)
)
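    # Illustrative sketch (comments only; the window offsets/sizes are assumed
    # values). A rasterio window selects by row/column ranges rather than by
    # coordinate values:
    #
    #   from rasterio.windows import Window
    #   subset = xds.rio.isel_window(
    #       Window(col_off=0, row_off=0, width=256, height=256)
    #   )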
def slice_xy(self, minx, miny, maxx, maxy):
"""Slice the array by x,y bounds.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The data in the slice.
"""
left, bottom, right, top = self._internal_bounds()
if top > bottom:
y_slice = slice(maxy, miny)
else:
y_slice = slice(miny, maxy)
if left > right:
x_slice = slice(maxx, minx)
else:
x_slice = slice(minx, maxx)
subset = (
self._obj.sel({self.x_dim: x_slice, self.y_dim: y_slice})
.copy() # this is to prevent sharing coordinates with the original dataset
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.write_transform(inplace=True)
)
return subset
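    # Illustrative sketch (comments only; the bounds are assumed values in the
    # dataset's own CRS). Unlike ``isel_window``, ``slice_xy`` selects by
    # coordinate values:
    #
    #   subset = xds.rio.slice_xy(
    #       minx=425000, miny=4615000, maxx=437000, maxy=4628000
    #   )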
def transform_bounds(self, dst_crs, densify_pts=21, recalc=False):
"""Transform bounds from src_crs to dst_crs.
Optionally densifying the edges (to account for nonlinear transformations
along these edges) and extracting the outermost bounds.
Note: this does not account for the antimeridian.
Parameters
----------
dst_crs: str, :obj:`rasterio.crs.CRS`, or dict
Target coordinate reference system.
densify_pts: uint, optional
Number of points to add to each edge to account for nonlinear
edges produced by the transform process. Large numbers will produce
worse performance. Default: 21 (gdal default).
recalc: bool, optional
Will force the bounds to be recalculated instead of using the transform
attribute.
Returns
-------
left, bottom, right, top: float
Outermost coordinates in target coordinate reference system.
"""
return rasterio.warp.transform_bounds(
self.crs, dst_crs, *self.bounds(recalc=recalc), densify_pts=densify_pts
)
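    # Illustrative sketch (comments only; ``xds`` is an assumed projected
    # dataset). Handy for getting lat/lon bounds of a projected raster:
    #
    #   west, south, east, north = xds.rio.transform_bounds("EPSG:4326")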
@xarray.register_dataarray_accessor("rio")
class RasterArray(XRasterBase):
"""This is the GIS extension for :obj:`xarray.DataArray`"""
def __init__(self, xarray_obj):
super(RasterArray, self).__init__(xarray_obj)
# properties
self._nodata = None
def set_nodata(self, input_nodata, inplace=True):
"""
Set the nodata value for the DataArray without modifying
the data array.
Parameters
----------
input_nodata: object
Valid nodata for dtype.
inplace: bool, optional
            If True, it will write to the existing DataArray. Default is True.
Returns
-------
:obj:`xarray.DataArray`:
Dataset with nodata attribute set.
"""
obj = self._get_obj(inplace=inplace)
obj.rio._nodata = input_nodata
return obj
def write_nodata(self, input_nodata, inplace=False):
"""
Write the nodata to the DataArray in a CF compliant manner.
Parameters
----------
input_nodata: object
Nodata value for the DataArray.
If input_nodata is None, it will remove the _FillValue attribute.
inplace: bool, optional
If True, it will write to the existing DataArray. Default is False.
Returns
-------
:obj:`xarray.DataArray`:
Modified DataArray with CF compliant nodata information.
"""
data_obj = self._get_obj(inplace=inplace)
input_nodata = False if input_nodata is None else input_nodata
if input_nodata is not False:
input_nodata = _ensure_nodata_dtype(input_nodata, self._obj.dtype)
data_obj.rio.update_attrs(dict(_FillValue=input_nodata), inplace=True)
else:
new_vars = dict(data_obj.attrs)
new_vars.pop("_FillValue", None)
data_obj.rio.set_attrs(new_vars, inplace=True)
data_obj.rio.set_nodata(input_nodata, inplace=True)
return data_obj
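    # Illustrative sketch (comments only; the nodata value is an assumed
    # choice):
    #
    #   xda = xda.rio.write_nodata(-9999)
    #   xda.rio.nodata                     # -9999, also written to _FillValue
    #   xda = xda.rio.write_nodata(None)   # removes the _FillValue attribute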
@property
def encoded_nodata(self):
"""Return the encoded nodata value for the dataset if encoded."""
encoded_nodata = self._obj.encoding.get("_FillValue")
if encoded_nodata is None:
return None
return _ensure_nodata_dtype(encoded_nodata, self._obj.dtype)
@property
def nodata(self):
"""Get the nodata value for the dataset."""
if self._nodata is not None:
return None if self._nodata is False else self._nodata
if self.encoded_nodata is not None:
self._nodata = np.nan
else:
self._nodata = self._obj.attrs.get(
"_FillValue",
self._obj.attrs.get(
"missing_value",
self._obj.attrs.get("fill_value", self._obj.attrs.get("nodata")),
),
)
# look in places used by `xarray.open_rasterio`
if self._nodata is None:
try:
self._nodata = self._obj._file_obj.acquire().nodata
except AttributeError:
try:
self._nodata = self._obj.attrs["nodatavals"][0]
except (KeyError, IndexError):
pass
if self._nodata is None:
self._nodata = False
return None
self._nodata = _ensure_nodata_dtype(self._nodata, self._obj.dtype)
return self._nodata
def reproject(
self,
dst_crs,
resolution=None,
shape=None,
transform=None,
resampling=Resampling.nearest,
):
"""
Reproject :obj:`xarray.DataArray` objects
Powered by `rasterio.warp.reproject`
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
            If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
.. versionadded:: 0.0.27 shape
.. versionadded:: 0.0.28 transform
Parameters
----------
dst_crs: str
OGC WKT string or Proj.4 string.
resolution: float or tuple(float, float), optional
Size of a destination pixel in destination projection units
(e.g. degrees or metres).
shape: tuple(int, int), optional
Shape of the destination in pixels (dst_height, dst_width). Cannot be used
together with resolution.
transform: optional
The destination transform.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
-------
:obj:`xarray.DataArray`:
The reprojected DataArray.
"""
if resolution is not None and (shape is not None or transform is not None):
raise RioXarrayError("resolution cannot be used with shape or transform.")
if self.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
f"{_get_data_var_message(self._obj)}"
)
src_affine = self.transform(recalc=True)
if transform is None:
dst_affine, dst_width, dst_height = _make_dst_affine(
self._obj, self.crs, dst_crs, resolution, shape
)
else:
dst_affine = transform
if shape is not None:
dst_height, dst_width = shape
else:
dst_height, dst_width = self.shape
extra_dim = self._check_dimensions()
if extra_dim:
dst_data = np.zeros(
(self._obj[extra_dim].size, dst_height, dst_width),
dtype=self._obj.dtype.type,
)
else:
dst_data = np.zeros((dst_height, dst_width), dtype=self._obj.dtype.type)
try:
dst_nodata = self._obj.dtype.type(
self.nodata if self.nodata is not None else -9999
)
except ValueError:
# if integer, set nodata to -9999
dst_nodata = self._obj.dtype.type(-9999)
src_nodata = self._obj.dtype.type(
self.nodata if self.nodata is not None else dst_nodata
)
rasterio.warp.reproject(
source=self._obj.values,
destination=dst_data,
src_transform=src_affine,
src_crs=self.crs,
src_nodata=src_nodata,
dst_transform=dst_affine,
dst_crs=dst_crs,
dst_nodata=dst_nodata,
resampling=resampling,
)
# add necessary attributes
new_attrs = _generate_attrs(self._obj, dst_nodata)
# make sure dimensions with coordinates renamed to x,y
dst_dims = []
for dim in self._obj.dims:
if dim == self.x_dim:
dst_dims.append("x")
elif dim == self.y_dim:
dst_dims.append("y")
else:
dst_dims.append(dim)
xda = xarray.DataArray(
name=self._obj.name,
data=dst_data,
coords=_make_coords(self._obj, dst_affine, dst_width, dst_height),
dims=tuple(dst_dims),
attrs=new_attrs,
)
xda.encoding = self._obj.encoding
xda.rio.write_transform(dst_affine, inplace=True)
xda.rio.write_crs(dst_crs, inplace=True)
xda.rio.write_coordinate_system(inplace=True)
return xda
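    # Illustrative sketch (comments only; the target CRS and resolution are
    # assumed values). Either a resolution or a shape/transform may be given,
    # but not both:
    #
    #   xda_wgs84 = xda.rio.reproject("EPSG:4326")
    #   xda_coarse = xda.rio.reproject(
    #       xda.rio.crs, resolution=90.0, resampling=Resampling.bilinear
    #   )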
def reproject_match(self, match_data_array, resampling=Resampling.nearest):
"""
Reproject a DataArray object to match the resolution, projection,
and region of another DataArray.
Powered by `rasterio.warp.reproject`
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
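Example (an illustrative sketch; both raster paths are hypothetical):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> xds_match = xarray.open_rasterio('target_grid.tif')
>>> aligned = xds.rio.reproject_match(xds_match)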
Parameters
----------
match_data_array: :obj:`xarray.DataArray` | :obj:`xarray.Dataset`
DataArray of the target resolution and projection.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:obj:`xarray.DataArray`:
Contains the data from the src_data_array, reprojected to match
match_data_array.
"""
dst_crs = crs_to_wkt(match_data_array.rio.crs)
return self.reproject(
dst_crs,
transform=match_data_array.rio.transform(recalc=True),
shape=match_data_array.rio.shape,
resampling=resampling,
)
def pad_xy(self, minx, miny, maxx, maxy, constant_values):
"""Pad the array to x,y bounds.
.. versionadded:: 0.0.29
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
constant_values: scalar
The value used for padding. If None, nodata will be used if it is
set, and np.nan otherwise.
Returns
-------
:obj:`xarray.DataArray`:
The padded object.
"""
left, bottom, right, top = self._internal_bounds()
resolution_x, resolution_y = self.resolution()
y_before = y_after = 0
x_before = x_after = 0
y_coord = self._obj[self.y_dim]
x_coord = self._obj[self.x_dim]
if top - resolution_y < maxy:
new_y_coord = np.arange(bottom, maxy, -resolution_y)[::-1]
y_before = len(new_y_coord) - len(y_coord)
y_coord = new_y_coord
top = y_coord[0]
if bottom + resolution_y > miny:
new_y_coord = np.arange(top, miny, resolution_y)
y_after = len(new_y_coord) - len(y_coord)
y_coord = new_y_coord
bottom = y_coord[-1]
if left - resolution_x > minx:
new_x_coord = np.arange(right, minx, -resolution_x)[::-1]
x_before = len(new_x_coord) - len(x_coord)
x_coord = new_x_coord
left = x_coord[0]
if right + resolution_x < maxx:
new_x_coord = np.arange(left, maxx, resolution_x)
x_after = len(new_x_coord) - len(x_coord)
x_coord = new_x_coord
right = x_coord[-1]
if constant_values is None:
constant_values = np.nan if self.nodata is None else self.nodata
superset = self._obj.pad(
pad_width={
self.x_dim: (x_before, x_after),
self.y_dim: (y_before, y_after),
},
constant_values=constant_values,
).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
superset[self.x_dim] = x_coord
superset[self.y_dim] = y_coord
superset.rio.write_transform(inplace=True)
return superset
def pad_box(self, minx, miny, maxx, maxy, constant_values=None):
"""Pad the :obj:`xarray.DataArray` to a bounding box
.. versionadded:: 0.0.29
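Example (an illustrative sketch; the bounds are hypothetical and assumed to be
in the same CRS as the data):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> padded = xds.rio.pad_box(minx=-94.1, miny=41.6, maxx=-94.0, maxy=41.7)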
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
constant_values: scalar
The value used for padding. If None, nodata will be used if it is
set, and np.nan otherwise.
Returns
-------
:obj:`xarray.DataArray`:
The padded object.
"""
resolution_x, resolution_y = self.resolution()
pad_minx = minx - abs(resolution_x) / 2.0
pad_miny = miny - abs(resolution_y) / 2.0
pad_maxx = maxx + abs(resolution_x) / 2.0
pad_maxy = maxy + abs(resolution_y) / 2.0
pd_array = self.pad_xy(pad_minx, pad_miny, pad_maxx, pad_maxy, constant_values)
# make sure correct attributes preserved & projection added
_add_attrs_proj(pd_array, self._obj)
return pd_array
def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3):
"""Clip the :obj:`xarray.DataArray` by a bounding box.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
auto_expand: bool
If True, the clip search will be expanded when only a 1D raster is found within the clip bounds.
auto_expand_limit: int
maximum number of times the clip will be retried before raising
an exception.
Returns
-------
:obj:`xarray.DataArray`:
The clipped object.
"""
if self.width == 1 or self.height == 1:
raise OneDimensionalRaster(
"At least one of the raster x,y coordinates has only one point."
f"{_get_data_var_message(self._obj)}"
)
# make sure that if the coordinates are
# in reverse order that it still works
resolution_x, resolution_y = self.resolution()
if resolution_y < 0:
top = maxy
bottom = miny
else:
top = miny
bottom = maxy
if resolution_x < 0:
left = maxx
right = minx
else:
left = minx
right = maxx
# pull the data out
window = rasterio.windows.from_bounds(
left=np.array(left).item(),
bottom=np.array(bottom).item(),
right=np.array(right).item(),
top=np.array(top).item(),
transform=self.transform(recalc=True),
width=self.width,
height=self.height,
)
cl_array = self.isel_window(window)
# check that the window has data in it
if cl_array.rio.width <= 1 or cl_array.rio.height <= 1:
if auto_expand and auto_expand < auto_expand_limit:
resolution_x, resolution_y = self.resolution()
return self.clip_box(
minx=minx - abs(resolution_x) / 2.0,
miny=miny - abs(resolution_y) / 2.0,
maxx=maxx + abs(resolution_x) / 2.0,
maxy=maxy + abs(resolution_y) / 2.0,
auto_expand=int(auto_expand) + 1,
auto_expand_limit=auto_expand_limit,
)
if cl_array.rio.width < 1 or cl_array.rio.height < 1:
raise NoDataInBounds(
f"No data found in bounds.{_get_data_var_message(self._obj)}"
)
elif cl_array.rio.width == 1 or cl_array.rio.height == 1:
raise OneDimensionalRaster(
"At least one of the clipped raster x,y coordinates"
" has only one point."
f"{_get_data_var_message(self._obj)}"
)
# make sure correct attributes preserved & projection added
_add_attrs_proj(cl_array, self._obj)
return cl_array
def clip(self, geometries, crs=None, all_touched=False, drop=True, invert=False):
"""
Crops a :obj:`xarray.DataArray` by geojson like geometry dicts.
Powered by `rasterio.features.geometry_mask`.
Examples:
>>> geometry = ''' {"type": "Polygon",
... "coordinates": [
... [[-94.07955380199459, 41.69085871273774],
... [-94.06082436942204, 41.69103313774798],
... [-94.06063203899649, 41.67932439500822],
... [-94.07935807746362, 41.679150041277325],
... [-94.07955380199459, 41.69085871273774]]]}'''
>>> cropping_geometries = [geojson.loads(geometry)]
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326)
Parameters
----------
geometries: list
A list of geojson geometry dicts.
crs: :obj:`rasterio.crs.CRS`, optional
The CRS of the input geometries. Default is to assume it is the same
as the dataset.
all_touched : bool, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
drop: bool, optional
If True, drop the data outside of the extent of the mask geometries.
Otherwise, it will return the same raster with the data masked.
Default is True.
invert: boolean, optional
If False, pixels that do not overlap shapes will be set as nodata.
Otherwise, pixels that overlap the shapes will be set as nodata.
False by default.
Returns
-------
:obj:`xarray.DataArray`:
The clipped object.
"""
if self.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
f"{_get_data_var_message(self._obj)}"
)
crs = CRS.from_wkt(crs_to_wkt(crs)) if crs is not None else self.crs
if self.crs != crs:
geometries = [
rasterio.warp.transform_geom(crs, self.crs, geometry)
for geometry in geometries
]
clip_mask_arr = geometry_mask(
geometries=geometries,
out_shape=(int(self.height), int(self.width)),
transform=self.transform(recalc=True),
invert=not invert,
all_touched=all_touched,
)
clip_mask_xray = xarray.DataArray(
clip_mask_arr,
dims=(self.y_dim, self.x_dim),
)
cropped_ds = self._obj.where(clip_mask_xray)
if drop:
cropped_ds.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
cropped_ds = cropped_ds.rio.isel_window(
rasterio.windows.get_data_window(
np.ma.masked_array(clip_mask_arr, ~clip_mask_arr)
)
)
if self.nodata is not None and not np.isnan(self.nodata):
cropped_ds = cropped_ds.fillna(self.nodata)
cropped_ds = cropped_ds.astype(self._obj.dtype)
if (
cropped_ds.coords[self.x_dim].size < 1
or cropped_ds.coords[self.y_dim].size < 1
):
raise NoDataInBounds(
f"No data found in bounds.{_get_data_var_message(self._obj)}"
)
# make sure correct attributes preserved & projection added
_add_attrs_proj(cropped_ds, self._obj)
return cropped_ds
def _interpolate_na(self, src_data, method="nearest"):
"""
This method uses scipy.interpolate.griddata to interpolate missing data.
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:class:`numpy.ndarray`:
An interpolated :class:`numpy.ndarray`.
"""
src_data_flat = src_data.flatten()
try:
data_isnan = np.isnan(self.nodata)
except TypeError:
data_isnan = False
if not data_isnan:
data_bool = src_data_flat != self.nodata
else:
data_bool = ~np.isnan(src_data_flat)
if not data_bool.any():
return src_data
x_coords, y_coords = np.meshgrid(
self._obj.coords[self.x_dim].values, self._obj.coords[self.y_dim].values
)
return griddata(
points=(x_coords.flatten()[data_bool], y_coords.flatten()[data_bool]),
values=src_data_flat[data_bool],
xi=(x_coords, y_coords),
method=method,
fill_value=self.nodata,
)
def interpolate_na(self, method="nearest"):
"""
This method uses scipy.interpolate.griddata to interpolate missing data.
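Example (an illustrative sketch; assumes the nodata value is already set on
the DataArray so missing cells can be identified):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> filled = xds.rio.interpolate_na(method='nearest')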
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:obj:`xarray.DataArray`:
An interpolated :obj:`xarray.DataArray` object.
"""
extra_dim = self._check_dimensions()
if extra_dim:
interp_data = []
for _, sub_xds in self._obj.groupby(extra_dim):
interp_data.append(
self._interpolate_na(sub_xds.load().data, method=method)
)
interp_data = np.array(interp_data)
else:
interp_data = self._interpolate_na(self._obj.load().data, method=method)
interp_array = xarray.DataArray(
name=self._obj.name,
data=interp_data,
coords=self._obj.coords,
dims=self._obj.dims,
attrs=self._obj.attrs,
)
interp_array.encoding = self._obj.encoding
# make sure correct attributes preserved & projection added
_add_attrs_proj(interp_array, self._obj)
return interp_array
def to_raster(
self,
raster_path,
driver="GTiff",
dtype=None,
tags=None,
windowed=False,
recalc_transform=True,
**profile_kwargs,
):
"""
Export the DataArray to a raster file.
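Example (an illustrative sketch; the output path and the 'compress' creation
option are hypothetical):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> xds.rio.to_raster('output.tif', compress='LZW')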
Parameters
----------
raster_path: str
The path to output the raster to.
driver: str, optional
The name of the GDAL/rasterio driver to use to export the raster.
Default is "GTiff".
dtype: str, optional
The data type to write the raster to. Default is the dataset's dtype.
tags: dict, optional
A dictionary of tags to write to the raster.
windowed: bool, optional
If True, it will write using the windows of the output raster.
This only works if the output raster is tiled. As such, if you
set this to True, the output raster will be tiled.
Default is False.
recalc_transform: bool, optional
If False, write the cached transform instead of recalculating it. Default is True.
**profile_kwargs
Additional keyword arguments to pass into writing the raster. The
nodata, transform, crs, count, width, and height attributes
are ignored.
"""
dtype = str(self._obj.dtype) if dtype is None else dtype
# get the output profile from the rasterio object
# if opened with xarray.open_rasterio()
try:
out_profile = self._obj._file_obj.acquire().profile
except AttributeError:
out_profile = {}
out_profile.update(profile_kwargs)
# filter out the generated attributes
out_profile = {
key: value
for key, value in out_profile.items()
if key
not in (
"driver",
"height",
"width",
"crs",
"transform",
"nodata",
"count",
"dtype",
)
}
rio_nodata = (
self.encoded_nodata if self.encoded_nodata is not None else self.nodata
)
if rio_nodata is not None:
# Ensure dtype of output data matches the expected dtype.
# This check is added here as the dtype of the data is
# converted right before writing.
rio_nodata = _ensure_nodata_dtype(rio_nodata, dtype)
with rasterio.open(
raster_path,
"w",
driver=driver,
height=int(self.height),
width=int(self.width),
count=int(self.count),
dtype=dtype,
crs=self.crs,
transform=self.transform(recalc=recalc_transform),
nodata=rio_nodata,
**out_profile,
) as dst:
_write_metatata_to_raster(dst, self._obj, tags)
# write data to raster
if windowed:
window_iter = dst.block_windows(1)
else:
window_iter = [(None, None)]
for _, window in window_iter:
if window is not None:
out_data = self.isel_window(window)
else:
out_data = self._obj
if self.encoded_nodata is not None:
out_data = out_data.fillna(self.encoded_nodata)
data = out_data.values.astype(dtype)
if data.ndim == 2:
dst.write(data, 1, window=window)
else:
dst.write(data, window=window)
@xarray.register_dataset_accessor("rio")
class RasterDataset(XRasterBase):
"""This is the GIS extension for :class:`xarray.Dataset`"""
@property
def vars(self):
"""list: Returns non-coordinate varibles"""
return list(self._obj.data_vars)
@property
def crs(self):
""":obj:`rasterio.crs.CRS`:
Retrieve projection from `xarray.Dataset`
"""
if self._crs is not None:
return None if self._crs is False else self._crs
self._crs = super().crs
if self._crs is not None:
return self._crs
# ensure all the CRS of the variables are the same
crs_list = []
for var in self.vars:
if self._obj[var].rio.crs is not None:
crs_list.append(self._obj[var].rio.crs)
try:
crs = crs_list[0]
except IndexError:
crs = None
if crs is None:
self._crs = False
return None
elif all(crs_i == crs for crs_i in crs_list):
self._crs = crs
else:
raise RioXarrayError(
"CRS in DataArrays differ in the Dataset: {}".format(crs_list)
)
return self._crs
def reproject(
self,
dst_crs,
resolution=None,
shape=None,
transform=None,
resampling=Resampling.nearest,
):
"""
Reproject :class:`xarray.Dataset` objects
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
.. versionadded:: 0.0.27 shape
.. versionadded:: 0.0.28 transform
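Example (an illustrative sketch; the NetCDF path is hypothetical and the
dataset is assumed to already have a CRS and x/y dimensions set):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_dataset('cool_dataset.nc')
>>> xds_wgs84 = xds.rio.reproject("EPSG:4326")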
Parameters
----------
dst_crs: str
OGC WKT string or Proj.4 string.
resolution: float or tuple(float, float), optional
Size of a destination pixel in destination projection units
(e.g. degrees or metres).
shape: tuple(int, int), optional
Shape of the destination in pixels (dst_height, dst_width). Cannot be used
together with resolution.
transform: optional
The destination transform.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:class:`xarray.Dataset`:
The reprojected Dataset.
"""
resampled_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
resampled_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.reproject(
dst_crs,
resolution=resolution,
shape=shape,
transform=transform,
resampling=resampling,
)
)
return resampled_dataset
def reproject_match(self, match_data_array, resampling=Resampling.nearest):
"""
Reproject a Dataset object to match the resolution, projection,
and region of another DataArray.
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
Parameters
----------
match_data_array: :obj:`xarray.DataArray` | :obj:`xarray.Dataset`
Dataset with the target resolution and projection.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:obj:`xarray.Dataset`:
Contains the data from the src_data_array,
reprojected to match match_data_array.
"""
resampled_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
resampled_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.reproject_match(match_data_array, resampling=resampling)
)
return resampled_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def pad_box(self, minx, miny, maxx, maxy):
"""Pad the :class:`xarray.Dataset` to a bounding box.
.. warning:: Only works if all variables in the dataset have the
same coordinates.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
Returns
-------
:obj:`xarray.Dataset`:
The padded object.
"""
padded_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
padded_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.pad_box(minx, miny, maxx, maxy)
)
return padded_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3):
"""Clip the :class:`xarray.Dataset` by a bounding box.
.. warning:: Only works if all variables in the dataset have the
same coordinates.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
auto_expand: bool
If True, the clip search will be expanded when only a 1D raster is found within the clip bounds.
auto_expand_limit: int
maximum number of times the clip will be retried before raising
an exception.
Returns
-------
:obj:`xarray.Dataset`:
The clipped object.
"""
clipped_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
clipped_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.clip_box(
minx,
miny,
maxx,
maxy,
auto_expand=auto_expand,
auto_expand_limit=auto_expand_limit,
)
)
return clipped_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def clip(self, geometries, crs=None, all_touched=False, drop=True, invert=False):
"""
Crops a :class:`xarray.Dataset` by geojson like geometry dicts.
.. warning:: Only works if all variables in the dataset have the same
coordinates.
Powered by `rasterio.features.geometry_mask`.
Examples:
>>> geometry = ''' {"type": "Polygon",
... "coordinates": [
... [[-94.07955380199459, 41.69085871273774],
... [-94.06082436942204, 41.69103313774798],
... [-94.06063203899649, 41.67932439500822],
... [-94.07935807746362, 41.679150041277325],
... [-94.07955380199459, 41.69085871273774]]]}'''
>>> cropping_geometries = [geojson.loads(geometry)]
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326)
Parameters
----------
geometries: list
A list of geojson geometry dicts.
crs: :obj:`rasterio.crs.CRS`, optional
The CRS of the input geometries. Default is to assume it is the same
as the dataset.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
drop: bool, optional
If True, drop the data outside of the extent of the mask geometries.
Otherwise, it will return the same raster with the data masked.
Default is True.
invert: boolean, optional
If False, pixels that do not overlap shapes will be set as nodata.
Otherwise, pixels that overlap the shapes will be set as nodata.
False by default.
Returns
-------
:obj:`xarray.Dataset`:
The clipped object.
"""
clipped_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
clipped_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.clip(
geometries,
crs=crs,
all_touched=all_touched,
drop=drop,
invert=invert,
)
)
return clipped_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def interpolate_na(self, method="nearest"):
"""
This method uses `scipy.interpolate.griddata` to interpolate missing data.
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:obj:`xarray.Dataset`:
The interpolated object.
"""
interpolated_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
interpolated_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.interpolate_na(method=method)
)
return interpolated_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def to_raster(
self,
raster_path,
driver="GTiff",
dtype=None,
tags=None,
windowed=False,
recalc_transform=True,
**profile_kwargs,
):
"""
Export the Dataset to a raster file. Only works with 2D data.
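Example (an illustrative sketch; assumes every variable in the dataset is 2D
and shares the same nodata value, as required by this method):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_dataset('cool_dataset.nc')
>>> xds.rio.to_raster('multiband.tif')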
Parameters
----------
raster_path: str
The path to output the raster to.
driver: str, optional
The name of the GDAL/rasterio driver to use to export the raster.
Default is "GTiff".
dtype: str, optional
The data type to write the raster to. Default is the dataset's dtype.
tags: dict, optional
A dictionary of tags to write to the raster.
windowed: bool, optional
If True, it will write using the windows of the output raster.
This only works if the output raster is tiled. As such, if you
set this to True, the output raster will be tiled.
Default is False.
recalc_transform: bool, optional
If False, write the cached transform instead of recalculating it. Default is True.
**profile_kwargs
Additional keyword arguments to pass into writing the raster. The
nodata, transform, crs, count, width, and height attributes
are ignored.
"""
variable_dim = "band_{}".format(uuid4())
data_array = self._obj.to_array(dim=variable_dim)
# write data array names to raster
data_array.attrs["long_name"] = data_array[variable_dim].values.tolist()
# ensure raster metadata preserved
scales = []
offsets = []
nodatavals = []
for data_var in data_array[variable_dim].values:
scales.append(self._obj[data_var].attrs.get("scale_factor", 1.0))
offsets.append(self._obj[data_var].attrs.get("add_offset", 0.0))
nodatavals.append(self._obj[data_var].rio.nodata)
data_array.attrs["scales"] = scales
data_array.attrs["offsets"] = offsets
nodata = nodatavals[0]
if (
all(nodataval == nodata for nodataval in nodatavals)
or np.isnan(nodatavals).all()
):
data_array.rio.write_nodata(nodata, inplace=True)
else:
raise RioXarrayError(
"All nodata values must be the same when exporting to raster. "
"Current values: {}".format(nodatavals)
)
if self.crs is not None:
data_array.rio.write_crs(self.crs, inplace=True)
# write it to a raster
data_array.rio.to_raster(
raster_path=raster_path,
driver=driver,
dtype=dtype,
tags=tags,
windowed=windowed,
recalc_transform=recalc_transform,
**profile_kwargs,
)
| # -- coding: utf-8 --
"""
This module is an extension for xarray to provide rasterio capabilities
to xarray datasets/dataarrays.
Credits: The `reproject` functionality was adopted from https://github.com/opendatacube/datacube-core # noqa
Source file:
- https://github.com/opendatacube/datacube-core/blob/084c84d78cb6e1326c7fbbe79c5b5d0bef37c078/datacube/api/geo_xarray.py # noqa
datacube is licensed under the Apache License, Version 2.0:
- https://github.com/opendatacube/datacube-core/blob/1d345f08a10a13c316f81100936b0ad8b1a374eb/LICENSE # noqa
"""
import copy
import math
import warnings
from typing import Iterable
from uuid import uuid4
import numpy as np
import pyproj
import rasterio.warp
import rasterio.windows
import xarray
from affine import Affine
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio.features import geometry_mask
from scipy.interpolate import griddata
from rioxarray.crs import crs_to_wkt
from rioxarray.exceptions import (
DimensionError,
DimensionMissingCoordinateError,
InvalidDimensionOrder,
MissingCRS,
NoDataInBounds,
OneDimensionalRaster,
RioXarrayError,
TooManyDimensions,
)
FILL_VALUE_NAMES = ("_FillValue", "missing_value", "fill_value", "nodata")
UNWANTED_RIO_ATTRS = ("nodatavals", "crs", "is_tiled", "res")
DEFAULT_GRID_MAP = "spatial_ref"
def affine_to_coords(affine, width, height, x_dim="x", y_dim="y"):
"""Generate 1d pixel centered coordinates from affine.
Based on code from the xarray rasterio backend.
Parameters
----------
affine: :obj:`affine.Affine`
The affine of the grid.
width: int
The width of the grid.
height: int
The height of the grid.
x_dim: str, optional
The name of the X dimension. Default is 'x'.
y_dim: str, optional
The name of the Y dimension. Default is 'y'.
Returns
-------
dict: x and y coordinate arrays.
"""
x_coords, _ = affine * (np.arange(width) + 0.5, np.zeros(width) + 0.5)
_, y_coords = affine * (np.zeros(height) + 0.5, np.arange(height) + 0.5)
return {y_dim: y_coords, x_dim: x_coords}
def _generate_attrs(src_data_array, dst_nodata):
# add original attributes
new_attrs = copy.deepcopy(src_data_array.attrs)
# remove all nodata information
for unwanted_attr in FILL_VALUE_NAMES + UNWANTED_RIO_ATTRS:
new_attrs.pop(unwanted_attr, None)
# add nodata information
fill_value = (
src_data_array.rio.nodata
if src_data_array.rio.nodata is not None
else dst_nodata
)
if src_data_array.rio.encoded_nodata is None and fill_value is not None:
new_attrs["_FillValue"] = fill_value
# add raster spatial information
new_attrs["grid_mapping"] = src_data_array.rio.grid_mapping
return new_attrs
def add_xy_grid_meta(coords, crs=None):
raise RuntimeError(
"add_xy_grid_meta has been removed. Use rio.write_coordinate_system instead.",
)
def add_spatial_ref(in_ds, dst_crs, grid_mapping_name):
raise RuntimeError("add_spatial_ref has been removed. Use rio.write_crs instead.")
def _add_attrs_proj(new_data_array, src_data_array):
"""Make sure attributes and projection correct"""
# make sure dimension information is preserved
if new_data_array.rio._x_dim is None:
new_data_array.rio._x_dim = src_data_array.rio.x_dim
if new_data_array.rio._y_dim is None:
new_data_array.rio._y_dim = src_data_array.rio.y_dim
# make sure attributes preserved
new_attrs = _generate_attrs(src_data_array, None)
# remove fill value if it already exists in the encoding
# this is for data arrays pulling the encoding from a
# source data array instead of being generated anew.
if "_FillValue" in new_data_array.encoding:
new_attrs.pop("_FillValue", None)
new_data_array.rio.set_attrs(new_attrs, inplace=True)
# make sure projection added
new_data_array.rio.write_crs(src_data_array.rio.crs, inplace=True)
new_data_array.rio.write_coordinate_system(inplace=True)
new_data_array.rio.write_transform(inplace=True)
# make sure encoding added
new_data_array.encoding = src_data_array.encoding.copy()
return new_data_array
def _warp_spatial_coords(data_array, affine, width, height):
"""get spatial coords in new transform"""
new_spatial_coords = affine_to_coords(affine, width, height)
return {
"x": xarray.IndexVariable("x", new_spatial_coords["x"]),
"y": xarray.IndexVariable("y", new_spatial_coords["y"]),
}
def _get_nonspatial_coords(src_data_array):
coords = {}
for coord in set(src_data_array.coords) - {
src_data_array.rio.x_dim,
src_data_array.rio.y_dim,
DEFAULT_GRID_MAP,
}:
if src_data_array[coord].dims:
coords[coord] = xarray.IndexVariable(
src_data_array[coord].dims,
src_data_array[coord].values,
src_data_array[coord].attrs,
)
else:
coords[coord] = xarray.Variable(
src_data_array[coord].dims,
src_data_array[coord].values,
src_data_array[coord].attrs,
)
return coords
def _make_coords(src_data_array, dst_affine, dst_width, dst_height):
"""Generate the coordinates of the new projected `xarray.DataArray`"""
coords = _get_nonspatial_coords(src_data_array)
new_coords = _warp_spatial_coords(src_data_array, dst_affine, dst_width, dst_height)
new_coords.update(coords)
return new_coords
def _make_dst_affine(
src_data_array, src_crs, dst_crs, dst_resolution=None, dst_shape=None
):
"""Determine the affine of the new projected `xarray.DataArray`"""
src_bounds = src_data_array.rio.bounds()
src_height, src_width = src_data_array.rio.shape
dst_height, dst_width = dst_shape if dst_shape is not None else (None, None)
if isinstance(dst_resolution, Iterable):
dst_resolution = tuple(abs(res_val) for res_val in dst_resolution)
elif dst_resolution is not None:
dst_resolution = abs(dst_resolution)
resolution_or_width_height = {
k: v
for k, v in [
("resolution", dst_resolution),
("dst_height", dst_height),
("dst_width", dst_width),
]
if v is not None
}
dst_affine, dst_width, dst_height = rasterio.warp.calculate_default_transform(
src_crs,
dst_crs,
src_width,
src_height,
*src_bounds,
**resolution_or_width_height,
)
return dst_affine, dst_width, dst_height
def _write_metatata_to_raster(raster_handle, xarray_dataset, tags):
"""
Write the metadata stored in the xarray object to raster metadata
"""
tags = xarray_dataset.attrs if tags is None else {**xarray_dataset.attrs, **tags}
# write scales and offsets
try:
raster_handle.scales = tags["scales"]
except KeyError:
try:
raster_handle.scales = (tags["scale_factor"],) * raster_handle.count
except KeyError:
pass
try:
raster_handle.offsets = tags["offsets"]
except KeyError:
try:
raster_handle.offsets = (tags["add_offset"],) * raster_handle.count
except KeyError:
pass
# filter out attributes that should be written in a different location
skip_tags = (
UNWANTED_RIO_ATTRS
+ FILL_VALUE_NAMES
+ ("transform", "scales", "scale_factor", "add_offset", "offsets")
)
# this is for when multiple values are used
# in this case, it will be stored in the raster description
if not isinstance(tags.get("long_name"), str):
skip_tags += ("long_name",)
tags = {key: value for key, value in tags.items() if key not in skip_tags}
raster_handle.update_tags(**tags)
# write band name information
long_name = xarray_dataset.attrs.get("long_name")
if isinstance(long_name, (tuple, list)):
if len(long_name) != raster_handle.count:
raise RioXarrayError(
"Number of names in the 'long_name' attribute does not equal "
"the number of bands."
)
for iii, band_description in enumerate(long_name):
raster_handle.set_band_description(iii + 1, band_description)
else:
band_description = long_name or xarray_dataset.name
if band_description:
for iii in range(raster_handle.count):
raster_handle.set_band_description(iii + 1, band_description)
def _get_data_var_message(obj):
"""
Get message for named data variables.
"""
try:
return f" Data variable: {obj.name}" if obj.name else ""
except AttributeError:
return ""
def _ensure_nodata_dtype(original_nodata, new_dtype):
"""
Convert the nodata to the new datatype and raise warning
if the value of the nodata value changed.
"""
original_nodata = float(original_nodata)
nodata = np.dtype(new_dtype).type(original_nodata)
if not np.isnan(nodata) and original_nodata != nodata:
warnings.warn(
f"The nodata value ({original_nodata}) has been automatically "
f"changed to ({nodata}) to match the dtype of the data."
)
return nodata
class XRasterBase(object):
"""This is the base class for the GIS extensions for xarray"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._x_dim = None
self._y_dim = None
# Determine the spatial dimensions of the `xarray.DataArray`
if "x" in self._obj.dims and "y" in self._obj.dims:
self._x_dim = "x"
self._y_dim = "y"
elif "longitude" in self._obj.dims and "latitude" in self._obj.dims:
self._x_dim = "longitude"
self._y_dim = "latitude"
else:
# look for coordinates with CF attributes
for coord in self._obj.coords:
# make sure to only look in 1D coordinates
# that has the same dimension name as the coordinate
if self._obj.coords[coord].dims != (coord,):
continue
elif (self._obj.coords[coord].attrs.get("axis", "").upper() == "X") or (
self._obj.coords[coord].attrs.get("standard_name", "").lower()
in ("longitude", "projection_x_coordinate")
):
self._x_dim = coord
elif (self._obj.coords[coord].attrs.get("axis", "").upper() == "Y") or (
self._obj.coords[coord].attrs.get("standard_name", "").lower()
in ("latitude", "projection_y_coordinate")
):
self._y_dim = coord
# properties
self._count = None
self._height = None
self._width = None
self._crs = None
@property
def crs(self):
""":obj:`rasterio.crs.CRS`:
Retrieve projection from :obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
if self._crs is not None:
return None if self._crs is False else self._crs
# look in grid_mapping
try:
self.set_crs(
pyproj.CRS.from_cf(self._obj.coords[self.grid_mapping].attrs),
inplace=True,
)
except (KeyError, pyproj.exceptions.CRSError):
try:
# look in attrs for 'crs'
self.set_crs(self._obj.attrs["crs"], inplace=True)
except KeyError:
self._crs = False
return None
return self._crs
def _get_obj(self, inplace):
"""
Get the object to modify.
Parameters
----------
inplace: bool
If True, returns self.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
if inplace:
return self._obj
obj_copy = self._obj.copy(deep=True)
# preserve attribute information
obj_copy.rio._x_dim = self._x_dim
obj_copy.rio._y_dim = self._y_dim
obj_copy.rio._width = self._width
obj_copy.rio._height = self._height
obj_copy.rio._crs = self._crs
return obj_copy
def set_crs(self, input_crs, inplace=True):
"""
Set the CRS value for the Dataset/DataArray without modifying
the dataset/data array.
Parameters
----------
input_crs: object
Anything accepted by `rasterio.crs.CRS.from_user_input`.
inplace: bool, optional
If True, it will write to the existing dataset. Default is True.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Dataset with crs attribute.
"""
crs = CRS.from_wkt(crs_to_wkt(input_crs))
obj = self._get_obj(inplace=inplace)
obj.rio._crs = crs
return obj
@property
def grid_mapping(self):
"""
str: The CF grid_mapping attribute. 'spatial_ref' is the default.
"""
try:
return self._obj.attrs["grid_mapping"]
except KeyError:
pass
grid_mapping = DEFAULT_GRID_MAP
# search the dataset for the grid mapping name
if hasattr(self._obj, "data_vars"):
grid_mappings = set()
for var in self._obj.data_vars:
try:
self._obj[var].rio.x_dim
self._obj[var].rio.y_dim
except DimensionError:
continue
try:
grid_mapping = self._obj[var].attrs["grid_mapping"]
grid_mappings.add(grid_mapping)
except KeyError:
pass
if len(grid_mappings) > 1:
raise RioXarrayError("Multiple grid mappings exist.")
return grid_mapping
def write_grid_mapping(self, grid_mapping_name=DEFAULT_GRID_MAP, inplace=False):
"""
Write the CF grid_mapping attribute.
Parameters
----------
grid_mapping_name: str, optional
Name of the grid_mapping coordinate.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with CF compliant CRS information.
"""
data_obj = self._get_obj(inplace=inplace)
if hasattr(data_obj, "data_vars"):
for var in data_obj.data_vars:
try:
x_dim = data_obj[var].rio.x_dim
y_dim = data_obj[var].rio.y_dim
except DimensionError:
continue
data_obj[var].rio.update_attrs(
dict(grid_mapping=grid_mapping_name), inplace=True
).rio.set_spatial_dims(x_dim=x_dim, y_dim=y_dim, inplace=True)
return data_obj.rio.update_attrs(
dict(grid_mapping=grid_mapping_name), inplace=True
)
def write_crs(self, input_crs=None, grid_mapping_name=None, inplace=False):
"""
Write the CRS to the dataset in a CF compliant manner.
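Example (an illustrative sketch; the raster path and EPSG string are hypothetical):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xda = xarray.open_rasterio('cool_raster.tif')
>>> xda = xda.rio.write_crs("EPSG:4326")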
Parameters
----------
input_crs: object
Anything accepted by `rasterio.crs.CRS.from_user_input`.
grid_mapping_name: str, optional
Name of the grid_mapping coordinate to store the CRS information in.
Default is the grid_mapping name of the dataset.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with CF compliant CRS information.
"""
if input_crs is not None:
data_obj = self.set_crs(input_crs, inplace=inplace)
else:
data_obj = self._get_obj(inplace=inplace)
# get original transform
transform = self._cached_transform()
# remove the old grid mapping coordinate if it exists
grid_mapping_name = (
self.grid_mapping if grid_mapping_name is None else grid_mapping_name
)
try:
del data_obj.coords[grid_mapping_name]
except KeyError:
pass
if data_obj.rio.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
)
# add grid mapping coordinate
data_obj.coords[grid_mapping_name] = xarray.Variable((), 0)
grid_map_attrs = pyproj.CRS.from_user_input(data_obj.rio.crs).to_cf()
# spatial_ref is for compatibility with GDAL
crs_wkt = crs_to_wkt(data_obj.rio.crs)
grid_map_attrs["spatial_ref"] = crs_wkt
grid_map_attrs["crs_wkt"] = crs_wkt
if transform is not None:
grid_map_attrs["GeoTransform"] = " ".join(
[str(item) for item in transform.to_gdal()]
)
data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True)
return data_obj.rio.write_grid_mapping(
grid_mapping_name=grid_mapping_name, inplace=True
)
def estimate_utm_crs(self, datum_name="WGS 84"):
"""Returns the estimated UTM CRS based on the bounds of the dataset.
.. versionadded:: 0.2
.. note:: Requires pyproj 3+
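Example (an illustrative sketch; assumes the data has a CRS and valid bounds):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> xds_utm = xds.rio.reproject(xds.rio.estimate_utm_crs())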
Parameters
----------
datum_name : str, optional
The name of the datum to use in the query. Default is WGS 84.
Returns
-------
rasterio.crs.CRS
"""
try:
from pyproj.aoi import AreaOfInterest
from pyproj.database import query_utm_crs_info
except ImportError:
raise RuntimeError("pyproj 3+ required for estimate_utm_crs.")
if self.crs is None:
raise RuntimeError("crs must be set to estimate UTM CRS.")
# ensure using geographic coordinates
if self.crs.is_geographic:
minx, miny, maxx, maxy = self.bounds(recalc=True)
else:
minx, miny, maxx, maxy = self.transform_bounds("EPSG:4326", recalc=True)
x_center = np.mean([minx, maxx])
y_center = np.mean([miny, maxy])
utm_crs_list = query_utm_crs_info(
datum_name=datum_name,
area_of_interest=AreaOfInterest(
west_lon_degree=x_center,
south_lat_degree=y_center,
east_lon_degree=x_center,
north_lat_degree=y_center,
),
)
try:
return CRS.from_epsg(utm_crs_list[0].code)
except IndexError:
raise RuntimeError("Unable to determine UTM CRS")
def _cached_transform(self):
"""
Get the transform from:
1. The GeoTransform metadata property in the grid mapping
2. The transform attribute.
"""
try:
# look in grid_mapping
return Affine.from_gdal(
*np.fromstring(
self._obj.coords[self.grid_mapping].attrs["GeoTransform"], sep=" "
)
)
except KeyError:
try:
return Affine(*self._obj.attrs["transform"][:6])
except KeyError:
pass
return None
def write_transform(self, transform=None, grid_mapping_name=None, inplace=False):
"""
.. versionadded:: 0.0.30
Write the GeoTransform to the dataset where GDAL can read it in.
https://gdal.org/drivers/raster/netcdf.html#georeference
Parameters
----------
transform: affine.Affine, optional
The transform of the dataset. If not provided, it will be calculated.
grid_mapping_name: str, optional
Name of the grid_mapping coordinate to store the transform information in.
Default is the grid_mapping name of the dataset.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with Geo Transform written.
"""
transform = transform or self.transform(recalc=True)
data_obj = self._get_obj(inplace=inplace)
# delete the old attribute to prevent confusion
data_obj.attrs.pop("transform", None)
grid_mapping_name = (
self.grid_mapping if grid_mapping_name is None else grid_mapping_name
)
try:
grid_map_attrs = data_obj.coords[grid_mapping_name].attrs.copy()
except KeyError:
data_obj.coords[grid_mapping_name] = xarray.Variable((), 0)
grid_map_attrs = data_obj.coords[grid_mapping_name].attrs.copy()
grid_map_attrs["GeoTransform"] = " ".join(
[str(item) for item in transform.to_gdal()]
)
data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True)
return data_obj.rio.write_grid_mapping(
grid_mapping_name=grid_mapping_name, inplace=True
)
def transform(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
If True, it will re-calculate the transform instead of using
the cached transform.
Returns
-------
:obj:`affine.Affine`:
The affine of the :obj:`xarray.Dataset` | :obj:`xarray.DataArray`
"""
try:
src_left, _, _, src_top = self.bounds(recalc=recalc)
src_resolution_x, src_resolution_y = self.resolution(recalc=recalc)
except (DimensionMissingCoordinateError, DimensionError):
return Affine.identity()
return Affine.translation(src_left, src_top) * Affine.scale(
src_resolution_x, src_resolution_y
)
def write_coordinate_system(self, inplace=False):
"""
Write the coordinate system CF metadata.
.. versionadded:: 0.0.30
Parameters
----------
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The dataset with the CF coordinate system attributes added.
"""
data_obj = self._get_obj(inplace=inplace)
# add metadata to x,y coordinates
is_projected = data_obj.rio.crs and data_obj.rio.crs.is_projected
is_geographic = data_obj.rio.crs and data_obj.rio.crs.is_geographic
x_coord_attrs = dict(data_obj.coords[self.x_dim].attrs)
x_coord_attrs["axis"] = "X"
y_coord_attrs = dict(data_obj.coords[self.y_dim].attrs)
y_coord_attrs["axis"] = "Y"
if is_projected:
units = None
if hasattr(data_obj.rio.crs, "linear_units_factor"):
unit_factor = data_obj.rio.crs.linear_units_factor[-1]
if unit_factor != 1:
units = f"{unit_factor} metre"
else:
units = "metre"
# X metadata
x_coord_attrs["long_name"] = "x coordinate of projection"
x_coord_attrs["standard_name"] = "projection_x_coordinate"
if units:
x_coord_attrs["units"] = units
# Y metadata
y_coord_attrs["long_name"] = "y coordinate of projection"
y_coord_attrs["standard_name"] = "projection_y_coordinate"
if units:
y_coord_attrs["units"] = units
elif is_geographic:
# X metadata
x_coord_attrs["long_name"] = "longitude"
x_coord_attrs["standard_name"] = "longitude"
x_coord_attrs["units"] = "degrees_east"
# Y metadata
y_coord_attrs["long_name"] = "latitude"
y_coord_attrs["standard_name"] = "latitude"
y_coord_attrs["units"] = "degrees_north"
data_obj.coords[self.y_dim].attrs = y_coord_attrs
data_obj.coords[self.x_dim].attrs = x_coord_attrs
return data_obj
def set_attrs(self, new_attrs, inplace=False):
"""
Set the attributes of the dataset/dataarray and reset
rioxarray properties to re-search for them.
Parameters
----------
new_attrs: dict
A dictionary of new attributes.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with new attributes.
"""
data_obj = self._get_obj(inplace=inplace)
# set the attributes
data_obj.attrs = new_attrs
# reset rioxarray properties depending
# on attributes to be generated
data_obj.rio._nodata = None
data_obj.rio._crs = None
return data_obj
def update_attrs(self, new_attrs, inplace=False):
"""
Update the attributes of the dataset/dataarray and reset
rioxarray properties to re-search for them.
Parameters
----------
new_attrs: dict
A dictionary of new attributes to update with.
inplace: bool, optional
If True, it will write to the existing dataset. Default is False.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Modified dataset with updated attributes.
"""
data_attrs = dict(self._obj.attrs)
data_attrs.update(**new_attrs)
return self.set_attrs(data_attrs, inplace=inplace)
def set_spatial_dims(self, x_dim, y_dim, inplace=True):
"""
This sets the spatial dimensions of the dataset.
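Example (an illustrative sketch; the 'lon'/'lat' dimension names are hypothetical):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_dataset('cool_dataset.nc')
>>> xds = xds.rio.set_spatial_dims(x_dim='lon', y_dim='lat', inplace=False)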
Parameters
----------
x_dim: str
The name of the x dimension.
y_dim: str
The name of the y dimension.
inplace: bool, optional
If True, it will modify the dataset/data array in place.
Otherwise it will return a modified copy. Default is True.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
Dataset with spatial dimensions set.
"""
def set_dims(obj, in_x_dim, in_y_dim):
if in_x_dim in obj.dims:
obj.rio._x_dim = x_dim
else:
raise DimensionError(
f"x dimension ({x_dim}) not found.{_get_data_var_message(obj)}"
)
if y_dim in obj.dims:
obj.rio._y_dim = y_dim
else:
raise DimensionError(
f"y dimension ({x_dim}) not found.{_get_data_var_message(obj)}"
)
data_obj = self._get_obj(inplace=inplace)
set_dims(data_obj, x_dim, y_dim)
return data_obj
@property
def x_dim(self):
"""str: The dimension for the X-axis."""
if self._x_dim is not None:
return self._x_dim
raise DimensionError(
"x dimension not found. 'set_spatial_dims()' can address this."
f"{_get_data_var_message(self._obj)}"
)
@property
def y_dim(self):
"""str: The dimension for the Y-axis."""
if self._y_dim is not None:
return self._y_dim
raise DimensionError(
"x dimension not found. 'set_spatial_dims()' can address this."
f"{_get_data_var_message(self._obj)}"
)
@property
def width(self):
"""int: Returns the width of the dataset (x dimension size)"""
if self._width is not None:
return self._width
self._width = self._obj[self.x_dim].size
return self._width
@property
def height(self):
"""int: Returns the height of the dataset (y dimension size)"""
if self._height is not None:
return self._height
self._height = self._obj[self.y_dim].size
return self._height
@property
def shape(self):
"""tuple(int, int): Returns the shape (height, width)"""
return (self.height, self.width)
def _check_dimensions(self):
"""
This function validates that the dimensions are 2D/3D and
that they are in the proper order.
Returns
-------
str or None: Name of the extra dimension.
"""
extra_dims = list(set(list(self._obj.dims)) - set([self.x_dim, self.y_dim]))
if len(extra_dims) > 1:
raise TooManyDimensions(
"Only 2D and 3D data arrays supported."
f"{_get_data_var_message(self._obj)}"
)
elif extra_dims and self._obj.dims != (extra_dims[0], self.y_dim, self.x_dim):
raise InvalidDimensionOrder(
"Invalid dimension order. Expected order: {0}. "
"You can use `DataArray.transpose{0}`"
" to reorder your dimensions.".format(
(extra_dims[0], self.y_dim, self.x_dim)
)
+ f"{_get_data_var_message(self._obj)}"
)
elif not extra_dims and self._obj.dims != (self.y_dim, self.x_dim):
raise InvalidDimensionOrder(
"Invalid dimension order. Expected order: {0}"
"You can use `DataArray.transpose{0}` "
"to reorder your dimensions.".format((self.y_dim, self.x_dim))
+ f"{_get_data_var_message(self._obj)}"
)
return extra_dims[0] if extra_dims else None
@property
def count(self):
"""int: Returns the band count (z dimension size)"""
if self._count is not None:
return self._count
extra_dim = self._check_dimensions()
self._count = 1
if extra_dim is not None:
self._count = self._obj[extra_dim].size
return self._count
def _internal_bounds(self):
"""Determine the internal bounds of the `xarray.DataArray`"""
if self.x_dim not in self._obj.coords:
raise DimensionMissingCoordinateError(f"{self.x_dim} missing coordinates.")
elif self.y_dim not in self._obj.coords:
raise DimensionMissingCoordinateError(f"{self.y_dim} missing coordinates.")
try:
left = float(self._obj[self.x_dim][0])
right = float(self._obj[self.x_dim][-1])
top = float(self._obj[self.y_dim][0])
bottom = float(self._obj[self.y_dim][-1])
except IndexError:
raise NoDataInBounds(
"Unable to determine bounds from coordinates."
f"{_get_data_var_message(self._obj)}"
)
return left, bottom, right, top
def resolution(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
Will force the resolution to be recalculated instead of using the
transform attribute.
Returns
-------
x_resolution, y_resolution: float
The resolution of the `xarray.DataArray` | `xarray.Dataset`
"""
transform = self._cached_transform()
if (
not recalc or self.width == 1 or self.height == 1
) and transform is not None:
resolution_x = transform.a
resolution_y = transform.e
return resolution_x, resolution_y
# if the coordinates of the spatial dimensions are missing
# use the cached transform resolution
try:
left, bottom, right, top = self._internal_bounds()
except DimensionMissingCoordinateError:
if transform is None:
raise
resolution_x = transform.a
resolution_y = transform.e
return resolution_x, resolution_y
if self.width == 1 or self.height == 1:
raise OneDimensionalRaster(
"Only 1 dimenional array found. Cannot calculate the resolution."
f"{_get_data_var_message(self._obj)}"
)
resolution_x = (right - left) / (self.width - 1)
resolution_y = (bottom - top) / (self.height - 1)
return resolution_x, resolution_y
def bounds(self, recalc=False):
"""
Parameters
----------
recalc: bool, optional
Will force the bounds to be recalculated instead of using the
transform attribute.
Returns
-------
left, bottom, right, top: float
Outermost coordinates of the `xarray.DataArray` | `xarray.Dataset`.
"""
resolution_x, resolution_y = self.resolution(recalc=recalc)
try:
# attempt to get bounds from xarray coordinate values
left, bottom, right, top = self._internal_bounds()
left -= resolution_x / 2.0
right += resolution_x / 2.0
top -= resolution_y / 2.0
bottom += resolution_y / 2.0
except DimensionMissingCoordinateError:
transform = self._cached_transform()
left = transform.c
top = transform.f
right = left + resolution_x * self.width
bottom = top + resolution_y * self.height
return left, bottom, right, top
def isel_window(self, window):
"""
Use a rasterio.windows.Window to select a subset of the data.
.. warning:: Float indices are converted to integers.
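Example (an illustrative sketch; the window offsets and size are hypothetical):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> from rasterio.windows import Window
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> subset = xds.rio.isel_window(Window(col_off=0, row_off=0, width=256, height=256))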
Parameters
----------
window: :class:`rasterio.windows.Window`
The window of the dataset to read.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The data in the window.
"""
(row_start, row_stop), (col_start, col_stop) = window.toranges()
row_start = math.ceil(row_start) if row_start < 0 else math.floor(row_start)
row_stop = math.floor(row_stop) if row_stop < 0 else math.ceil(row_stop)
col_start = math.ceil(col_start) if col_start < 0 else math.floor(col_start)
col_stop = math.floor(col_stop) if col_stop < 0 else math.ceil(col_stop)
row_slice = slice(int(row_start), int(row_stop))
col_slice = slice(int(col_start), int(col_stop))
return (
self._obj.isel({self.y_dim: row_slice, self.x_dim: col_slice})
.copy() # this is to prevent sharing coordinates with the original dataset
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.write_transform(
transform=rasterio.windows.transform(
rasterio.windows.Window.from_slices(
rows=row_slice,
cols=col_slice,
width=self.width,
height=self.height,
),
self.transform(recalc=True),
),
inplace=True,
)
)
def slice_xy(self, minx, miny, maxx, maxy):
"""Slice the array by x,y bounds.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray`:
The data in the slice.
"""
left, bottom, right, top = self._internal_bounds()
if top > bottom:
y_slice = slice(maxy, miny)
else:
y_slice = slice(miny, maxy)
if left > right:
x_slice = slice(maxx, minx)
else:
x_slice = slice(minx, maxx)
subset = (
self._obj.sel({self.x_dim: x_slice, self.y_dim: y_slice})
.copy() # this is to prevent sharing coordinates with the original dataset
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.write_transform(inplace=True)
)
return subset
def transform_bounds(self, dst_crs, densify_pts=21, recalc=False):
"""Transform bounds from src_crs to dst_crs.
Optionally densifying the edges (to account for nonlinear transformations
along these edges) and extracting the outermost bounds.
Note: this does not account for the antimeridian.
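Example (an illustrative sketch; assumes the object already has a CRS set):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> left, bottom, right, top = xds.rio.transform_bounds("EPSG:4326")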
Parameters
----------
dst_crs: str, :obj:`rasterio.crs.CRS`, or dict
Target coordinate reference system.
densify_pts: uint, optional
Number of points to add to each edge to account for nonlinear
edges produced by the transform process. Large numbers will produce
worse performance. Default: 21 (gdal default).
recalc: bool, optional
Will force the bounds to be recalculated instead of using the transform
attribute.
Returns
-------
left, bottom, right, top: float
Outermost coordinates in target coordinate reference system.
"""
return rasterio.warp.transform_bounds(
self.crs, dst_crs, *self.bounds(recalc=recalc), densify_pts=densify_pts
)
@xarray.register_dataarray_accessor("rio")
class RasterArray(XRasterBase):
"""This is the GIS extension for :obj:`xarray.DataArray`"""
def __init__(self, xarray_obj):
super(RasterArray, self).__init__(xarray_obj)
# properties
self._nodata = None
def set_nodata(self, input_nodata, inplace=True):
"""
Set the nodata value for the DataArray without modifying
the data array.
Parameters
----------
input_nodata: object
Valid nodata for dtype.
inplace: bool, optional
If True, it will write to the existing dataset. Default is True.
Returns
-------
:obj:`xarray.DataArray`:
Dataset with nodata attribute set.
"""
obj = self._get_obj(inplace=inplace)
obj.rio._nodata = input_nodata
return obj
def write_nodata(self, input_nodata, inplace=False):
"""
Write the nodata to the DataArray in a CF compliant manner.
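Example (an illustrative sketch; -9999 is a hypothetical nodata value):
>>> import rioxarray  # noqa: F401
>>> import xarray
>>> xda = xarray.open_rasterio('cool_raster.tif')
>>> xda = xda.rio.write_nodata(-9999)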
Parameters
----------
input_nodata: object
Nodata value for the DataArray.
If input_nodata is None, it will remove the _FillValue attribute.
inplace: bool, optional
If True, it will write to the existing DataArray. Default is False.
Returns
-------
:obj:`xarray.DataArray`:
Modified DataArray with CF compliant nodata information.
"""
data_obj = self._get_obj(inplace=inplace)
input_nodata = False if input_nodata is None else input_nodata
if input_nodata is not False:
input_nodata = _ensure_nodata_dtype(input_nodata, self._obj.dtype)
data_obj.rio.update_attrs(dict(_FillValue=input_nodata), inplace=True)
else:
new_vars = dict(data_obj.attrs)
new_vars.pop("_FillValue", None)
data_obj.rio.set_attrs(new_vars, inplace=True)
data_obj.rio.set_nodata(input_nodata, inplace=True)
return data_obj
@property
def encoded_nodata(self):
"""Return the encoded nodata value for the dataset if encoded."""
encoded_nodata = self._obj.encoding.get("_FillValue")
if encoded_nodata is None:
return None
return _ensure_nodata_dtype(encoded_nodata, self._obj.dtype)
@property
def nodata(self):
"""Get the nodata value for the dataset."""
if self._nodata is not None:
return None if self._nodata is False else self._nodata
if self.encoded_nodata is not None:
self._nodata = np.nan
else:
self._nodata = self._obj.attrs.get(
"_FillValue",
self._obj.attrs.get(
"missing_value",
self._obj.attrs.get("fill_value", self._obj.attrs.get("nodata")),
),
)
# look in places used by `xarray.open_rasterio`
if self._nodata is None:
try:
self._nodata = self._obj._file_obj.acquire().nodata
except AttributeError:
try:
self._nodata = self._obj.attrs["nodatavals"][0]
except (KeyError, IndexError):
pass
if self._nodata is None:
self._nodata = False
return None
self._nodata = _ensure_nodata_dtype(self._nodata, self._obj.dtype)
return self._nodata
def reproject(
self,
dst_crs,
resolution=None,
shape=None,
transform=None,
resampling=Resampling.nearest,
):
"""
Reproject :obj:`xarray.DataArray` objects
Powered by `rasterio.warp.reproject`
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
.. versionadded:: 0.0.27 shape
.. versionadded:: 0.0.28 transform
Parameters
----------
dst_crs: str
OGC WKT string or Proj.4 string.
resolution: float or tuple(float, float), optional
Size of a destination pixel in destination projection units
(e.g. degrees or metres).
shape: tuple(int, int), optional
Shape of the destination in pixels (dst_height, dst_width). Cannot be used
together with resolution.
transform: optional
The destination transform.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
-------
:obj:`xarray.DataArray`:
The reprojected DataArray.
"""
if resolution is not None and (shape is not None or transform is not None):
raise RioXarrayError("resolution cannot be used with shape or transform.")
if self.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
f"{_get_data_var_message(self._obj)}"
)
src_affine = self.transform(recalc=True)
if transform is None:
dst_affine, dst_width, dst_height = _make_dst_affine(
self._obj, self.crs, dst_crs, resolution, shape
)
else:
dst_affine = transform
if shape is not None:
dst_height, dst_width = shape
else:
dst_height, dst_width = self.shape
extra_dim = self._check_dimensions()
if extra_dim:
dst_data = np.zeros(
(self._obj[extra_dim].size, dst_height, dst_width),
dtype=self._obj.dtype.type,
)
else:
dst_data = np.zeros((dst_height, dst_width), dtype=self._obj.dtype.type)
try:
dst_nodata = self._obj.dtype.type(
self.nodata if self.nodata is not None else -9999
)
except ValueError:
# if integer, set nodata to -9999
dst_nodata = self._obj.dtype.type(-9999)
src_nodata = self._obj.dtype.type(
self.nodata if self.nodata is not None else dst_nodata
)
rasterio.warp.reproject(
source=self._obj.values,
destination=dst_data,
src_transform=src_affine,
src_crs=self.crs,
src_nodata=src_nodata,
dst_transform=dst_affine,
dst_crs=dst_crs,
dst_nodata=dst_nodata,
resampling=resampling,
)
# add necessary attributes
new_attrs = _generate_attrs(self._obj, dst_nodata)
# make sure dimensions with coordinates renamed to x,y
dst_dims = []
for dim in self._obj.dims:
if dim == self.x_dim:
dst_dims.append("x")
elif dim == self.y_dim:
dst_dims.append("y")
else:
dst_dims.append(dim)
xda = xarray.DataArray(
name=self._obj.name,
data=dst_data,
coords=_make_coords(self._obj, dst_affine, dst_width, dst_height),
dims=tuple(dst_dims),
attrs=new_attrs,
)
xda.encoding = self._obj.encoding
xda.rio.write_transform(dst_affine, inplace=True)
xda.rio.write_crs(dst_crs, inplace=True)
xda.rio.write_coordinate_system(inplace=True)
return xda
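# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# Hedged example of the reproject API above; 'cool_raster.tif' is a hypothetical input and
# EPSG:3857 is an arbitrary target CRS accepted by rasterio.
import xarray
from rasterio.enums import Resampling
_src = xarray.open_rasterio('cool_raster.tif')
_web = _src.rio.reproject("EPSG:3857")                                   # nearest-neighbour by default
_coarse = _src.rio.reproject("EPSG:3857", resolution=250.0, resampling=Resampling.bilinear)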
def reproject_match(self, match_data_array, resampling=Resampling.nearest):
"""
Reproject a DataArray object to match the resolution, projection,
and region of another DataArray.
Powered by `rasterio.warp.reproject`
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
Parameters
----------
match_data_array: :obj:`xarray.DataArray` | :obj:`xarray.Dataset`
DataArray of the target resolution and projection.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:obj:`xarray.DataArray`:
Contains the data from the src_data_array, reprojected to match
match_data_array.
"""
dst_crs = crs_to_wkt(match_data_array.rio.crs)
return self.reproject(
dst_crs,
transform=match_data_array.rio.transform(recalc=True),
shape=match_data_array.rio.shape,
resampling=resampling,
)
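# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# reproject_match aligns one raster onto another raster's grid; both file names are hypothetical.
import xarray
_src = xarray.open_rasterio('cool_raster.tif')
_target = xarray.open_rasterio('target_grid.tif')
_aligned = _src.rio.reproject_match(_target)   # same CRS, transform, and shape as the target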
def pad_xy(self, minx, miny, maxx, maxy, constant_values):
"""Pad the array to x,y bounds.
.. versionadded:: 0.0.29
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
constant_values: scalar
The value used for padding. If None, nodata will be used if it is
set, and np.nan otherwise.
Returns
-------
:obj:`xarray.DataArray`:
The padded object.
"""
left, bottom, right, top = self._internal_bounds()
resolution_x, resolution_y = self.resolution()
y_before = y_after = 0
x_before = x_after = 0
y_coord = self._obj[self.y_dim]
x_coord = self._obj[self.x_dim]
if top - resolution_y < maxy:
new_y_coord = np.arange(bottom, maxy, -resolution_y)[::-1]
y_before = len(new_y_coord) - len(y_coord)
y_coord = new_y_coord
top = y_coord[0]
if bottom + resolution_y > miny:
new_y_coord = np.arange(top, miny, resolution_y)
y_after = len(new_y_coord) - len(y_coord)
y_coord = new_y_coord
bottom = y_coord[-1]
if left - resolution_x > minx:
new_x_coord = np.arange(right, minx, -resolution_x)[::-1]
x_before = len(new_x_coord) - len(x_coord)
x_coord = new_x_coord
left = x_coord[0]
if right + resolution_x < maxx:
new_x_coord = np.arange(left, maxx, resolution_x)
x_after = len(new_x_coord) - len(x_coord)
x_coord = new_x_coord
right = x_coord[-1]
if constant_values is None:
constant_values = np.nan if self.nodata is None else self.nodata
superset = self._obj.pad(
pad_width={
self.x_dim: (x_before, x_after),
self.y_dim: (y_before, y_after),
},
constant_values=constant_values,
).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
superset[self.x_dim] = x_coord
superset[self.y_dim] = y_coord
superset.rio.write_transform(inplace=True)
return superset
def pad_box(self, minx, miny, maxx, maxy, constant_values=None):
"""Pad the :obj:`xarray.DataArray` to a bounding box
.. versionadded:: 0.0.29
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
constant_values: scalar
The value used for padding. If None, nodata will be used if it is
set, and np.nan otherwise.
Returns
-------
:obj:`xarray.DataArray`:
The padded object.
"""
resolution_x, resolution_y = self.resolution()
pad_minx = minx - abs(resolution_x) / 2.0
pad_miny = miny - abs(resolution_y) / 2.0
pad_maxx = maxx + abs(resolution_x) / 2.0
pad_maxy = maxy + abs(resolution_y) / 2.0
pd_array = self.pad_xy(pad_minx, pad_miny, pad_maxx, pad_maxy, constant_values)
# make sure correct attributes preserved & projection added
_add_attrs_proj(pd_array, self._obj)
return pd_array
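# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# pad_box grows the raster out to a bounding box, filling new cells with nodata (or NaN);
# the bounds below are arbitrary values in the raster's own CRS.
import xarray
_src = xarray.open_rasterio('cool_raster.tif')
_padded = _src.rio.pad_box(minx=-94.1, miny=41.6, maxx=-94.0, maxy=41.7)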
def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3):
"""Clip the :obj:`xarray.DataArray` by a bounding box.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
auto_expand: bool
If True, it will expand the clip search if only a 1D raster is found with the clip.
auto_expand_limit: int
maximum number of times the clip will be retried before raising
an exception.
Returns
-------
:obj:`xarray.DataArray`:
The clipped object.
"""
if self.width == 1 or self.height == 1:
raise OneDimensionalRaster(
"At least one of the raster x,y coordinates has only one point."
f"{_get_data_var_message(self._obj)}"
)
# make sure that if the coordinates are
# in reverse order that it still works
resolution_x, resolution_y = self.resolution()
if resolution_y < 0:
top = maxy
bottom = miny
else:
top = miny
bottom = maxy
if resolution_x < 0:
left = maxx
right = minx
else:
left = minx
right = maxx
# pull the data out
window = rasterio.windows.from_bounds(
left=np.array(left).item(),
bottom=np.array(bottom).item(),
right=np.array(right).item(),
top=np.array(top).item(),
transform=self.transform(recalc=True),
width=self.width,
height=self.height,
)
cl_array = self.isel_window(window)
# check that the window has data in it
if cl_array.rio.width <= 1 or cl_array.rio.height <= 1:
if auto_expand and auto_expand < auto_expand_limit:
resolution_x, resolution_y = self.resolution()
return self.clip_box(
minx=minx - abs(resolution_x) / 2.0,
miny=miny - abs(resolution_y) / 2.0,
maxx=maxx + abs(resolution_x) / 2.0,
maxy=maxy + abs(resolution_y) / 2.0,
auto_expand=int(auto_expand) + 1,
auto_expand_limit=auto_expand_limit,
)
if cl_array.rio.width < 1 or cl_array.rio.height < 1:
raise NoDataInBounds(
f"No data found in bounds.{_get_data_var_message(self._obj)}"
)
elif cl_array.rio.width == 1 or cl_array.rio.height == 1:
raise OneDimensionalRaster(
"At least one of the clipped raster x,y coordinates"
" has only one point."
f"{_get_data_var_message(self._obj)}"
)
# make sure correct attributes preserved & projection added
_add_attrs_proj(cl_array, self._obj)
return cl_array
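# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# clip_box subsets the raster by a bounding box given in its own CRS; the bounds are arbitrary
# example values, and auto_expand retries with slightly larger bounds if the window is 1D.
import xarray
_src = xarray.open_rasterio('cool_raster.tif')
_subset = _src.rio.clip_box(minx=-94.08, miny=41.68, maxx=-94.06, maxy=41.69, auto_expand=True)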
def clip(self, geometries, crs=None, all_touched=False, drop=True, invert=False):
"""
Crops a :obj:`xarray.DataArray` by geojson like geometry dicts.
Powered by `rasterio.features.geometry_mask`.
Examples:
>>> geometry = ''' {"type": "Polygon",
... "coordinates": [
... [[-94.07955380199459, 41.69085871273774],
... [-94.06082436942204, 41.69103313774798],
... [-94.06063203899649, 41.67932439500822],
... [-94.07935807746362, 41.679150041277325],
... [-94.07955380199459, 41.69085871273774]]]}'''
>>> cropping_geometries = [geojson.loads(geometry)]
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326)
Parameters
----------
geometries: list
A list of geojson geometry dicts.
crs: :obj:`rasterio.crs.CRS`, optional
The CRS of the input geometries. Default is to assume it is the same
as the dataset.
all_touched : bool, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
drop: bool, optional
If True, drop the data outside of the extent of the mask geometries.
Otherwise, it will return the same raster with the data masked.
Default is True.
invert: boolean, optional
If False, pixels that do not overlap shapes will be set as nodata.
Otherwise, pixels that overlap the shapes will be set as nodata.
False by default.
Returns
-------
:obj:`xarray.DataArray`:
The clipped object.
"""
if self.crs is None:
raise MissingCRS(
"CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'."
f"{_get_data_var_message(self._obj)}"
)
crs = CRS.from_wkt(crs_to_wkt(crs)) if crs is not None else self.crs
if self.crs != crs:
geometries = [
rasterio.warp.transform_geom(crs, self.crs, geometry)
for geometry in geometries
]
clip_mask_arr = geometry_mask(
geometries=geometries,
out_shape=(int(self.height), int(self.width)),
transform=self.transform(recalc=True),
invert=not invert,
all_touched=all_touched,
)
clip_mask_xray = xarray.DataArray(
clip_mask_arr,
dims=(self.y_dim, self.x_dim),
)
cropped_ds = self._obj.where(clip_mask_xray)
if drop:
cropped_ds.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
cropped_ds = cropped_ds.rio.isel_window(
rasterio.windows.get_data_window(
np.ma.masked_array(clip_mask_arr, ~clip_mask_arr)
)
)
if self.nodata is not None and not np.isnan(self.nodata):
cropped_ds = cropped_ds.fillna(self.nodata)
cropped_ds = cropped_ds.astype(self._obj.dtype)
if (
cropped_ds.coords[self.x_dim].size < 1
or cropped_ds.coords[self.y_dim].size < 1
):
raise NoDataInBounds(
f"No data found in bounds.{_get_data_var_message(self._obj)}"
)
# make sure correct attributes preserved & projection added
_add_attrs_proj(cropped_ds, self._obj)
return cropped_ds
def _interpolate_na(self, src_data, method="nearest"):
"""
This method uses scipy.interpolate.griddata to interpolate missing data.
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:class:`numpy.ndarray`:
An interpolated :class:`numpy.ndarray`.
"""
src_data_flat = src_data.flatten()
try:
data_isnan = np.isnan(self.nodata)
except TypeError:
data_isnan = False
if not data_isnan:
data_bool = src_data_flat != self.nodata
else:
data_bool = ~np.isnan(src_data_flat)
if not data_bool.any():
return src_data
x_coords, y_coords = np.meshgrid(
self._obj.coords[self.x_dim].values, self._obj.coords[self.y_dim].values
)
return griddata(
points=(x_coords.flatten()[data_bool], y_coords.flatten()[data_bool]),
values=src_data_flat[data_bool],
xi=(x_coords, y_coords),
method=method,
fill_value=self.nodata,
)
def interpolate_na(self, method="nearest"):
"""
This method uses scipy.interpolate.griddata to interpolate missing data.
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:obj:`xarray.DataArray`:
An interpolated :obj:`xarray.DataArray` object.
"""
extra_dim = self._check_dimensions()
if extra_dim:
interp_data = []
for _, sub_xds in self._obj.groupby(extra_dim):
interp_data.append(
self._interpolate_na(sub_xds.load().data, method=method)
)
interp_data = np.array(interp_data)
else:
interp_data = self._interpolate_na(self._obj.load().data, method=method)
interp_array = xarray.DataArray(
name=self._obj.name,
data=interp_data,
coords=self._obj.coords,
dims=self._obj.dims,
attrs=self._obj.attrs,
)
interp_array.encoding = self._obj.encoding
# make sure correct attributes preserved & projection added
_add_attrs_proj(interp_array, self._obj)
return interp_array
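# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# interpolate_na fills nodata cells using scipy.interpolate.griddata; 'linear' is one of the
# methods listed in the docstring above, and the input file is hypothetical.
import xarray
_src = xarray.open_rasterio('cool_raster.tif')
_filled = _src.rio.interpolate_na(method="linear")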
def to_raster(
self,
raster_path,
driver="GTiff",
dtype=None,
tags=None,
windowed=False,
recalc_transform=True,
**profile_kwargs,
):
"""
Export the DataArray to a raster file.
Parameters
----------
raster_path: str
The path to output the raster to.
driver: str, optional
The name of the GDAL/rasterio driver to use to export the raster.
Default is "GTiff".
dtype: str, optional
The data type to write the raster to. Default is the dataset's dtype.
tags: dict, optional
A dictionary of tags to write to the raster.
windowed: bool, optional
If True, it will write using the windows of the output raster.
This only works if the output raster is tiled. As such, if you
set this to True, the output raster will be tiled.
Default is False.
**profile_kwargs
Additional keyword arguments to pass into writing the raster. The
nodata, transform, crs, count, width, and height attributes
are ignored.
"""
dtype = str(self._obj.dtype) if dtype is None else dtype
# get the output profile from the rasterio object
# if opened with xarray.open_rasterio()
try:
out_profile = self._obj._file_obj.acquire().profile
except AttributeError:
out_profile = {}
out_profile.update(profile_kwargs)
# filter out the generated attributes
out_profile = {
key: value
for key, value in out_profile.items()
if key
not in (
"driver",
"height",
"width",
"crs",
"transform",
"nodata",
"count",
"dtype",
)
}
rio_nodata = (
self.encoded_nodata if self.encoded_nodata is not None else self.nodata
)
if rio_nodata is not None:
# Ensure dtype of output data matches the expected dtype.
# This check is added here as the dtype of the data is
# converted right before writing.
rio_nodata = _ensure_nodata_dtype(rio_nodata, dtype)
with rasterio.open(
raster_path,
"w",
driver=driver,
height=int(self.height),
width=int(self.width),
count=int(self.count),
dtype=dtype,
crs=self.crs,
transform=self.transform(recalc=recalc_transform),
nodata=rio_nodata,
**out_profile,
) as dst:
_write_metatata_to_raster(dst, self._obj, tags)
# write data to raster
if windowed:
window_iter = dst.block_windows(1)
else:
window_iter = [(None, None)]
for _, window in window_iter:
if window is not None:
out_data = self.isel_window(window)
else:
out_data = self._obj
if self.encoded_nodata is not None:
out_data = out_data.fillna(self.encoded_nodata)
data = out_data.values.astype(dtype)
if data.ndim == 2:
dst.write(data, 1, window=window)
else:
dst.write(data, window=window)
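# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# Hedged example of exporting a DataArray with to_raster; extra keyword arguments such as
# compress are passed straight through to rasterio's GTiff profile.
import xarray
_src = xarray.open_rasterio('cool_raster.tif')
_src.rio.to_raster('output.tif', dtype="float32", tags={"source": "example"}, compress="LZW")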
@xarray.register_dataset_accessor("rio")
class RasterDataset(XRasterBase):
"""This is the GIS extension for :class:`xarray.Dataset`"""
@property
def vars(self):
"""list: Returns non-coordinate varibles"""
return list(self._obj.data_vars)
@property
def crs(self):
""":obj:`rasterio.crs.CRS`:
Retrieve projection from `xarray.Dataset`
"""
if self._crs is not None:
return None if self._crs is False else self._crs
self._crs = super().crs
if self._crs is not None:
return self._crs
# ensure all the CRS of the variables are the same
crs_list = []
for var in self.vars:
if self._obj[var].rio.crs is not None:
crs_list.append(self._obj[var].rio.crs)
try:
crs = crs_list[0]
except IndexError:
crs = None
if crs is None:
self._crs = False
return None
elif all(crs_i == crs for crs_i in crs_list):
self._crs = crs
else:
raise RioXarrayError(
"CRS in DataArrays differ in the Dataset: {}".format(crs_list)
)
return self._crs
def reproject(
self,
dst_crs,
resolution=None,
shape=None,
transform=None,
resampling=Resampling.nearest,
):
"""
Reproject :class:`xarray.Dataset` objects
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
.. versionadded:: 0.0.27 shape
.. versionadded:: 0.0.28 transform
Parameters
----------
dst_crs: str
OGC WKT string or Proj.4 string.
resolution: float or tuple(float, float), optional
Size of a destination pixel in destination projection units
(e.g. degrees or metres).
shape: tuple(int, int), optional
Shape of the destination in pixels (dst_height, dst_width). Cannot be used
together with resolution.
transform: optional
The destination transform.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:class:`xarray.Dataset`:
The reprojected Dataset.
"""
resampled_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
resampled_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.reproject(
dst_crs,
resolution=resolution,
shape=shape,
transform=transform,
resampling=resampling,
)
)
return resampled_dataset
def reproject_match(self, match_data_array, resampling=Resampling.nearest):
"""
Reproject a Dataset object to match the resolution, projection,
and region of another DataArray.
.. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported.
Requires either a grid mapping variable with 'spatial_ref' or
a 'crs' attribute to be set containing a valid CRS.
If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.
Parameters
----------
match_data_array: :obj:`xarray.DataArray` | :obj:`xarray.Dataset`
Dataset with the target resolution and projection.
resampling: Resampling method, optional
See rasterio.warp.reproject for more details.
Returns
--------
:obj:`xarray.Dataset`:
Contains the data from the src_data_array,
reprojected to match match_data_array.
"""
resampled_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
resampled_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.reproject_match(match_data_array, resampling=resampling)
)
return resampled_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def pad_box(self, minx, miny, maxx, maxy):
"""Pad the :class:`xarray.Dataset` to a bounding box.
.. warning:: Only works if all variables in the dataset have the
same coordinates.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
Returns
-------
:obj:`xarray.Dataset`:
The padded object.
"""
padded_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
padded_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.pad_box(minx, miny, maxx, maxy)
)
return padded_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3):
"""Clip the :class:`xarray.Dataset` by a bounding box.
.. warning:: Only works if all variables in the dataset have the
same coordinates.
Parameters
----------
minx: float
Minimum bound for x coordinate.
miny: float
Minimum bound for y coordinate.
maxx: float
Maximum bound for x coordinate.
maxy: float
Maximum bound for y coordinate.
auto_expand: bool
If True, it will expand the clip search if only a 1D raster is found with the clip.
auto_expand_limit: int
maximum number of times the clip will be retried before raising
an exception.
Returns
-------
:obj:`Dataset`:
The clipped object.
"""
clipped_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
clipped_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.clip_box(
minx,
miny,
maxx,
maxy,
auto_expand=auto_expand,
auto_expand_limit=auto_expand_limit,
)
)
return clipped_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def clip(self, geometries, crs=None, all_touched=False, drop=True, invert=False):
"""
Crops a :class:`xarray.Dataset` by geojson like geometry dicts.
.. warning:: Only works if all variables in the dataset have the same
coordinates.
Powered by `rasterio.features.geometry_mask`.
Examples:
>>> geometry = ''' {"type": "Polygon",
... "coordinates": [
... [[-94.07955380199459, 41.69085871273774],
... [-94.06082436942204, 41.69103313774798],
... [-94.06063203899649, 41.67932439500822],
... [-94.07935807746362, 41.679150041277325],
... [-94.07955380199459, 41.69085871273774]]]}'''
>>> cropping_geometries = [geojson.loads(geometry)]
>>> xds = xarray.open_rasterio('cool_raster.tif')
>>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326)
Parameters
----------
geometries: list
A list of geojson geometry dicts.
crs: :obj:`rasterio.crs.CRS`, optional
The CRS of the input geometries. Default is to assume it is the same
as the dataset.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
drop: bool, optional
If True, drop the data outside of the extent of the mask geometries.
Otherwise, it will return the same raster with the data masked.
Default is True.
invert: boolean, optional
If False, pixels that do not overlap shapes will be set as nodata.
Otherwise, pixels that overlap the shapes will be set as nodata.
False by default.
Returns
-------
:obj:`xarray.Dataset`:
The clipped object.
"""
clipped_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
clipped_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.clip(
geometries,
crs=crs,
all_touched=all_touched,
drop=drop,
invert=invert,
)
)
return clipped_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def interpolate_na(self, method="nearest"):
"""
This method uses `scipy.interpolate.griddata` to interpolate missing data.
Parameters
----------
method: {‘linear’, ‘nearest’, ‘cubic’}, optional
The method to use for interpolation in `scipy.interpolate.griddata`.
Returns
-------
:obj:`xarray.DataArray`:
The interpolated object.
"""
interpolated_dataset = xarray.Dataset(attrs=self._obj.attrs)
for var in self.vars:
interpolated_dataset[var] = (
self._obj[var]
.rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True)
.rio.interpolate_na(method=method)
)
return interpolated_dataset.rio.set_spatial_dims(
x_dim=self.x_dim, y_dim=self.y_dim, inplace=True
)
def to_raster(
self,
raster_path,
driver="GTiff",
dtype=None,
tags=None,
windowed=False,
recalc_transform=True,
**profile_kwargs,
):
"""
Export the Dataset to a raster file. Only works with 2D data.
Parameters
----------
raster_path: str
The path to output the raster to.
driver: str, optional
The name of the GDAL/rasterio driver to use to export the raster.
Default is "GTiff".
dtype: str, optional
The data type to write the raster to. Default is the dataset's dtype.
tags: dict, optional
A dictionary of tags to write to the raster.
windowed: bool, optional
If True, it will write using the windows of the output raster.
This only works if the output raster is tiled. As such, if you
set this to True, the output raster will be tiled.
Default is False.
**profile_kwargs
Additional keyword arguments to pass into writing the raster. The
nodata, transform, crs, count, width, and height attributes
are ignored.
"""
variable_dim = "band_{}".format(uuid4())
data_array = self._obj.to_array(dim=variable_dim)
# write data array names to raster
data_array.attrs["long_name"] = data_array[variable_dim].values.tolist()
# ensure raster metadata preserved
scales = []
offsets = []
nodatavals = []
for data_var in data_array[variable_dim].values:
scales.append(self._obj[data_var].attrs.get("scale_factor", 1.0))
offsets.append(self._obj[data_var].attrs.get("add_offset", 0.0))
nodatavals.append(self._obj[data_var].rio.nodata)
data_array.attrs["scales"] = scales
data_array.attrs["offsets"] = offsets
nodata = nodatavals[0]
if (
all(nodataval == nodata for nodataval in nodatavals)
or np.isnan(nodatavals).all()
):
data_array.rio.write_nodata(nodata, inplace=True)
else:
raise RioXarrayError(
"All nodata values must be the same when exporting to raster. "
"Current values: {}".format(nodatavals)
)
if self.crs is not None:
data_array.rio.write_crs(self.crs, inplace=True)
# write it to a raster
data_array.rio.to_raster(
raster_path=raster_path,
driver=driver,
dtype=dtype,
tags=tags,
windowed=windowed,
recalc_transform=recalc_transform,
**profile_kwargs,
)
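# --- Illustrative end-to-end sketch (added for documentation; not part of the original module) ---
# A hedged Dataset-level workflow combining the accessor methods defined above; the NetCDF file
# name and EPSG codes are hypothetical, and the spatial dims are assumed to be named 'x'/'y'.
import xarray
_xds = xarray.open_dataset('air_quality.nc')
_xds = _xds.rio.set_spatial_dims(x_dim="x", y_dim="y").rio.write_crs("EPSG:4326")
_utm = _xds.rio.reproject("EPSG:32615")     # reproject every data variable to UTM zone 15N
_utm.rio.to_raster('air_quality_utm.tif')   # requires 2D variables sharing one nodata value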
sktime/_contrib/classifier_capabilities_table.py | OliverMatthews/sktime | 1 | 6630166 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Auto-generate a classifier capabilites summary."""
import pandas as pd
from sktime.registry import all_estimators
# List of columns in the table
df_columns = [
"Classifier Category",
"Classifier Name",
"multivariate",
"unequal_length",
"missing_values",
"train_estimate",
"contractable",
]
# creates dataframe as df
df = pd.DataFrame([], columns=df_columns)
# Loop through all the classifiers
for classiName, classiClass in all_estimators(estimator_types="classifier"):
category = str(classiClass).split(".")[2]
try:
# capabilities of each classifier
cap_dict = classiClass.capabilities
multivariate = str(cap_dict["multivariate"])
unequal_length = str(cap_dict["unequal_length"])
missing_values = str(cap_dict["missing_values"])
train_estimate = str(cap_dict["train_estimate"])
contractable = str(cap_dict["contractable"])
# Adding capabilities for each classifier in the table
record = {
"Classifier Category": category,
"Classifier Name": classiName,
"multivariate": multivariate,
"unequal_length": unequal_length,
"missing_values": missing_values,
"train_estimate": train_estimate,
"contractable": contractable,
}
except AttributeError:
record = {
"Classifier Category": category,
"Classifier Name": classiName,
"multivariate": "N/A",
"unequal_length": "N/A",
"missing_values": "N/A",
"train_estimate": "N/A",
"contractable": "N/A",
}
df = pd.concat([df, pd.DataFrame(record, index=[0])], ignore_index=True)
df.to_html("Classifier_Capabilities.html", index=False, escape=False)
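# --- Illustrative usage sketch (added for documentation; not part of the original script) ---
# The generated DataFrame can also be queried directly, e.g. to list only the classifiers
# that advertise multivariate support (capability values are stored as strings above).
multivariate_only = df[df["multivariate"] == "True"]
print(multivariate_only[["Classifier Category", "Classifier Name"]])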
gimmemotifs/commands/motif2factors.py | simonvh/gimmemotifs | 20 | 6630167 | <filename>gimmemotifs/commands/motif2factors.py
# Copyright (c) 2009-2021 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from gimmemotifs.orthologs import motif2factor_from_orthologs
def motif2factors(args):
kwargs = {
"new_reference": args.new_reference,
"extra_orthologs_references": args.ortholog_references,
"genomes_dir": args.genomes_dir,
"tmpdir": args.tmpdir,
"outdir": args.outdir,
"strategy": args.strategy,
"database": args.database,
"threads": args.threads,
"keep_intermediate": args.keep_intermediate,
}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
motif2factor_from_orthologs(**kwargs)
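# --- Illustrative usage sketch (added for documentation; not part of the original module) ---
# motif2factors() expects an argparse-style namespace; the attribute values below are
# hypothetical and only illustrate the expected shape of the arguments.
from argparse import Namespace
example_args = Namespace(
    new_reference=["danRer11"], ortholog_references=None, genomes_dir=None,
    tmpdir=None, outdir="motif2factors_out", strategy=None, database=None,
    threads=4, keep_intermediate=False,
)
# motif2factors(example_args)  # None-valued options are dropped before the underlying call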
contrib/node/src/python/pants/contrib/node/subsystems/eslint.py | mpopenko-exos/pants | 0 | 6630168 | <reponame>mpopenko-exos/pants<filename>contrib/node/src/python/pants/contrib/node/subsystems/eslint.py
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import filecmp
import logging
import os.path
import shutil
from typing import Tuple
from pants.option.custom_types import dir_option, file_option
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir, safe_rmtree
logger = logging.getLogger(__name__)
class ESLint(Subsystem):
options_scope = 'eslint'
required_files = ['yarn.lock', 'package.json']
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--version', default='4.15.0', fingerprint=True, help='Use this ESLint version.')
register(
'--config', type=file_option, default=None,
help="Path to `.eslintrc` or alternative ESLint config file",
)
register(
'--skip', type=bool, default=False,
help="Don't use ESLint when running `./pants fmt` and `./pants lint`"
)
register(
'--setupdir', type=dir_option, fingerprint=True,
help='Find the package.json and yarn.lock under this dir for installing eslint and plugins.',
)
register(
'--ignore', type=file_option, fingerprint=True,
help='The path to the global eslint ignore path',
)
def configure(self, *, bootstrapped_support_path: str) -> None:
logger.debug(
f'Copying {self.options.setupdir} to bootstrapped dir: {bootstrapped_support_path}'
)
safe_rmtree(bootstrapped_support_path)
shutil.copytree(self.options.setupdir, bootstrapped_support_path)
def supportdir(self, *, task_workdir: str) -> Tuple[str, bool]:
"""Returns the path where the ESLint is bootstrapped.
:param task_workdir: The task's working directory
:returns: The path where ESLint is bootstrapped and whether or not it is configured
"""
bootstrapped_support_path = os.path.join(task_workdir, 'eslint')
# TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir
# assuming fingerprinting works as intended.
# If the eslint_setupdir is not provided or missing required files, then
# clean up the directory so that Pants can install a pre-defined eslint version later on.
# Otherwise, if there is no configurations changes, rely on the cache.
# If there is a config change detected, use the new configuration.
if self.options.setupdir:
configured = all(
os.path.exists(os.path.join(self.options.setupdir, f)) for f in self.required_files
)
else:
configured = False
if not configured:
safe_mkdir(bootstrapped_support_path, clean=True)
else:
try:
installed = all(
filecmp.cmp(
os.path.join(self.options.setupdir, f),
os.path.join(bootstrapped_support_path, f)
) for f in self.required_files
)
except OSError:
installed = False
if not installed:
self.configure(bootstrapped_support_path=bootstrapped_support_path)
return bootstrapped_support_path, configured
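# --- Illustrative usage sketch (added for documentation; not part of the original plugin) ---
# A consuming task would typically resolve the subsystem and ask for its bootstrap directory;
# the call below is kept as comments because it assumes Pants' option system is initialised.
# eslint = ESLint.global_instance()
# support_dir, configured = eslint.supportdir(task_workdir=task_workdir)
# if not configured:
#     ...  # fall back to installing the pre-defined eslint version into support_dir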
| # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import filecmp
import logging
import os.path
import shutil
from typing import Tuple
from pants.option.custom_types import dir_option, file_option
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir, safe_rmtree
logger = logging.getLogger(__name__)
class ESLint(Subsystem):
options_scope = 'eslint'
required_files = ['yarn.lock', 'package.json']
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--version', default='4.15.0', fingerprint=True, help='Use this ESLint version.')
register(
'--config', type=file_option, default=None,
help="Path to `.eslintrc` or alternative ESLint config file",
)
register(
'--skip', type=bool, default=False,
help="Don't use ESLint when running `./pants fmt` and `./pants lint`"
)
register(
'--setupdir', type=dir_option, fingerprint=True,
help='Find the package.json and yarn.lock under this dir for installing eslint and plugins.',
)
register(
'--ignore', type=file_option, fingerprint=True,
help='The path to the global eslint ignore path',
)
def configure(self, *, bootstrapped_support_path: str) -> None:
logger.debug(
f'Copying {self.options.setupdir} to bootstrapped dir: {bootstrapped_support_path}'
)
safe_rmtree(bootstrapped_support_path)
shutil.copytree(self.options.setupdir, bootstrapped_support_path)
def supportdir(self, *, task_workdir: str) -> Tuple[str, bool]:
"""Returns the path where the ESLint is bootstrapped.
:param task_workdir: The task's working directory
:returns: The path where ESLint is bootstrapped and whether or not it is configured
"""
bootstrapped_support_path = os.path.join(task_workdir, 'eslint')
# TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir
# assuming fingerprinting works as intended.
# If the eslint_setupdir is not provided or missing required files, then
# clean up the directory so that Pants can install a pre-defined eslint version later on.
        # Otherwise, if there are no configuration changes, rely on the cache.
# If there is a config change detected, use the new configuration.
if self.options.setupdir:
configured = all(
os.path.exists(os.path.join(self.options.setupdir, f)) for f in self.required_files
)
else:
configured = False
if not configured:
safe_mkdir(bootstrapped_support_path, clean=True)
else:
try:
installed = all(
filecmp.cmp(
os.path.join(self.options.setupdir, f),
os.path.join(bootstrapped_support_path, f)
) for f in self.required_files
)
except OSError:
installed = False
if not installed:
self.configure(bootstrapped_support_path=bootstrapped_support_path)
return bootstrapped_support_path, configured | en | 0.75201 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). Returns the path where the ESLint is bootstrapped. :param task_workdir: The task's working directory :returns: The path where ESLint is bootstrapped and whether or not it is configured # TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir # assuming fingerprinting works as intended. # If the eslint_setupdir is not provided or missing required files, then # clean up the directory so that Pants can install a pre-defined eslint version later on. # Otherwise, if there is no configurations changes, rely on the cache. # If there is a config change detected, use the new configuration. | 1.74229 | 2 |
causal_discovery/multivariable_mlp.py | codeaudit/ENCO | 0 | 6630169 | import torch
import torch.nn as nn
import math
import numpy as np
class MultivarMLP(nn.Module):
def __init__(self, input_dims, hidden_dims, output_dims, extra_dims, actfn, pre_layers=None):
"""
Module for stacking N neural networks in parallel for more efficient evaluation. In the context
of ENCO, we stack the neural networks of the conditional distributions for all N variables on top
of each other to parallelize it on a GPU.
Parameters
----------
input_dims : int
Input dimensionality for all networks (in ENCO, size of embedding)
hidden_dims : list[int]
Hidden dimensionalities to use in the hidden layer. Length of list determines
the number of hidden layers to use.
output_dims : int
Output dimensionality of all networks (in ENCO, max. number of categories)
extra_dims : list[int]
Number of neural networks to have in parallel (in ENCO, number of variables).
Can have multiple dimensions if needed.
actfn : function -> nn.Module
Activation function to use in between hidden layers
pre_layers : list[nn.Module] / nn.Module
Any modules that should be applied before the actual MLP. This can include
an embedding layer and/or masking operation.
"""
super().__init__()
self.extra_dims = extra_dims
layers = []
if pre_layers is not None:
if not isinstance(pre_layers, list):
layers += [pre_layers]
else:
layers += pre_layers
hidden_dims = [input_dims] + hidden_dims
for i in range(len(hidden_dims)-1):
if not isinstance(layers[-1], EmbedLayer): # After an embedding layer, we directly apply a non-linearity
layers += [MultivarLinear(input_dims=hidden_dims[i],
output_dims=hidden_dims[i+1],
extra_dims=extra_dims)]
layers += [actfn()]
layers += [MultivarLinear(input_dims=hidden_dims[-1],
output_dims=output_dims,
extra_dims=extra_dims)]
self.layers = nn.ModuleList(layers)
def forward(self, x, mask=None):
for l in self.layers:
if isinstance(l, (EmbedLayer, InputMask)):
x = l(x, mask=mask)
else:
x = l(x)
return x
@property
def device(self):
return next(iter(self.parameters())).device
class MultivarLinear(nn.Module):
def __init__(self, input_dims, output_dims, extra_dims):
"""
Linear layer with the same properties as MultivarMLP. It effectively applies N independent
linear layers in parallel.
Parameters
----------
input_dims : int
Number of input dimensions per network.
output_dims : int
Number of output dimensions per network.
extra_dims : list[int]
Number of networks to apply in parallel. Can have multiple dimensions if needed.
"""
super().__init__()
self.input_dims = input_dims
self.output_dims = output_dims
self.extra_dims = extra_dims
self.weight = nn.Parameter(torch.zeros(*extra_dims, output_dims, input_dims))
self.bias = nn.Parameter(torch.zeros(*extra_dims, output_dims))
nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
def forward(self, x):
# Shape preparation
x_extra_dims = x.shape[1:-1]
if len(x_extra_dims) > 0:
for i in range(len(x_extra_dims)):
assert x_extra_dims[-(i+1)] == self.extra_dims[-(i+1)], \
"Shape mismatch: X=%s, Layer=%s" % (str(x.shape), str(self.extra_dims))
for _ in range(len(self.extra_dims)-len(x_extra_dims)):
x = x.unsqueeze(dim=1)
# Unsqueeze
x = x.unsqueeze(dim=-1)
weight = self.weight.unsqueeze(dim=0)
bias = self.bias.unsqueeze(dim=0)
# Linear layer
out = torch.matmul(weight, x).squeeze(dim=-1)
out = out + bias
return out
def extra_repr(self):
# For printing
return 'input_dims={}, output_dims={}, extra_dims={}'.format(
self.input_dims, self.output_dims, str(self.extra_dims)
)
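# Shape sketch (added for illustration, not part of the original module): with
# extra_dims=[N], MultivarLinear applies N independent linear maps in one batched matmul.
#
#   layer = MultivarLinear(input_dims=16, output_dims=8, extra_dims=[5])
#   x = torch.randn(32, 5, 16)   # (batch, N parallel networks, input_dims)
#   layer(x).shape               # -> torch.Size([32, 5, 8])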
class InputMask(nn.Module):
def __init__(self, input_mask):
"""
        Module for masking the input. Needed to simulate different parent sets.
Parameters
----------
input_mask : torch.Tensor/None
If a tensor, it is assumed to be a fixed mask for all forward passes.
If None, it is required to pass the mask during every forward pass.
"""
super().__init__()
if isinstance(input_mask, torch.Tensor):
self.register_buffer('input_mask', input_mask.float(), persistent=False)
else:
self.input_mask = input_mask
def forward(self, x, mask=None, mask_val=0):
"""
Forward pass.
Parameters
----------
x : torch.Tensor
Input that should be masked.
mask : torch.FloatTensor/None
            If self.input_mask is None, this tensor must not be None. Will be used
to mask the input. A value of 1.0 means that an element is not masked,
and 0.0 that it will be masked. Is broadcasted over dimensions with x.
mask_val : float
Value to set for masked elements.
"""
# Check if mask is passed or should be taken constant
if mask is None:
assert self.input_mask is not None, "No mask was given in InputMask module."
mask = self.input_mask
if len(mask.shape) > len(x.shape):
x = x.reshape(x.shape[:1] + (1,)*(len(mask.shape)-len(x.shape)) + x.shape[1:])
if len(x.shape) > len(mask.shape):
mask = mask.reshape((1,)*(len(x.shape)-len(mask.shape)) + mask.shape)
mask = mask.to(x.dtype)
if mask_val != 0.0:
x = x * mask + (1 - mask) * mask_val
else:
x = x * mask
return x
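# Broadcast sketch (added for illustration, not part of the original module): a value of
# 1.0 keeps an input, 0.0 replaces it with mask_val, and the mask is broadcast against x.
#
#   masker = InputMask(None)
#   x = torch.randn(2, 3, 4)
#   mask = torch.tensor([1.0, 0.0, 1.0])
#   masker(x, mask=mask[None, :, None]).shape   # -> torch.Size([2, 3, 4]); the middle slice
#                                               #    along dim 1 is zeroed out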
class EmbedLayer(nn.Module):
def __init__(self, num_vars, num_categs, hidden_dim, input_mask, sparse_embeds=False):
"""
Embedding layer to represent categorical inputs in continuous space. For efficiency, the embeddings
of different inputs are summed in this layer instead of stacked. This is equivalent to stacking the
embeddings and applying a linear layer, but is more efficient with slightly more parameter cost.
Masked inputs are represented by a zero embedding tensor.
Parameters
----------
num_vars : int
Number of variables that are input to each neural network.
num_categs : int
Max. number of categories that each variable can take.
hidden_dim : int
Output dimensionality of the embedding layer.
input_mask : InputMask
Input mask module to use for masking possible inputs.
sparse_embeds : bool
If True, we sparsify the embedding tensors before summing them together in the
forward pass. This is more memory efficient and can give a considerable speedup
for networks with many variables, but can be slightly slower for small networks.
It is recommended to set it to True for graphs with more than 50 variables.
"""
super().__init__()
self.num_vars = num_vars
self.hidden_dim = hidden_dim
self.input_mask = input_mask
self.sparse_embeds = sparse_embeds
self.num_categs = num_categs
# For each of the N networks, we have num_vars*num_categs possible embeddings to model.
# Sharing embeddings across all N networks can limit the expressiveness of the networks.
# Instead, we share them across 10-20 variables for large graphs to reduce memory.
self.num_embeds = self.num_vars*self.num_vars*self.num_categs
if self.num_embeds > 1e7:
self.num_embeds = int(math.ceil(self.num_embeds / 20.0))
self.shortend = True
elif self.num_embeds > 1e6:
for s in range(11, -1, -1):
if self.num_vars % s == 0:
self.num_embeds = self.num_embeds // s
break
self.shortend = True
else:
self.shortend = False
self.embedding = nn.Embedding(num_embeddings=self.num_embeds,
embedding_dim=hidden_dim)
self.embedding.weight.data.mul_(2./math.sqrt(self.num_vars))
self.bias = nn.Parameter(torch.zeros(num_vars, self.hidden_dim))
# Tensor for mapping each input to its corresponding embedding range in self.embedding
pos_trans = torch.arange(self.num_vars**2, dtype=torch.long) * self.num_categs
self.register_buffer("pos_trans", pos_trans, persistent=False)
def forward(self, x, mask):
# For very large x tensors during graph fitting, it is more efficient to split it
# into multiple sub-tensors before running the forward pass.
num_chunks = int(math.ceil(np.prod(mask.shape) / 256e5))
if self.training or num_chunks == 1:
return self.embed_tensor(x, mask)
else:
x = x.chunk(num_chunks, dim=0)
mask = mask.chunk(num_chunks, dim=0)
x_out = []
for x_l, mask_l in zip(x, mask):
out_l = self.embed_tensor(x_l, mask_l)
x_out.append(out_l)
x_out = torch.cat(x_out, dim=0)
return x_out
def embed_tensor(self, x, mask):
assert x.shape[-1] == self.num_vars
if len(x.shape) == 2: # Add variable dimension
x = x.unsqueeze(dim=1).expand(-1, self.num_vars, -1)
else:
assert x.shape[-2] == self.num_vars
# Number of variables
pos_trans = self.pos_trans.view((1,)*(len(x.shape)-2) + (self.num_vars, self.num_vars))
x = x + pos_trans
if self.sparse_embeds:
# Selects the non-zero embedding tensors and stores them in a separate tensor instead of masking.
# Lower memory consumption and faster for networks with many variables.
flattened_mask = mask.flatten(0, 1).long()
num_neighbours = flattened_mask.sum(dim=-1)
max_neighbours = num_neighbours.max()
x_sparse = torch.masked_select(x, mask == 1.0)
if self.shortend:
x_sparse = x_sparse % self.num_embeds
x_sparse = self.embedding(x_sparse)
x_sparse = torch.cat([x_sparse.new_zeros(x_sparse.shape[:-2]+(1,)+x_sparse.shape[-1:]), x_sparse], dim=-2)
idxs = flattened_mask.cumsum(dim=-1)
idxs[1:] += num_neighbours[:-1].cumsum(dim=-1)[..., None]
idxs = (idxs * flattened_mask).sort(dim=-1, descending=True)[0]
# Determine how many embeddings to sum per variable. Needed to construct the sparse tensor.
sort_neighbours, sort_indices = num_neighbours.sort(dim=0)
_, resort_indices = sort_indices.sort(dim=0)
pos = 1+torch.arange(num_neighbours.shape[0], device=num_neighbours.device, dtype=torch.long)
comp_cost = sort_neighbours * pos + max_neighbours * (num_neighbours.shape[0] - pos)
min_cost, argmin_cost = comp_cost.min(dim=0)
mid_neighbours = sort_neighbours[argmin_cost]
# More efficient: split tensor into two, one half with the variables with the least and the other
# with the most embeddings to sum. This prevents large computational costs if we have a few outliers.
idxs = idxs[sort_indices]
idxs = idxs[:, :max_neighbours]
if mid_neighbours > 0:
x_new_1 = x_sparse.index_select(index=idxs[:argmin_cost+1, :mid_neighbours].reshape(-1), dim=0)
x_1 = x_new_1.reshape(-1, mid_neighbours, x_sparse.shape[-1]).sum(dim=-2)
else:
x_1 = x_sparse.new_zeros(argmin_cost+1, x_sparse.shape[-1])
x_new_2 = x_sparse.index_select(index=idxs[argmin_cost+1:, :max_neighbours].reshape(-1), dim=0)
x_2 = x_new_2.reshape(-1, max_neighbours, x_sparse.shape[-1]).sum(dim=-2)
# Bring tensors back in order
x = torch.cat([x_1, x_2], dim=0)[resort_indices]
x = x.reshape(mask.shape[0], mask.shape[1], x.shape[-1])
else:
if self.shortend:
x = x % self.num_embeds
x = self.embedding(x)
x = self.input_mask(x, mask=mask[..., None], mask_val=0.0)
if len(x.shape) > 3:
x = x.sum(dim=-2)
bias = self.bias.view((1,)*(len(x.shape)-2) + self.bias.shape)
x = x + bias
return x
def get_activation_function(actfn):
"""
Returns an activation function based on a string description.
"""
if actfn is None or actfn == 'leakyrelu':
def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)
elif actfn == 'gelu':
def create_actfn(): return nn.GELU()
elif actfn == 'relu':
def create_actfn(): return nn.ReLU()
elif actfn == 'swish' or actfn == 'silu':
def create_actfn(): return nn.SiLU()
else:
raise Exception('Unknown activation function ' + str(actfn))
return create_actfn
def create_model(num_vars, num_categs, hidden_dims, actfn=None):
"""
Method for creating a full multivariable MLP as used in ENCO.
"""
num_outputs = max(1, num_categs)
num_inputs = num_vars
actfn = get_activation_function(actfn)
mask = InputMask(None)
if num_categs > 0:
pre_layers = EmbedLayer(num_vars=num_vars,
num_categs=num_categs,
hidden_dim=hidden_dims[0],
input_mask=mask,
sparse_embeds=(num_vars >= 50))
num_inputs = pre_layers.hidden_dim
pre_layers = [pre_layers, actfn()]
else:
pre_layers = mask
mlps = MultivarMLP(input_dims=num_inputs,
hidden_dims=hidden_dims,
output_dims=num_outputs,
extra_dims=[num_vars],
actfn=actfn,
pre_layers=pre_layers)
return mlps
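# Minimal usage sketch (added for illustration, not part of the original module). The
# numbers below are arbitrary example values; they only demonstrate the expected shapes.
if __name__ == '__main__':
    num_vars, num_categs, batch_size = 4, 3, 8
    model = create_model(num_vars=num_vars, num_categs=num_categs, hidden_dims=[32])
    x = torch.randint(num_categs, size=(batch_size, num_vars))
    # A mask entry of 1.0 marks a variable as a possible parent for the corresponding network.
    mask = torch.ones(batch_size, num_vars, num_vars)
    out = model(x, mask=mask)
    print(out.shape)  # -> torch.Size([8, 4, 3]) == (batch, num_vars, num_categs)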
| import torch
import torch.nn as nn
import math
import numpy as np
class MultivarMLP(nn.Module):
def __init__(self, input_dims, hidden_dims, output_dims, extra_dims, actfn, pre_layers=None):
"""
Module for stacking N neural networks in parallel for more efficient evaluation. In the context
of ENCO, we stack the neural networks of the conditional distributions for all N variables on top
of each other to parallelize it on a GPU.
Parameters
----------
input_dims : int
Input dimensionality for all networks (in ENCO, size of embedding)
hidden_dims : list[int]
Hidden dimensionalities to use in the hidden layer. Length of list determines
the number of hidden layers to use.
output_dims : int
Output dimensionality of all networks (in ENCO, max. number of categories)
extra_dims : list[int]
Number of neural networks to have in parallel (in ENCO, number of variables).
Can have multiple dimensions if needed.
actfn : function -> nn.Module
Activation function to use in between hidden layers
pre_layers : list[nn.Module] / nn.Module
Any modules that should be applied before the actual MLP. This can include
an embedding layer and/or masking operation.
"""
super().__init__()
self.extra_dims = extra_dims
layers = []
if pre_layers is not None:
if not isinstance(pre_layers, list):
layers += [pre_layers]
else:
layers += pre_layers
hidden_dims = [input_dims] + hidden_dims
for i in range(len(hidden_dims)-1):
if not isinstance(layers[-1], EmbedLayer): # After an embedding layer, we directly apply a non-linearity
layers += [MultivarLinear(input_dims=hidden_dims[i],
output_dims=hidden_dims[i+1],
extra_dims=extra_dims)]
layers += [actfn()]
layers += [MultivarLinear(input_dims=hidden_dims[-1],
output_dims=output_dims,
extra_dims=extra_dims)]
self.layers = nn.ModuleList(layers)
def forward(self, x, mask=None):
for l in self.layers:
if isinstance(l, (EmbedLayer, InputMask)):
x = l(x, mask=mask)
else:
x = l(x)
return x
@property
def device(self):
return next(iter(self.parameters())).device
class MultivarLinear(nn.Module):
def __init__(self, input_dims, output_dims, extra_dims):
"""
Linear layer with the same properties as MultivarMLP. It effectively applies N independent
linear layers in parallel.
Parameters
----------
input_dims : int
Number of input dimensions per network.
output_dims : int
Number of output dimensions per network.
extra_dims : list[int]
Number of networks to apply in parallel. Can have multiple dimensions if needed.
"""
super().__init__()
self.input_dims = input_dims
self.output_dims = output_dims
self.extra_dims = extra_dims
self.weight = nn.Parameter(torch.zeros(*extra_dims, output_dims, input_dims))
self.bias = nn.Parameter(torch.zeros(*extra_dims, output_dims))
nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
def forward(self, x):
# Shape preparation
x_extra_dims = x.shape[1:-1]
if len(x_extra_dims) > 0:
for i in range(len(x_extra_dims)):
assert x_extra_dims[-(i+1)] == self.extra_dims[-(i+1)], \
"Shape mismatch: X=%s, Layer=%s" % (str(x.shape), str(self.extra_dims))
for _ in range(len(self.extra_dims)-len(x_extra_dims)):
x = x.unsqueeze(dim=1)
# Unsqueeze
x = x.unsqueeze(dim=-1)
weight = self.weight.unsqueeze(dim=0)
bias = self.bias.unsqueeze(dim=0)
# Linear layer
out = torch.matmul(weight, x).squeeze(dim=-1)
out = out + bias
return out
def extra_repr(self):
# For printing
return 'input_dims={}, output_dims={}, extra_dims={}'.format(
self.input_dims, self.output_dims, str(self.extra_dims)
)
class InputMask(nn.Module):
def __init__(self, input_mask):
"""
        Module for masking the input. Needed to simulate different parent sets.
Parameters
----------
input_mask : torch.Tensor/None
If a tensor, it is assumed to be a fixed mask for all forward passes.
If None, it is required to pass the mask during every forward pass.
"""
super().__init__()
if isinstance(input_mask, torch.Tensor):
self.register_buffer('input_mask', input_mask.float(), persistent=False)
else:
self.input_mask = input_mask
def forward(self, x, mask=None, mask_val=0):
"""
Forward pass.
Parameters
----------
x : torch.Tensor
Input that should be masked.
mask : torch.FloatTensor/None
            If self.input_mask is None, this tensor must not be None. Will be used
to mask the input. A value of 1.0 means that an element is not masked,
and 0.0 that it will be masked. Is broadcasted over dimensions with x.
mask_val : float
Value to set for masked elements.
"""
# Check if mask is passed or should be taken constant
if mask is None:
assert self.input_mask is not None, "No mask was given in InputMask module."
mask = self.input_mask
if len(mask.shape) > len(x.shape):
x = x.reshape(x.shape[:1] + (1,)*(len(mask.shape)-len(x.shape)) + x.shape[1:])
if len(x.shape) > len(mask.shape):
mask = mask.reshape((1,)*(len(x.shape)-len(mask.shape)) + mask.shape)
mask = mask.to(x.dtype)
if mask_val != 0.0:
x = x * mask + (1 - mask) * mask_val
else:
x = x * mask
return x
class EmbedLayer(nn.Module):
def __init__(self, num_vars, num_categs, hidden_dim, input_mask, sparse_embeds=False):
"""
Embedding layer to represent categorical inputs in continuous space. For efficiency, the embeddings
of different inputs are summed in this layer instead of stacked. This is equivalent to stacking the
embeddings and applying a linear layer, but is more efficient with slightly more parameter cost.
Masked inputs are represented by a zero embedding tensor.
Parameters
----------
num_vars : int
Number of variables that are input to each neural network.
num_categs : int
Max. number of categories that each variable can take.
hidden_dim : int
Output dimensionality of the embedding layer.
input_mask : InputMask
Input mask module to use for masking possible inputs.
sparse_embeds : bool
If True, we sparsify the embedding tensors before summing them together in the
forward pass. This is more memory efficient and can give a considerable speedup
for networks with many variables, but can be slightly slower for small networks.
It is recommended to set it to True for graphs with more than 50 variables.
"""
super().__init__()
self.num_vars = num_vars
self.hidden_dim = hidden_dim
self.input_mask = input_mask
self.sparse_embeds = sparse_embeds
self.num_categs = num_categs
# For each of the N networks, we have num_vars*num_categs possible embeddings to model.
# Sharing embeddings across all N networks can limit the expressiveness of the networks.
# Instead, we share them across 10-20 variables for large graphs to reduce memory.
self.num_embeds = self.num_vars*self.num_vars*self.num_categs
if self.num_embeds > 1e7:
self.num_embeds = int(math.ceil(self.num_embeds / 20.0))
self.shortend = True
elif self.num_embeds > 1e6:
for s in range(11, -1, -1):
if self.num_vars % s == 0:
self.num_embeds = self.num_embeds // s
break
self.shortend = True
else:
self.shortend = False
self.embedding = nn.Embedding(num_embeddings=self.num_embeds,
embedding_dim=hidden_dim)
self.embedding.weight.data.mul_(2./math.sqrt(self.num_vars))
self.bias = nn.Parameter(torch.zeros(num_vars, self.hidden_dim))
# Tensor for mapping each input to its corresponding embedding range in self.embedding
pos_trans = torch.arange(self.num_vars**2, dtype=torch.long) * self.num_categs
self.register_buffer("pos_trans", pos_trans, persistent=False)
def forward(self, x, mask):
# For very large x tensors during graph fitting, it is more efficient to split it
# into multiple sub-tensors before running the forward pass.
num_chunks = int(math.ceil(np.prod(mask.shape) / 256e5))
if self.training or num_chunks == 1:
return self.embed_tensor(x, mask)
else:
x = x.chunk(num_chunks, dim=0)
mask = mask.chunk(num_chunks, dim=0)
x_out = []
for x_l, mask_l in zip(x, mask):
out_l = self.embed_tensor(x_l, mask_l)
x_out.append(out_l)
x_out = torch.cat(x_out, dim=0)
return x_out
def embed_tensor(self, x, mask):
assert x.shape[-1] == self.num_vars
if len(x.shape) == 2: # Add variable dimension
x = x.unsqueeze(dim=1).expand(-1, self.num_vars, -1)
else:
assert x.shape[-2] == self.num_vars
# Number of variables
pos_trans = self.pos_trans.view((1,)*(len(x.shape)-2) + (self.num_vars, self.num_vars))
x = x + pos_trans
if self.sparse_embeds:
# Selects the non-zero embedding tensors and stores them in a separate tensor instead of masking.
# Lower memory consumption and faster for networks with many variables.
flattened_mask = mask.flatten(0, 1).long()
num_neighbours = flattened_mask.sum(dim=-1)
max_neighbours = num_neighbours.max()
x_sparse = torch.masked_select(x, mask == 1.0)
if self.shortend:
x_sparse = x_sparse % self.num_embeds
x_sparse = self.embedding(x_sparse)
x_sparse = torch.cat([x_sparse.new_zeros(x_sparse.shape[:-2]+(1,)+x_sparse.shape[-1:]), x_sparse], dim=-2)
idxs = flattened_mask.cumsum(dim=-1)
idxs[1:] += num_neighbours[:-1].cumsum(dim=-1)[..., None]
idxs = (idxs * flattened_mask).sort(dim=-1, descending=True)[0]
# Determine how many embeddings to sum per variable. Needed to construct the sparse tensor.
sort_neighbours, sort_indices = num_neighbours.sort(dim=0)
_, resort_indices = sort_indices.sort(dim=0)
pos = 1+torch.arange(num_neighbours.shape[0], device=num_neighbours.device, dtype=torch.long)
comp_cost = sort_neighbours * pos + max_neighbours * (num_neighbours.shape[0] - pos)
min_cost, argmin_cost = comp_cost.min(dim=0)
mid_neighbours = sort_neighbours[argmin_cost]
# More efficient: split tensor into two, one half with the variables with the least and the other
# with the most embeddings to sum. This prevents large computational costs if we have a few outliers.
idxs = idxs[sort_indices]
idxs = idxs[:, :max_neighbours]
if mid_neighbours > 0:
x_new_1 = x_sparse.index_select(index=idxs[:argmin_cost+1, :mid_neighbours].reshape(-1), dim=0)
x_1 = x_new_1.reshape(-1, mid_neighbours, x_sparse.shape[-1]).sum(dim=-2)
else:
x_1 = x_sparse.new_zeros(argmin_cost+1, x_sparse.shape[-1])
x_new_2 = x_sparse.index_select(index=idxs[argmin_cost+1:, :max_neighbours].reshape(-1), dim=0)
x_2 = x_new_2.reshape(-1, max_neighbours, x_sparse.shape[-1]).sum(dim=-2)
# Bring tensors back in order
x = torch.cat([x_1, x_2], dim=0)[resort_indices]
x = x.reshape(mask.shape[0], mask.shape[1], x.shape[-1])
else:
if self.shortend:
x = x % self.num_embeds
x = self.embedding(x)
x = self.input_mask(x, mask=mask[..., None], mask_val=0.0)
if len(x.shape) > 3:
x = x.sum(dim=-2)
bias = self.bias.view((1,)*(len(x.shape)-2) + self.bias.shape)
x = x + bias
return x
def get_activation_function(actfn):
"""
Returns an activation function based on a string description.
"""
if actfn is None or actfn == 'leakyrelu':
def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)
elif actfn == 'gelu':
def create_actfn(): return nn.GELU()
elif actfn == 'relu':
def create_actfn(): return nn.ReLU()
elif actfn == 'swish' or actfn == 'silu':
def create_actfn(): return nn.SiLU()
else:
raise Exception('Unknown activation function ' + str(actfn))
return create_actfn
def create_model(num_vars, num_categs, hidden_dims, actfn=None):
"""
Method for creating a full multivariable MLP as used in ENCO.
"""
num_outputs = max(1, num_categs)
num_inputs = num_vars
actfn = get_activation_function(actfn)
mask = InputMask(None)
if num_categs > 0:
pre_layers = EmbedLayer(num_vars=num_vars,
num_categs=num_categs,
hidden_dim=hidden_dims[0],
input_mask=mask,
sparse_embeds=(num_vars >= 50))
num_inputs = pre_layers.hidden_dim
pre_layers = [pre_layers, actfn()]
else:
pre_layers = mask
mlps = MultivarMLP(input_dims=num_inputs,
hidden_dims=hidden_dims,
output_dims=num_outputs,
extra_dims=[num_vars],
actfn=actfn,
pre_layers=pre_layers)
return mlps
| en | 0.801981 | Module for stacking N neural networks in parallel for more efficient evaluation. In the context of ENCO, we stack the neural networks of the conditional distributions for all N variables on top of each other to parallelize it on a GPU. Parameters ---------- input_dims : int Input dimensionality for all networks (in ENCO, size of embedding) hidden_dims : list[int] Hidden dimensionalities to use in the hidden layer. Length of list determines the number of hidden layers to use. output_dims : int Output dimensionality of all networks (in ENCO, max. number of categories) extra_dims : list[int] Number of neural networks to have in parallel (in ENCO, number of variables). Can have multiple dimensions if needed. actfn : function -> nn.Module Activation function to use in between hidden layers pre_layers : list[nn.Module] / nn.Module Any modules that should be applied before the actual MLP. This can include an embedding layer and/or masking operation. # After an embedding layer, we directly apply a non-linearity Linear layer with the same properties as MultivarMLP. It effectively applies N independent linear layers in parallel. Parameters ---------- input_dims : int Number of input dimensions per network. output_dims : int Number of output dimensions per network. extra_dims : list[int] Number of networks to apply in parallel. Can have multiple dimensions if needed. # Shape preparation # Unsqueeze # Linear layer # For printing Module for handling to mask the input. Needed to simulate different parent sets. Parameters ---------- input_mask : torch.Tensor/None If a tensor, it is assumed to be a fixed mask for all forward passes. If None, it is required to pass the mask during every forward pass. Forward pass. Parameters ---------- x : torch.Tensor Input that should be masked. mask : torch.FloatTensor/None If self.input_mask is None, this tensor must be not none. Will be used to mask the input. A value of 1.0 means that an element is not masked, and 0.0 that it will be masked. Is broadcasted over dimensions with x. mask_val : float Value to set for masked elements. # Check if mask is passed or should be taken constant Embedding layer to represent categorical inputs in continuous space. For efficiency, the embeddings of different inputs are summed in this layer instead of stacked. This is equivalent to stacking the embeddings and applying a linear layer, but is more efficient with slightly more parameter cost. Masked inputs are represented by a zero embedding tensor. Parameters ---------- num_vars : int Number of variables that are input to each neural network. num_categs : int Max. number of categories that each variable can take. hidden_dim : int Output dimensionality of the embedding layer. input_mask : InputMask Input mask module to use for masking possible inputs. sparse_embeds : bool If True, we sparsify the embedding tensors before summing them together in the forward pass. This is more memory efficient and can give a considerable speedup for networks with many variables, but can be slightly slower for small networks. It is recommended to set it to True for graphs with more than 50 variables. # For each of the N networks, we have num_vars*num_categs possible embeddings to model. # Sharing embeddings across all N networks can limit the expressiveness of the networks. # Instead, we share them across 10-20 variables for large graphs to reduce memory. 
# Tensor for mapping each input to its corresponding embedding range in self.embedding # For very large x tensors during graph fitting, it is more efficient to split it # into multiple sub-tensors before running the forward pass. # Add variable dimension # Number of variables # Selects the non-zero embedding tensors and stores them in a separate tensor instead of masking. # Lower memory consumption and faster for networks with many variables. # Determine how many embeddings to sum per variable. Needed to construct the sparse tensor. # More efficient: split tensor into two, one half with the variables with the least and the other # with the most embeddings to sum. This prevents large computational costs if we have a few outliers. # Bring tensors back in order Returns an activation function based on a string description. Method for creating a full multivariable MLP as used in ENCO. | 3.229416 | 3 |
pangram.py | sara-02/dsa_sg | 0 | 6630170 | # Pangram example
# The quick brown fox jumps over the lazy dog
def check_pangram(string):
    NUM_ALPHA = 26
    if len(string) < NUM_ALPHA:
        return "String is not a pangram."
    bucket_count = [0] * NUM_ALPHA
st = string.lower()
for s in st:
if s.isalpha():
bucket_count[ord(s) - 97] += 1
if all([b >= 1 for b in bucket_count]):
return "Strings is a pangram."
return "Strings is Not a pangrams."
def main():
    string = input("Enter the string:: ")
print(check_pangram(string))
if __name__ == '__main__':
main()
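# Alternative sketch (added for illustration, not part of the original file): the same
# check written with sets; string.ascii_lowercase provides the 26-letter alphabet.
import string as _string

def is_pangram(text):
    # Keep only alphabetic characters (lowercased) and compare against the full alphabet.
    return set(_string.ascii_lowercase) <= {c for c in text.lower() if c.isalpha()}

# Example: is_pangram("The quick brown fox jumps over the lazy dog") -> True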
| # Pangram example
# The quick brown fox jumps over the lazy dog
def check_pangram(string):
    NUM_ALPHA = 26
    if len(string) < NUM_ALPHA:
        return "String is not a pangram."
    bucket_count = [0] * NUM_ALPHA
st = string.lower()
for s in st:
if s.isalpha():
bucket_count[ord(s) - 97] += 1
if all([b >= 1 for b in bucket_count]):
return "Strings is a pangram."
return "Strings is Not a pangrams."
def main():
    string = input("Enter the string:: ")
print(check_pangram(string))
if __name__ == '__main__':
main()
| en | 0.641493 | # Pangram example # The quick brown fox jumps over the lazy dog | 4.029597 | 4 |
src/datasets/SingleFolderDataset.py | smmmmi/E2SRI | 35 | 6630171 | import os
from .BaseDataset import BaseDataset
from PIL import Image
class SingleFolderDataset(BaseDataset):
def __init__(self, root, cfg, is_train=True):
super(SingleFolderDataset, self).__init__(cfg, is_train)
root = os.path.abspath(root)
self.file_list = sorted(os.listdir(root))
self.file_list = [os.path.join(root, file_name) for file_name in self.file_list]
self.interval = self.sequence_size // 2
self.file_list = [self.file_list[idx:idx + self.sequence_size] for idx in
range(len(self.file_list) - self.sequence_size + 1)]
self.video_names = [file_path_list[self.interval].split('/')[-2] for file_path_list in self.file_list]
self.image_names = [file_path_list[self.interval].split('/')[-1].split('.')[0] for file_path_list in self.file_list]
self.gt_name = None
def load_image(self, data_list):
central_stack = Image.open(data_list[self.interval]).convert('RGB')
gt = None
side_stack = [Image.open(data_list[idx]).convert('RGB') for idx in
range(len(data_list)) if idx != self.interval]
return central_stack, gt, side_stack
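# Illustrative sketch (added for this write-up, not part of the original file): how the
# dataset is typically constructed and read. The `cfg` object (and the sequence_size it
# provides through BaseDataset) is assumed to come from the project's configuration.
#
#   dataset = SingleFolderDataset(root="./events/seq_01", cfg=cfg, is_train=False)
#   central, gt, sides = dataset.load_image(dataset.file_list[0])
#   # gt is None for this inference-style dataset; `sides` holds the sequence_size - 1
#   # neighbouring stacks around the central one.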
| import os
from .BaseDataset import BaseDataset
from PIL import Image
class SingleFolderDataset(BaseDataset):
def __init__(self, root, cfg, is_train=True):
super(SingleFolderDataset, self).__init__(cfg, is_train)
root = os.path.abspath(root)
self.file_list = sorted(os.listdir(root))
self.file_list = [os.path.join(root, file_name) for file_name in self.file_list]
self.interval = self.sequence_size // 2
self.file_list = [self.file_list[idx:idx + self.sequence_size] for idx in
range(len(self.file_list) - self.sequence_size + 1)]
self.video_names = [file_path_list[self.interval].split('/')[-2] for file_path_list in self.file_list]
self.image_names = [file_path_list[self.interval].split('/')[-1].split('.')[0] for file_path_list in self.file_list]
self.gt_name = None
def load_image(self, data_list):
central_stack = Image.open(data_list[self.interval]).convert('RGB')
gt = None
side_stack = [Image.open(data_list[idx]).convert('RGB') for idx in
range(len(data_list)) if idx != self.interval]
return central_stack, gt, side_stack
| none | 1 | 2.60012 | 3 |
|
main.py | iamabhishek229313/capstone_project_backend | 0 | 6630172 |
from typing import Optional
from fastapi import FastAPI
from enum import Enum
app = FastAPI()
@app.get("/")
async def read_root():
return {"Hello": "world"} # we can return a dict, list, singular values as str, int, etc.
@app.get("/items/{item_id}")
async def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@app.get("/user/me")
async def read_user_me():
return {"current_user": "This user is me!"}
@app.get("/user/{user_id}")
async def read_user(user_id: int):
return {"current_user": user_id}
class ModelName(str, Enum):
alexnet = "alexnet"
resnet = "resnet"
lenet = "lenet"
@app.get("/model/{model_name}")
async def read_model_name(model_name: ModelName):
if model_name == ModelName.alexnet:
return {"Model name is:": model_name, "message": "Deep Learning FTW!"}
elif model_name == ModelName.lenet:
return {"Model name is:": model_name, "message": "LeCNN all the images"}
else:
return {"Model name is:": model_name, "message": "Have some residuals"}
| from typing import Optional
from fastapi import FastAPI
from enum import Enum
app = FastAPI()
@app.get("/")
async def read_root():
return {"Hello": "world"} # we can return a dict, list, singular values as str, int, etc.
@app.get("/items/{item_id}")
async def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@app.get("/user/me")
async def read_user_me():
return {"current_user": "This user is me!"}
@app.get("/user/{user_id}")
async def read_user(user_id: int):
return {"current_user": user_id}
class ModelName(str, Enum):
alexnet = "alexnet"
resnet = "resnet"
lenet = "lenet"
@app.get("/model/{model_name}")
async def read_model_name(model_name: ModelName):
if model_name == ModelName.alexnet:
return {"Model name is:": model_name, "message": "Deep Learning FTW!"}
elif model_name == ModelName.lenet:
return {"Model name is:": model_name, "message": "LeCNN all the images"}
else:
return {"Model name is:": model_name, "message": "Have some residuals"} | en | 0.828529 | # we can return a dict, list, singular values as str, int, etc. | 2.786028 | 3 |
endrpi/routes/pin.py | persanix-llc/endrpi-server | 2 | 6630173 |
# Copyright (c) 2020 - 2021 Persanix LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
from fastapi import APIRouter, status
from endrpi.actions.pin import read_pin_configurations, read_pin_configuration, update_pin_configuration
from endrpi.model.action_result import ActionResult, error_action_result
from endrpi.model.message import MessageData, PinMessage
from endrpi.model.pin import PinConfiguration, RaspberryPiPinIds, PinIo
from endrpi.utils.api import http_response
# Router that is exported to the server
router = APIRouter()
@router.get(
'/pins',
name='All pin configurations.',
description='Gets Pin configurations for each pin on the board.',
responses={
status.HTTP_200_OK: {
'model': Union[ActionResult[Dict[str, PinConfiguration]], ActionResult[PinConfiguration]]
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def get_pin_configurations_route():
pin_ids = list(RaspberryPiPinIds)
pin_states_action_result = read_pin_configurations(pin_ids)
return http_response(pin_states_action_result)
@router.get(
'/pins/{bcm_id}',
name='Pin configuration.',
description='Gets pin configuration for a specific pin using its BCM number (i.e. \'GPIO17\').',
responses={
status.HTTP_200_OK: {
'model': PinConfiguration
},
status.HTTP_404_NOT_FOUND: {
'model': MessageData,
'description': PinMessage.ERROR_NOT_FOUND__PIN_ID__,
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def get_pin_configuration_route(bcm_id: str):
valid_pin_id = RaspberryPiPinIds.from_bcm_id(bcm_id)
if valid_pin_id:
pin_action_result = read_pin_configuration(valid_pin_id)
return http_response(pin_action_result)
else:
action_result = error_action_result(PinMessage.ERROR_NOT_FOUND__PIN_ID__.format(pin_id=bcm_id))
return http_response(action_result, status.HTTP_404_NOT_FOUND)
@router.put(
'/pins/{bcm_id}',
description='Updates pin configuration for a specific pin using its BCM number (i.e. \'GPIO17\').',
responses={
status.HTTP_200_OK: {
'model': PinConfiguration
},
status.HTTP_404_NOT_FOUND: {
'model': MessageData,
'description': PinMessage.ERROR_NOT_FOUND__PIN_ID__,
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def put_pin_state_param_route(bcm_id: str, pin_configuration: PinConfiguration):
valid_pin_id = RaspberryPiPinIds.from_bcm_id(bcm_id)
if valid_pin_id:
# Input configurations must specify a pin pull, output configurations must specify a state
if pin_configuration.io is PinIo.INPUT and not pin_configuration.pull:
action_result = error_action_result(PinMessage.ERROR_NO_INPUT_PULL)
return http_response(action_result, status.HTTP_400_BAD_REQUEST)
elif pin_configuration.io is PinIo.OUTPUT and pin_configuration.state is None:
action_result = error_action_result(PinMessage.ERROR_NO_OUTPUT_STATE)
return http_response(action_result, status.HTTP_400_BAD_REQUEST)
action_result = update_pin_configuration(valid_pin_id, pin_configuration)
return http_response(action_result)
else:
action_result = error_action_result(PinMessage.ERROR_NOT_FOUND__PIN_ID__.format(pin_id=bcm_id))
return http_response(action_result, status.HTTP_404_NOT_FOUND)
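# Illustrative sketch (added for this write-up, not part of the original file): this module
# only defines `router`; the endrpi server is assumed to mount it on a FastAPI app, roughly:
#
#   from fastapi import FastAPI
#   from endrpi.routes.pin import router as pin_router
#
#   app = FastAPI()
#   app.include_router(pin_router)
#
# A GET /pins/GPIO17 then resolves the BCM id via RaspberryPiPinIds.from_bcm_id() and returns
# the pin's configuration, or a 404 MessageData body when the id is unknown.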
| # Copyright (c) 2020 - 2021 Persanix LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
from fastapi import APIRouter, status
from endrpi.actions.pin import read_pin_configurations, read_pin_configuration, update_pin_configuration
from endrpi.model.action_result import ActionResult, error_action_result
from endrpi.model.message import MessageData, PinMessage
from endrpi.model.pin import PinConfiguration, RaspberryPiPinIds, PinIo
from endrpi.utils.api import http_response
# Router that is exported to the server
router = APIRouter()
@router.get(
'/pins',
name='All pin configurations.',
description='Gets Pin configurations for each pin on the board.',
responses={
status.HTTP_200_OK: {
'model': Union[ActionResult[Dict[str, PinConfiguration]], ActionResult[PinConfiguration]]
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def get_pin_configurations_route():
pin_ids = list(RaspberryPiPinIds)
pin_states_action_result = read_pin_configurations(pin_ids)
return http_response(pin_states_action_result)
@router.get(
'/pins/{bcm_id}',
name='Pin configuration.',
description='Gets pin configuration for a specific pin using its BCM number (i.e. \'GPIO17\').',
responses={
status.HTTP_200_OK: {
'model': PinConfiguration
},
status.HTTP_404_NOT_FOUND: {
'model': MessageData,
'description': PinMessage.ERROR_NOT_FOUND__PIN_ID__,
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def get_pin_configuration_route(bcm_id: str):
valid_pin_id = RaspberryPiPinIds.from_bcm_id(bcm_id)
if valid_pin_id:
pin_action_result = read_pin_configuration(valid_pin_id)
return http_response(pin_action_result)
else:
action_result = error_action_result(PinMessage.ERROR_NOT_FOUND__PIN_ID__.format(pin_id=bcm_id))
return http_response(action_result, status.HTTP_404_NOT_FOUND)
@router.put(
'/pins/{bcm_id}',
description='Updates pin configuration for a specific pin using its BCM number (i.e. \'GPIO17\').',
responses={
status.HTTP_200_OK: {
'model': PinConfiguration
},
status.HTTP_404_NOT_FOUND: {
'model': MessageData,
'description': PinMessage.ERROR_NOT_FOUND__PIN_ID__,
},
status.HTTP_500_INTERNAL_SERVER_ERROR: {
'model': MessageData,
'description': 'An error occurred',
}
}
)
async def put_pin_state_param_route(bcm_id: str, pin_configuration: PinConfiguration):
valid_pin_id = RaspberryPiPinIds.from_bcm_id(bcm_id)
if valid_pin_id:
# Input configurations must specify a pin pull, output configurations must specify a state
if pin_configuration.io is PinIo.INPUT and not pin_configuration.pull:
action_result = error_action_result(PinMessage.ERROR_NO_INPUT_PULL)
return http_response(action_result, status.HTTP_400_BAD_REQUEST)
elif pin_configuration.io is PinIo.OUTPUT and pin_configuration.state is None:
action_result = error_action_result(PinMessage.ERROR_NO_OUTPUT_STATE)
return http_response(action_result, status.HTTP_400_BAD_REQUEST)
action_result = update_pin_configuration(valid_pin_id, pin_configuration)
return http_response(action_result)
else:
action_result = error_action_result(PinMessage.ERROR_NOT_FOUND__PIN_ID__.format(pin_id=bcm_id))
return http_response(action_result, status.HTTP_404_NOT_FOUND) | en | 0.815656 | # Copyright (c) 2020 - 2021 Persanix LLC. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Router that is exported to the server # Input configurations must specify a pin pull, output configurations must specify a state | 1.906995 | 2 |
src/metadata.py | aditya-shri/libback | 0 | 6630174 | import datetime
import json
import os
import re
import time
import requests
import src.tree
import src.walk
def parseMovie(name):
reg_1 = r'^[\(\[\{](?P<year>\d{4})[\)\]\}]\s(?P<title>[^.]+).*(?P<extention>\..*)?$' # (2008) Iron Man.mkv
reg_2 = r'^(?P<title>.*)\s[\(\[\{](?P<year>\d{4})[\)\]\}].*(?P<extention>\..*)?$' # Iron Man (2008).mkv
reg_3 = r'^(?P<title>(?:(?!\.\d{4}).)*)\.(?P<year>\d{4}).*(?P<extention>\..*)?$' # Iron.Man.2008.1080p.WEBRip.DDP5.1.Atmos.x264.mkv
reg_4 = r'^(?P<year>)(?P<title>.*).*(?P<extention>\..*?$)' # Iron Man.mkv
if re.match(reg_1, name):
match = re.search(reg_1, name)
elif re.match(reg_2, name):
match = re.search(reg_2, name)
elif re.match(reg_3, name):
match = re.search(reg_3, name)
return match["title"].replace(".", " "), match["year"]
elif re.match(reg_4, name):
match = re.search(reg_4, name)
else:
return
return match["title"], match["year"]
def parseTV(name):
reg_1 = r'^[\(\[\{](?P<year>\d{4})[\)\]\}]\s(?P<title>[^.]+).*$' # (2019) The Mandalorian
reg_2 = r'^(?P<title>.*)\s[\(\[\{](?P<year>\d{4})[\)\]\}].*$' # The Mandalorian (2019)
reg_3 = r'^(?P<title>(?:(?!\.\d{4}).)*)\.(?P<year>\d{4}).*$' # The.Mandalorian.2019.1080p.WEBRip
reg_4 = r'^(?P<year>)(?P<title>.*)$' # The Mandalorian
if re.match(reg_1, name):
match = re.search(reg_1, name)
elif re.match(reg_2, name):
match = re.search(reg_2, name)
elif re.match(reg_3, name):
match = re.search(reg_3, name)
return match["title"].replace(".", " "), match["year"]
elif re.match(reg_4, name):
match = re.search(reg_4, name)
else:
return
return match["title"], match["year"]
def mediaIdentifier(tmdb_api_key, title, year, backdrop_base_url, poster_base_url, movie=False, tv=False):
if movie:
search_url = "https://api.themoviedb.org/3/search/movie?api_key=%s&query=%s&year=%s" % (
tmdb_api_key, title, year)
search_content = json.loads((requests.get(search_url)).content)
try:
title = search_content["results"][0]["title"]
except:
pass
try:
posterPath = poster_base_url + \
search_content["results"][0]["poster_path"]
except:
posterPath = ""
try:
backdropPath = backdrop_base_url + \
search_content["results"][0]["backdrop_path"]
except:
backdropPath = ""
try:
releaseDate = search_content["results"][0]["release_date"]
except:
releaseDate = "%s-01-01" % (year)
try:
overview = search_content["results"][0]["overview"]
except:
overview = ""
try:
popularity = search_content["results"][0]["popularity"]
except:
popularity = 0.0
return title, posterPath, backdropPath, releaseDate, overview, popularity
elif tv:
search_url = "https://api.themoviedb.org/3/search/tv?api_key=%s&query=%s&first_air_date_year=%s" % (
tmdb_api_key, title, year)
search_content = json.loads((requests.get(search_url)).content)
try:
title = search_content["results"][0]["name"]
except:
pass
try:
posterPath = poster_base_url + \
search_content["results"][0]["poster_path"]
except:
posterPath = ""
try:
backdropPath = backdrop_base_url + \
search_content["results"][0]["backdrop_path"]
except:
backdropPath = ""
try:
releaseDate = search_content["results"][0]["first_air_date"]
except:
releaseDate = "%s-01-01" % (year)
try:
overview = search_content["results"][0]["overview"]
except:
overview = ""
try:
popularity = search_content["results"][0]["popularity"]
except:
popularity = 0.0
return title, posterPath, backdropPath, releaseDate, overview, popularity
def readMetadata(category_list):
try:
os.mkdir("metadata")
except:
pass
metadata_dir = os.listdir("metadata")
if len(metadata_dir) == 0:
metadata = []
for category in category_list:
tmp = category
tmp["children"] = []
metadata.append(tmp)
elif len(metadata_dir) <= 5:
metadata_file = max(metadata_dir)
with open("metadata/%s" % (metadata_file), "r") as r:
metadata = json.load(r)
elif len(metadata_dir) > 5:
os.remove("metadata/%s" % (min(metadata_dir)))
metadata_file = max(metadata_dir)
with open("metadata/%s" % (metadata_file), "r") as r:
metadata = json.load(r)
else:
pass
return metadata
def writeMetadata(category_list, drive, tmdb_api_key):
configuration_url = "https://api.themoviedb.org/3/configuration?api_key=%s" % (
tmdb_api_key)
configuration_content = json.loads(requests.get(configuration_url).content)
backdrop_base_url = configuration_content["images"]["secure_base_url"] + \
configuration_content["images"]["backdrop_sizes"][3]
poster_base_url = configuration_content["images"]["secure_base_url"] + \
configuration_content["images"]["poster_sizes"][3]
placeholder_metadata = []
count = 0
for category in category_list:
count += 1
start_time = datetime.datetime.utcnow()
print("Building metadata for category %s/%s (%s)" % (count, len(category_list), category["name"]))
if category["type"] == "Movies":
root = drive.files().get(
fileId=category["id"], supportsAllDrives=True).execute()
tree = root
tree["type"] = "directory"
tree["children"] = []
tmp_metadata = src.walk.driveWalk(root, tree, [], drive)
tmp_metadata["children"] = [x for x in tmp_metadata["children"]
if x["mimeType"] != "application/vnd.google-apps.folder"]
tmp_metadata["categoryInfo"] = category
tmp_metadata["length"] = len(tmp_metadata["children"])
tmp_metadata["buildTime"] = str(datetime.datetime.utcnow())
for item in tmp_metadata["children"]:
if item["type"] == "file":
try:
title, year = parseMovie(item["name"])
item["title"], item["posterPath"], item["backdropPath"], item["releaseDate"], item["overview"], item["popularity"] = mediaIdentifier(
tmdb_api_key, title, year, backdrop_base_url, poster_base_url, True, False)
except:
item["title"], item["posterPath"], item["backdropPath"], item[
"releaseDate"], item["overview"] = item["name"], "", "", "1900-01-01", ""
placeholder_metadata.append(tmp_metadata)
elif category["type"] == "TV Shows":
root = drive.files().get(
fileId=category["id"], supportsAllDrives=True).execute()
tmp_metadata = src.tree.driveTree(root, drive)
tmp_metadata["categoryInfo"] = category
tmp_metadata["length"] = len(tmp_metadata["children"])
tmp_metadata["buildTime"] = str(datetime.datetime.utcnow())
for item in tmp_metadata["children"]:
if item["type"] == "directory":
try:
title, year = parseTV(item["name"])
item["title"], item["posterPath"], item["backdropPath"], item["releaseDate"], item["overview"], item["popularity"] = mediaIdentifier(
tmdb_api_key, title, year, backdrop_base_url, poster_base_url, False, True)
except:
item["title"], item["posterPath"], item["backdropPath"], item[
"releaseDate"], item["overview"] = item["name"], "", "", "1900-01-01", ""
placeholder_metadata.append(tmp_metadata)
print("Done in %s" % (str(datetime.datetime.utcnow() - start_time)))
metadata = placeholder_metadata
if os.path.exists("./metadata"):
pass
else:
os.mkdir("./metadata")
metadata_file_name = "metadata/%s.json" % (time.strftime("%Y%m%d-%H%M%S"))
with open(metadata_file_name, "w+") as w:
w.write(json.dumps(metadata))
return metadata
def jsonExtract(obj=list(), key="", getObj=True):
arr = []
arr2 = []
def extract(obj, arr, key):
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
arr2.append(obj)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr, arr2
values, values2 = extract(obj, arr, key)
if getObj == True:
return values2
else:
return values
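# Illustrative example (added for this write-up, not part of the original file): jsonExtract
# walks nested dicts/lists; getObj=False returns the matched values, getObj=True returns the
# dicts that contain the key.
#
#   doc = {"children": [{"title": "Iron Man", "year": "2008"},
#                       {"title": "The Mandalorian", "year": "2019"}]}
#   jsonExtract(doc, key="title", getObj=False)  # -> ["Iron Man", "The Mandalorian"]
#   jsonExtract(doc, key="title", getObj=True)   # -> the two child dicts themselves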
| import datetime
import json
import os
import re
import time
import requests
import src.tree
import src.walk
def parseMovie(name):
reg_1 = r'^[\(\[\{](?P<year>\d{4})[\)\]\}]\s(?P<title>[^.]+).*(?P<extention>\..*)?$' # (2008) Iron Man.mkv
reg_2 = r'^(?P<title>.*)\s[\(\[\{](?P<year>\d{4})[\)\]\}].*(?P<extention>\..*)?$' # Iron Man (2008).mkv
reg_3 = r'^(?P<title>(?:(?!\.\d{4}).)*)\.(?P<year>\d{4}).*(?P<extention>\..*)?$' # Iron.Man.2008.1080p.WEBRip.DDP5.1.Atmos.x264.mkv
reg_4 = r'^(?P<year>)(?P<title>.*).*(?P<extention>\..*?$)' # Iron Man.mkv
if re.match(reg_1, name):
match = re.search(reg_1, name)
elif re.match(reg_2, name):
match = re.search(reg_2, name)
elif re.match(reg_3, name):
match = re.search(reg_3, name)
return match["title"].replace(".", " "), match["year"]
elif re.match(reg_4, name):
match = re.search(reg_4, name)
else:
return
return match["title"], match["year"]
def parseTV(name):
reg_1 = r'^[\(\[\{](?P<year>\d{4})[\)\]\}]\s(?P<title>[^.]+).*$' # (2019) The Mandalorian
reg_2 = r'^(?P<title>.*)\s[\(\[\{](?P<year>\d{4})[\)\]\}].*$' # The Mandalorian (2019)
reg_3 = r'^(?P<title>(?:(?!\.\d{4}).)*)\.(?P<year>\d{4}).*$' # The.Mandalorian.2019.1080p.WEBRip
reg_4 = r'^(?P<year>)(?P<title>.*)$' # The Mandalorian
if re.match(reg_1, name):
match = re.search(reg_1, name)
elif re.match(reg_2, name):
match = re.search(reg_2, name)
elif re.match(reg_3, name):
match = re.search(reg_3, name)
return match["title"].replace(".", " "), match["year"]
elif re.match(reg_4, name):
match = re.search(reg_4, name)
else:
return
return match["title"], match["year"]
def mediaIdentifier(tmdb_api_key, title, year, backdrop_base_url, poster_base_url, movie=False, tv=False):
if movie:
search_url = "https://api.themoviedb.org/3/search/movie?api_key=%s&query=%s&year=%s" % (
tmdb_api_key, title, year)
search_content = json.loads((requests.get(search_url)).content)
try:
title = search_content["results"][0]["title"]
except:
pass
try:
posterPath = poster_base_url + \
search_content["results"][0]["poster_path"]
except:
posterPath = ""
try:
backdropPath = backdrop_base_url + \
search_content["results"][0]["backdrop_path"]
except:
backdropPath = ""
try:
releaseDate = search_content["results"][0]["release_date"]
except:
releaseDate = "%s-01-01" % (year)
try:
overview = search_content["results"][0]["overview"]
except:
overview = ""
try:
popularity = search_content["results"][0]["popularity"]
except:
popularity = 0.0
return title, posterPath, backdropPath, releaseDate, overview, popularity
elif tv:
search_url = "https://api.themoviedb.org/3/search/tv?api_key=%s&query=%s&first_air_date_year=%s" % (
tmdb_api_key, title, year)
search_content = json.loads((requests.get(search_url)).content)
try:
title = search_content["results"][0]["name"]
except:
pass
try:
posterPath = poster_base_url + \
search_content["results"][0]["poster_path"]
except:
posterPath = ""
try:
backdropPath = backdrop_base_url + \
search_content["results"][0]["backdrop_path"]
except:
backdropPath = ""
try:
releaseDate = search_content["results"][0]["first_air_date"]
except:
releaseDate = "%s-01-01" % (year)
try:
overview = search_content["results"][0]["overview"]
except:
overview = ""
try:
popularity = search_content["results"][0]["popularity"]
except:
popularity = 0.0
return title, posterPath, backdropPath, releaseDate, overview, popularity
def readMetadata(category_list):
try:
os.mkdir("metadata")
except:
pass
metadata_dir = os.listdir("metadata")
if len(metadata_dir) == 0:
metadata = []
for category in category_list:
tmp = category
tmp["children"] = []
metadata.append(tmp)
elif len(metadata_dir) <= 5:
metadata_file = max(metadata_dir)
with open("metadata/%s" % (metadata_file), "r") as r:
metadata = json.load(r)
elif len(metadata_dir) > 5:
os.remove("metadata/%s" % (min(metadata_dir)))
metadata_file = max(metadata_dir)
with open("metadata/%s" % (metadata_file), "r") as r:
metadata = json.load(r)
else:
pass
return metadata
def writeMetadata(category_list, drive, tmdb_api_key):
configuration_url = "https://api.themoviedb.org/3/configuration?api_key=%s" % (
tmdb_api_key)
configuration_content = json.loads(requests.get(configuration_url).content)
backdrop_base_url = configuration_content["images"]["secure_base_url"] + \
configuration_content["images"]["backdrop_sizes"][3]
poster_base_url = configuration_content["images"]["secure_base_url"] + \
configuration_content["images"]["poster_sizes"][3]
placeholder_metadata = []
count = 0
for category in category_list:
count += 1
start_time = datetime.datetime.utcnow()
print("Building metadata for category %s/%s (%s)" % (count, len(category_list), category["name"]))
if category["type"] == "Movies":
root = drive.files().get(
fileId=category["id"], supportsAllDrives=True).execute()
tree = root
tree["type"] = "directory"
tree["children"] = []
tmp_metadata = src.walk.driveWalk(root, tree, [], drive)
tmp_metadata["children"] = [x for x in tmp_metadata["children"]
if x["mimeType"] != "application/vnd.google-apps.folder"]
tmp_metadata["categoryInfo"] = category
tmp_metadata["length"] = len(tmp_metadata["children"])
tmp_metadata["buildTime"] = str(datetime.datetime.utcnow())
for item in tmp_metadata["children"]:
if item["type"] == "file":
try:
title, year = parseMovie(item["name"])
item["title"], item["posterPath"], item["backdropPath"], item["releaseDate"], item["overview"], item["popularity"] = mediaIdentifier(
tmdb_api_key, title, year, backdrop_base_url, poster_base_url, True, False)
except:
item["title"], item["posterPath"], item["backdropPath"], item[
"releaseDate"], item["overview"] = item["name"], "", "", "1900-01-01", ""
placeholder_metadata.append(tmp_metadata)
elif category["type"] == "TV Shows":
root = drive.files().get(
fileId=category["id"], supportsAllDrives=True).execute()
tmp_metadata = src.tree.driveTree(root, drive)
tmp_metadata["categoryInfo"] = category
tmp_metadata["length"] = len(tmp_metadata["children"])
tmp_metadata["buildTime"] = str(datetime.datetime.utcnow())
for item in tmp_metadata["children"]:
if item["type"] == "directory":
try:
title, year = parseTV(item["name"])
item["title"], item["posterPath"], item["backdropPath"], item["releaseDate"], item["overview"], item["popularity"] = mediaIdentifier(
tmdb_api_key, title, year, backdrop_base_url, poster_base_url, False, True)
except:
item["title"], item["posterPath"], item["backdropPath"], item[
"releaseDate"], item["overview"] = item["name"], "", "", "1900-01-01", ""
placeholder_metadata.append(tmp_metadata)
print("Done in %s" % (str(datetime.datetime.utcnow() - start_time)))
metadata = placeholder_metadata
if os.path.exists("./metadata"):
pass
else:
os.mkdir("./metadata")
metadata_file_name = "metadata/%s.json" % (time.strftime("%Y%m%d-%H%M%S"))
with open(metadata_file_name, "w+") as w:
w.write(json.dumps(metadata))
return metadata
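# Illustrative invocation sketch (assumed objects, not part of the original module):
# `drive` would be an authorised Google Drive API client and `category_list` the
# categories from the user's config, e.g.
#   metadata = writeMetadata(config["category_list"], drive, config["tmdb_api_key"])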
def jsonExtract(obj=list(), key="", getObj=True):
arr = []
arr2 = []
def extract(obj, arr, key):
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
arr2.append(obj)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr, arr2
values, values2 = extract(obj, arr, key)
    if getObj:
return values2
else:
return values
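# Illustrative usage sketch (not part of the original module); the sample structure is
# hypothetical and only demonstrates how jsonExtract walks nested JSON.
if __name__ == "__main__":
    sample = {"children": [{"name": "Iron Man", "type": "file"},
                           {"name": "Extras", "type": "directory"}]}
    print(jsonExtract(sample, key="name", getObj=False))  # -> ['Iron Man', 'Extras']
    print(jsonExtract(sample, key="name", getObj=True))   # -> the two matching child objects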
| en | 0.298259 | # (2008) Iron Man.mkv # Iron Man (2008).mkv # Iron.Man.2008.1080p.WEBRip.DDP5.1.Atmos.x264.mkv # Iron Man.mkv # (2019) The Mandalorian # The Mandalorian (2019) # The.Mandalorian.2019.1080p.WEBRip # The Mandalorian | 2.92988 | 3 |
examples/example.py | salimfadhleyhtp/python-connector-api | 0 | 6630175 | <filename>examples/example.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding:=utf-8
import argparse
import datetime as dt
import logging
import sys
import meteomatics.api as api
from meteomatics.logger import create_log_handler
from meteomatics._constants_ import LOGGERNAME
'''
For further information on available parameters, models etc. please visit
api.meteomatics.com
In case of questions just write a mail to:
<EMAIL>
'''
###Credentials:
username = 'python-community'
password = '<PASSWORD>'
def example():
_logger = logging.getLogger(LOGGERNAME)
###Input timeseries:
now = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
startdate_ts = now
enddate_ts = startdate_ts + dt.timedelta(days=1)
interval_ts = dt.timedelta(hours=1)
coordinates_ts = [(47.249297, 9.342854), (50., 10.)]
parameters_ts = ['t_2m:C', 'rr_1h:mm']
model = 'mix'
ens_select = None # e.g. 'median'
cluster_select = None # e.g. "cluster:1", see http://api.meteomatics.com/API-Request.html#cluster-selection
interp_select = 'gradient_interpolation'
###Input grid / grid unpivoted:
lat_N = 50
lon_W = -15
lat_S = 20
lon_E = 10
res_lat = 3
res_lon = 3
startdate_grid = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
parameter_grid = 'evapotranspiration_1h:mm' # 't_2m:C'
parameters_grid_unpiv = ['t_2m:C', 'rr_1h:mm']
valid_dates_unpiv = [dt.datetime.utcnow(), dt.datetime.utcnow() + dt.timedelta(days=1)]
###input grid png
filename_png = "grid_target.png"
startdate_png = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
parameter_png = 't_2m:C'
###input lightning
startdate_l = dt.datetime.utcnow() - dt.timedelta(days=1)
enddate_l = dt.datetime.utcnow() - dt.timedelta(minutes=5)
lat_N_l = 90
lon_W_l = -180
lat_S_l = -90
lon_E_l = 180
###input netcdf
filename_nc = "path_netcdf/netcdf_target.nc"
startdate_nc = now
enddate_nc = startdate_nc + dt.timedelta(days=1)
interval_nc = dt.timedelta(days=1)
parameter_nc = 't_2m:C'
###input png timeseries
# prefixpath_png_ts = 'path/to/directory' #TODO
prefixpath_png_ts = '' # TODO
startdate_png_ts = now
enddate_png_ts = startdate_png_ts + dt.timedelta(days=2)
interval_png_ts = dt.timedelta(hours=12)
parameter_png_ts = 't_2m:C'
###input station data timeseries
startdate_station_ts = now - dt.timedelta(days=2)
enddate_station_ts = now - dt.timedelta(hours=3)
interval_station_ts = dt.timedelta(hours=1)
parameters_station_ts = ['t_2m:C', 'wind_speed_10m:ms', 'precip_1h:mm']
model_station_ts = 'mix-obs'
coordinates_station_ts = [(47.43, 9.4), (50.03, 8.52)] # St. Gallen / Frankfurt/Main
wmo_stations = ['066810'] # St. Gallen
metar_stations = ['EDDF'] # Frankfurt/Main
mch_stations = ['STG'] # MeteoSchweiz Station St. Gallen
limits = api.query_user_features(username, password)
_logger.info("\ntime series:")
try:
df_ts = api.query_time_series(coordinates_ts, startdate_ts, enddate_ts, interval_ts, parameters_ts,
username, password, model, ens_select, interp_select,
cluster_select=cluster_select)
_logger.info("Dataframe head \n" + df_ts.head().to_string())
except Exception as e:
_logger.info("Failed, the exception is {}".format(e))
_logger.info("\npng timeseries:")
try:
api.query_png_timeseries(prefixpath_png_ts, startdate_png_ts, enddate_png_ts, interval_png_ts, parameter_png_ts,
lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password)
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
if limits['area request option']:
_logger.info("\ngrid:")
try:
df_grid = api.query_grid(startdate_grid, parameter_grid, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon,
username, password)
_logger.info ("Dataframe head \n" + df_grid.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nunpivoted grid:")
try:
df_grid_unpivoted = api.query_grid_unpivoted(valid_dates_unpiv, parameters_grid_unpiv, lat_N, lon_W, lat_S,
lon_E, res_lat, res_lon, username, password)
_logger.info ("Dataframe head \n" + df_grid_unpivoted.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\ngrid timeseries:")
try:
df_grid_timeseries = api.query_grid_timeseries(startdate_ts, enddate_ts, interval_ts, parameters_ts, lat_N,
lon_W, lat_S, lon_E, res_lat, res_lon, username, password)
_logger.info ("Dataframe head \n" + df_grid_timeseries.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\ngrid as a png:")
try:
api.query_grid_png(filename_png, startdate_png, parameter_png, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon,
username, password)
_logger.info("filename = {}".format(filename_png))
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include area requests.
With the corresponding upgrade you could query whole grids of data at once or even time series of grids.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if limits['historic request option'] and limits['area request option']:
        _logger.info("\nlightning strokes as csv:")
try:
df_lightning = api.query_lightnings(startdate_l, enddate_l, lat_N_l, lon_W_l, lat_S_l, lon_E_l, username,
password)
_logger.info("Dataframe head \n" + df_lightning.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include historic requests.
With the corresponding upgrade you could query data from the past as well as forecasts.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if limits['model select option']:
_logger.info("\nnetCDF file:")
try:
api.query_netcdf(filename_nc, startdate_nc, enddate_nc, interval_nc, parameter_nc, lat_N, lon_W, lat_S,
lon_E,
res_lat, res_lon, username, password)
_logger.info("filename = {}".format(filename_nc))
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nfind stations:")
try:
met = api.query_station_list(username, password, startdate=startdate_station_ts, enddate=enddate_station_ts,
parameters=parameters_station_ts)
_logger.info("Dataframe head \n" + met.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nstation coordinates timeseries:")
try:
df_sd_coord = api.query_station_timeseries(startdate_station_ts, enddate_station_ts, interval_station_ts,
parameters_station_ts, username, password,
model=model_station_ts,
latlon_tuple_list=coordinates_station_ts,
on_invalid='fill_with_invalid', request_type="POST",
temporal_interpolation='none')
_logger.info("Dataframe head \n" + df_sd_coord.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nstation wmo + metar ids timeseries:")
try:
df_sd_ids = api.query_station_timeseries(startdate_station_ts, enddate_station_ts, interval_station_ts,
parameters_station_ts, username, password, model=model_station_ts,
wmo_ids=wmo_stations, metar_ids=metar_stations,
mch_ids=mch_stations, on_invalid='fill_with_invalid',
request_type="POST", temporal_interpolation='none')
_logger.info("Dataframe head \n" + df_sd_ids.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nget init dates:")
try:
df_init_dates = api.query_init_date(now, now + dt.timedelta(days=2), dt.timedelta(hours=3), 't_2m:C',
username,
password, '<PASSWORD>')
_logger.info("Dataframe head \n" + df_init_dates.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nget available time ranges:")
try:
df_time_ranges = api.query_available_time_ranges(['t_2m:C', 'precip_6h:mm'], username, password,
'<PASSWORD>')
_logger.info("Dataframe head \n" + df_time_ranges.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include model selection.
With the corresponding upgrade you could query data from stations and request your data in netcdf.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--username', default=username)
parser.add_argument('--password', default=password)
arguments = parser.parse_args()
username = arguments.username
password = arguments.password
    if username is None or password is None:
        # _logger is only defined inside example(), so report the problem and exit directly
        sys.exit(
            "You need to provide a username and a password, either on the command line or by inserting them in the script")
create_log_handler()
logging.getLogger(LOGGERNAME).setLevel(logging.INFO)
example()
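# Example invocation (illustrative): credentials can be passed on the command line, e.g.
#   python example.py --username python-community --password <your-password>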
| <filename>examples/example.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding:=utf-8
import argparse
import datetime as dt
import logging
import sys
import meteomatics.api as api
from meteomatics.logger import create_log_handler
from meteomatics._constants_ import LOGGERNAME
'''
For further information on available parameters, models etc. please visit
api.meteomatics.com
In case of questions just write a mail to:
<EMAIL>
'''
###Credentials:
username = 'python-community'
password = '<PASSWORD>'
def example():
_logger = logging.getLogger(LOGGERNAME)
###Input timeseries:
now = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
startdate_ts = now
enddate_ts = startdate_ts + dt.timedelta(days=1)
interval_ts = dt.timedelta(hours=1)
coordinates_ts = [(47.249297, 9.342854), (50., 10.)]
parameters_ts = ['t_2m:C', 'rr_1h:mm']
model = 'mix'
ens_select = None # e.g. 'median'
cluster_select = None # e.g. "cluster:1", see http://api.meteomatics.com/API-Request.html#cluster-selection
interp_select = 'gradient_interpolation'
###Input grid / grid unpivoted:
lat_N = 50
lon_W = -15
lat_S = 20
lon_E = 10
res_lat = 3
res_lon = 3
startdate_grid = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
parameter_grid = 'evapotranspiration_1h:mm' # 't_2m:C'
parameters_grid_unpiv = ['t_2m:C', 'rr_1h:mm']
valid_dates_unpiv = [dt.datetime.utcnow(), dt.datetime.utcnow() + dt.timedelta(days=1)]
###input grid png
filename_png = "grid_target.png"
startdate_png = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
parameter_png = 't_2m:C'
###input lightning
startdate_l = dt.datetime.utcnow() - dt.timedelta(days=1)
enddate_l = dt.datetime.utcnow() - dt.timedelta(minutes=5)
lat_N_l = 90
lon_W_l = -180
lat_S_l = -90
lon_E_l = 180
###input netcdf
filename_nc = "path_netcdf/netcdf_target.nc"
startdate_nc = now
enddate_nc = startdate_nc + dt.timedelta(days=1)
interval_nc = dt.timedelta(days=1)
parameter_nc = 't_2m:C'
###input png timeseries
# prefixpath_png_ts = 'path/to/directory' #TODO
prefixpath_png_ts = '' # TODO
startdate_png_ts = now
enddate_png_ts = startdate_png_ts + dt.timedelta(days=2)
interval_png_ts = dt.timedelta(hours=12)
parameter_png_ts = 't_2m:C'
###input station data timeseries
startdate_station_ts = now - dt.timedelta(days=2)
enddate_station_ts = now - dt.timedelta(hours=3)
interval_station_ts = dt.timedelta(hours=1)
parameters_station_ts = ['t_2m:C', 'wind_speed_10m:ms', 'precip_1h:mm']
model_station_ts = 'mix-obs'
coordinates_station_ts = [(47.43, 9.4), (50.03, 8.52)] # St. Gallen / Frankfurt/Main
wmo_stations = ['066810'] # St. Gallen
metar_stations = ['EDDF'] # Frankfurt/Main
mch_stations = ['STG'] # MeteoSchweiz Station St. Gallen
limits = api.query_user_features(username, password)
_logger.info("\ntime series:")
try:
df_ts = api.query_time_series(coordinates_ts, startdate_ts, enddate_ts, interval_ts, parameters_ts,
username, password, model, ens_select, interp_select,
cluster_select=cluster_select)
_logger.info("Dataframe head \n" + df_ts.head().to_string())
except Exception as e:
_logger.info("Failed, the exception is {}".format(e))
_logger.info("\npng timeseries:")
try:
api.query_png_timeseries(prefixpath_png_ts, startdate_png_ts, enddate_png_ts, interval_png_ts, parameter_png_ts,
lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password)
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
if limits['area request option']:
_logger.info("\ngrid:")
try:
df_grid = api.query_grid(startdate_grid, parameter_grid, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon,
username, password)
_logger.info ("Dataframe head \n" + df_grid.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nunpivoted grid:")
try:
df_grid_unpivoted = api.query_grid_unpivoted(valid_dates_unpiv, parameters_grid_unpiv, lat_N, lon_W, lat_S,
lon_E, res_lat, res_lon, username, password)
_logger.info ("Dataframe head \n" + df_grid_unpivoted.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\ngrid timeseries:")
try:
df_grid_timeseries = api.query_grid_timeseries(startdate_ts, enddate_ts, interval_ts, parameters_ts, lat_N,
lon_W, lat_S, lon_E, res_lat, res_lon, username, password)
_logger.info ("Dataframe head \n" + df_grid_timeseries.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\ngrid as a png:")
try:
api.query_grid_png(filename_png, startdate_png, parameter_png, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon,
username, password)
_logger.info("filename = {}".format(filename_png))
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include area requests.
With the corresponding upgrade you could query whole grids of data at once or even time series of grids.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if limits['historic request option'] and limits['area request option']:
        _logger.info("\nlightning strokes as csv:")
try:
df_lightning = api.query_lightnings(startdate_l, enddate_l, lat_N_l, lon_W_l, lat_S_l, lon_E_l, username,
password)
_logger.info("Dataframe head \n" + df_lightning.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include historic requests.
With the corresponding upgrade you could query data from the past as well as forecasts.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if limits['model select option']:
_logger.info("\nnetCDF file:")
try:
api.query_netcdf(filename_nc, startdate_nc, enddate_nc, interval_nc, parameter_nc, lat_N, lon_W, lat_S,
lon_E,
res_lat, res_lon, username, password)
_logger.info("filename = {}".format(filename_nc))
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nfind stations:")
try:
met = api.query_station_list(username, password, startdate=startdate_station_ts, enddate=enddate_station_ts,
parameters=parameters_station_ts)
_logger.info("Dataframe head \n" + met.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nstation coordinates timeseries:")
try:
df_sd_coord = api.query_station_timeseries(startdate_station_ts, enddate_station_ts, interval_station_ts,
parameters_station_ts, username, password,
model=model_station_ts,
latlon_tuple_list=coordinates_station_ts,
on_invalid='fill_with_invalid', request_type="POST",
temporal_interpolation='none')
_logger.info("Dataframe head \n" + df_sd_coord.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nstation wmo + metar ids timeseries:")
try:
df_sd_ids = api.query_station_timeseries(startdate_station_ts, enddate_station_ts, interval_station_ts,
parameters_station_ts, username, password, model=model_station_ts,
wmo_ids=wmo_stations, metar_ids=metar_stations,
mch_ids=mch_stations, on_invalid='fill_with_invalid',
request_type="POST", temporal_interpolation='none')
_logger.info("Dataframe head \n" + df_sd_ids.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nget init dates:")
try:
df_init_dates = api.query_init_date(now, now + dt.timedelta(days=2), dt.timedelta(hours=3), 't_2m:C',
username,
password, '<PASSWORD>')
_logger.info("Dataframe head \n" + df_init_dates.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
_logger.info("\nget available time ranges:")
try:
df_time_ranges = api.query_available_time_ranges(['t_2m:C', 'precip_6h:mm'], username, password,
'<PASSWORD>')
_logger.info("Dataframe head \n" + df_time_ranges.head().to_string())
except Exception as e:
_logger.error("Failed, the exception is {}".format(e))
else:
_logger.error("""
Your account '{}' does not include model selection.
With the corresponding upgrade you could query data from stations and request your data in netcdf.
Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer.
""".format(username)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--username', default=username)
parser.add_argument('--password', default=password)
arguments = parser.parse_args()
username = arguments.username
password = arguments.password
    if username is None or password is None:
        # _logger is only defined inside example(), so report the problem and exit directly
        sys.exit(
            "You need to provide a username and a password, either on the command line or by inserting them in the script")
create_log_handler()
logging.getLogger(LOGGERNAME).setLevel(logging.INFO)
example()
| en | 0.79819 | #!/usr/bin/env python # -*- coding: utf-8 -*- # coding:=utf-8 For further information on available parameters, models etc. please visit api.meteomatics.com In case of questions just write a mail to: <EMAIL> ###Credentials: ###Input timeseries: # e.g. 'median' # e.g. "cluster:1", see http://api.meteomatics.com/API-Request.html#cluster-selection ###Input grid / grid unpivoted: # 't_2m:C' ###input grid png ###input lightning ###input netcdf ###input png timeseries # prefixpath_png_ts = 'path/to/directory' #TODO # TODO ###input station data timeseries # St. Gallen / Frankfurt/Main # St. Gallen # Frankfurt/Main # MeteoSchweiz Station St. Gallen Your account '{}' does not include area requests. With the corresponding upgrade you could query whole grids of data at once or even time series of grids. Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer. Your account '{}' does not include historic requests. With the corresponding upgrade you could query data from the past as well as forecasts. Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer. Your account '{}' does not include model selection. With the corresponding upgrade you could query data from stations and request your data in netcdf. Please check http://shop.meteomatics.com or contact us at <EMAIL> for an individual offer. | 2.428308 | 2 |
app.py | yoshiohasegawa/mindflex-server | 0 | 6630176 | from flask import Flask, json, jsonify, request
from pymongo import MongoClient
import os
# Load .env variables
db_name = os.environ.get('DB_NAME')
db_username = os.environ.get('DB_USERNAME')
db_password = os.environ.get('DB_PASSWORD')
# Initialize Flask server
app = Flask(__name__)
# Establish MongoDB connection
client = MongoClient(f'mongodb+srv://{db_username}:{db_password}@mindflexcluster.csmom.mongodb.net/{db_name}?retryWrites=true&w=majority')
# Connect to mindflex database
db = client.get_database('mindflex')
# questions table
questions = db.questions
# Parse _id property value of ObjectId to String
def parseObjectId(obj):
obj['_id'] = str(obj['_id'])
return obj
# Homepage, simply return a welcome message
@app.route('/', methods=['GET'])
def homepage():
    return 'Welcome to the Mindflex API'
# Questions GET route
@app.route('/api/questions', methods=['GET'])
def get_questions():
questions_data = list(questions.find())
questions_data = list(map(parseObjectId, questions_data))
return jsonify({'data': questions_data})
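# Illustrative client call (not part of the app); assumes the server is running locally
# on Flask's default port 5000:
#   import requests
#   questions = requests.get('http://127.0.0.1:5000/api/questions').json()['data']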
if __name__ == '__main__':
app.run(debug=True) | from flask import Flask, json, jsonify, request
from pymongo import MongoClient
import os
# Load .env variables
db_name = os.environ.get('DB_NAME')
db_username = os.environ.get('DB_USERNAME')
db_password = os.environ.get('DB_PASSWORD')
# Initialize Flask server
app = Flask(__name__)
# Establish MongoDB connection
client = MongoClient(f'mongodb+srv://{db_username}:{db_password}@mindflexcluster.csmom.mongodb.net/{db_name}?retryWrites=true&w=majority')
# Connect to mindflex database
db = client.get_database('mindflex')
# questions table
questions = db.questions
# Parse _id property value of ObjectId to String
def parseObjectId(obj):
obj['_id'] = str(obj['_id'])
return obj
# Homepage, simply return a welcome message
@app.route('/', methods=['GET'])
def homepage():
    return 'Welcome to the Mindflex API'
# Questions GET route
@app.route('/api/questions', methods=['GET'])
def get_questions():
questions_data = list(questions.find())
questions_data = list(map(parseObjectId, questions_data))
return jsonify({'data': questions_data})
if __name__ == '__main__':
app.run(debug=True) | en | 0.456735 | # Load .env variables # Initialize Flask server # Establish MongoDB connection # Connect to mindflex database # questions table # Parse _id property value of ObjectId to String # Homepage, simply return a welcome message # Questions GET route | 2.973657 | 3 |
inverted_index/utils/redis_init.py | chachazhu/inverted-index.py | 0 | 6630177 | <filename>inverted_index/utils/redis_init.py
import redis
def con(host, port, db):
"""Redis connection."""
return redis.StrictRedis(
host=host,
port=port,
db=db)
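# Illustrative usage (assumes a Redis server listening on localhost:6379; the key and
# value below are hypothetical and used only for demonstration):
if __name__ == "__main__":
    r = con("localhost", 6379, 0)
    r.set("example-key", "example-value")
    print(r.get("example-key"))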
| <filename>inverted_index/utils/redis_init.py
import redis
def con(host, port, db):
"""Redis connection."""
return redis.StrictRedis(
host=host,
port=port,
db=db)
| en | 0.731475 | Redis connection. | 2.420348 | 2 |
lib/googlecloudsdk/api_lib/compute/csek_utils.py | eyalev/gcloud | 0 | 6630178 | <filename>lib/googlecloudsdk/api_lib/compute/csek_utils.py
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for managing customer supplied encryption keys."""
import abc
import base64
import json
import re
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import exceptions as core_exceptions
CSEK_HELP_URL = ('https://cloud.google.com/compute/docs/disks/'
'customer-supplied-encryption')
EXPECTED_RECORD_KEY_KEYS = set(['uri', 'key', 'key-type'])
BASE64_RAW_KEY_LENGTH_IN_CHARS = 44
BASE64_RSA_ENCRYPTED_KEY_LENGTH_IN_CHARS = 344
class InvalidKeyFileException(core_exceptions.Error):
"""There's a problem in a CSEK file."""
def __init__(self, base_message):
super(InvalidKeyFileException, self).__init__(
'{0}'.format(base_message))
# TODO(user) Update this message to include
# a link to friendly documentation.
class BadPatternException(InvalidKeyFileException):
"""A (e.g.) url pattern is bad and why."""
def __init__(self, pattern_type, pattern):
self.pattern_type = pattern_type
self.pattern = pattern
super(BadPatternException, self).__init__(
'Invalid value for [{0}] pattern: [{1}]'.format(
self.pattern_type,
self.pattern))
class InvalidKeyExceptionNoContext(InvalidKeyFileException):
"""Indicate that a particular key is bad and why."""
def __init__(self, key, issue):
self.key = key
self.issue = issue
super(InvalidKeyExceptionNoContext, self).__init__(
'Invalid key, [{0}] : {1}'.format(
self.key,
self.issue))
class InvalidKeyException(InvalidKeyFileException):
"""Indicate that a particular key is bad, why, and where."""
def __init__(self, key, key_id, issue):
self.key = key
self.key_id = key_id
self.issue = issue
super(InvalidKeyException, self).__init__(
'Invalid key, [{0}], for [{1}]: {2}'.format(
self.key,
self.key_id,
self.issue))
def ValidateKey(base64_encoded_string, expected_key_length):
"""ValidateKey(s, k) returns None or raises InvalidKeyExceptionNoContext."""
if expected_key_length < 1:
    raise ValueError('ValidateKey requires expected_key_length >= 1. Got {0}'
.format(expected_key_length))
if len(base64_encoded_string) != expected_key_length:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key should contain {0} characters (including padding), '
'but is [{1}] characters long.'.format(
expected_key_length,
len(base64_encoded_string)))
if base64_encoded_string[-1] != '=':
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Bad padding. Keys should end with an \'=\' character.')
try:
base64_encoded_string_as_str = base64_encoded_string.encode('ascii')
except UnicodeDecodeError:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key contains non-ascii characters.')
if not re.match(r'^[a-zA-Z0-9+/=]*$', base64_encoded_string_as_str):
raise InvalidKeyExceptionNoContext(
base64_encoded_string_as_str,
'Key contains unexpected characters. Base64 encoded strings '
'contain only letters (upper or lower case), numbers, '
'plusses \'+\', slashes \'/\', or equality signs \'=\'.')
try:
base64.b64decode(base64_encoded_string_as_str)
except TypeError as t:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key is not valid base64: [{0}].'.format(t.message))
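# Illustrative check (added for clarity): a 256-bit raw key base64-encodes to 44
# characters including its single '=' padding, so for example
#   ValidateKey('A' * 43 + '=', expected_key_length=44)
# returns without raising, while shorter or badly padded strings raise
# InvalidKeyExceptionNoContext.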
class CsekKeyBase(object):
"""A class representing for Csek keys."""
__metaclass__ = abc.ABCMeta
def __init__(self, key_material):
ValidateKey(key_material, expected_key_length=self.GetKeyLength())
self._key_material = key_material
@staticmethod
def MakeKey(key_material, key_type, allow_rsa_encrypted=False):
"""Make a CSEK key.
Args:
key_material: str, the key material for this key
key_type: str, the type of this key
allow_rsa_encrypted: bool, whether the key is allowed to be RSA-wrapped
Returns:
CsekRawKey or CsekRsaEncryptedKey derived from the given key material and
type.
Raises:
BadKeyTypeException: if the key is not a valid key type
"""
if key_type == 'raw':
return CsekRawKey(key_material)
if key_type == 'rsa-encrypted':
if allow_rsa_encrypted:
return CsekRsaEncryptedKey(key_material)
raise BadKeyTypeException(
key_type,
'this feature is only allowed in the alpha and beta versions of this '
'command.')
raise BadKeyTypeException(key_type)
@abc.abstractmethod
def GetKeyLength(self):
raise NotImplementedError('GetKeyLength() must be overridden.')
@abc.abstractmethod
def ToMessage(self):
raise NotImplementedError('ToMessage() must be overridden.')
@property
def key_material(self):
return self._key_material
class CsekRawKey(CsekKeyBase):
"""Class representing raw Csek keys."""
def GetKeyLength(self):
return BASE64_RAW_KEY_LENGTH_IN_CHARS
def ToMessage(self, compute_client):
return compute_client.MESSAGES_MODULE.CustomerEncryptionKey(
rawKey=str(self.key_material))
class CsekRsaEncryptedKey(CsekKeyBase):
"""Class representing rsa encrypted Csek keys."""
def GetKeyLength(self):
return BASE64_RSA_ENCRYPTED_KEY_LENGTH_IN_CHARS
def ToMessage(self, compute_client):
return compute_client.MESSAGES_MODULE.CustomerEncryptionKey(
rsaEncryptedKey=str(self.key_material))
class BadKeyTypeException(InvalidKeyFileException):
"""A key type is bad and why."""
def __init__(self, key_type, explanation=''):
self.key_type = key_type
msg = 'Invalid key type [{0}]'.format(self.key_type)
if explanation:
msg += ': ' + explanation
msg += '.'
super(BadKeyTypeException, self).__init__(msg)
class MissingCsekKeyException(exceptions.ToolException):
def __init__(self, resource):
super(MissingCsekKeyException, self).__init__(
'Key required for resource [{0}], but none found.'.format(resource))
def AddCsekKeyArgs(parser, flags_about_creation=True):
"""Adds arguments related to csek keys."""
csek_key_file = parser.add_argument(
'--csek-key-file',
help='Path to a CSEK key file',
metavar='FILE')
csek_key_file.detailed_help = (
'Path to a Customer-Supplied Encryption Key (CSEK) key file, mapping '
'Google Compute Engine resources to user managed keys to be used when '
'creating, mounting, or snapshotting disks. '
'See {0} for more details.').format(CSEK_HELP_URL)
# TODO(user)
# Argument - indicates the key file should be read from stdin.'
if flags_about_creation:
require_csek_key_create = parser.add_argument(
'--require-csek-key-create',
action='store_true',
default=True,
help='Create resources protected by csek key.')
require_csek_key_create.detailed_help = (
'Refuse to create resources not protected by a user managed key in the '
'key file when --csek-key-file is given. This behavior is enabled by '
'default to prevent incorrect gcloud invocations from accidentally '
'creating resources with no user managed key. Disabling the check '
'allows creation of some resources without a matching '
'Customer-Supplied Encryption Key in the supplied --csek-key-file. '
'See {0} for more details').format(CSEK_HELP_URL)
class UriPattern(object):
"""A uri-based pattern that maybe be matched against resource objects."""
def __init__(self, path_as_string):
if not path_as_string.startswith('http'):
raise BadPatternException('uri', path_as_string)
self._path_as_string = path_as_string
def Matches(self, resource):
"""Tests if its argument matches the pattern."""
return self._path_as_string == resource.SelfLink()
def __str__(self):
return 'Uri Pattern: ' + self._path_as_string
class CsekKeyStore(object):
"""Represents a map from resource patterns to keys."""
# Members
# self._state: dictionary from UriPattern to an instance of (a subclass of)
# CsekKeyBase
@staticmethod
def FromFile(fname, allow_rsa_encrypted):
"""FromFile loads a CsekKeyStore from a file.
Args:
fname: str, the name of a file intended to contain a well-formed key file
allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted'
Returns:
A CsekKeyStore, if found
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
with open(fname) as infile:
content = infile.read()
return CsekKeyStore(content, allow_rsa_encrypted)
@staticmethod
  def FromArgs(args, allow_rsa_encrypted=False):
    """FromArgs attempts to load a CsekKeyStore from a command's args.
Args:
args: CLI args with a csek_key_file field set
allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted'
Returns:
A CsekKeyStore, if a valid key file name is provided as csek_key_file
None, if args.csek_key_file is None
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
if args.csek_key_file is None:
return None
return CsekKeyStore.FromFile(args.csek_key_file, allow_rsa_encrypted)
@staticmethod
  def _ParseAndValidate(s, allow_rsa_encrypted=False):
    """_ParseAndValidate(s) interprets s as a csek key file.
Args:
s: str, an input to parse
allow_rsa_encrypted: bool, whether to allow RSA-wrapped keys
Returns:
a valid state object
Raises:
InvalidKeyFileException: if the input doesn't parse or is not well-formed.
"""
assert type(s) is str
state = {}
try:
records = json.loads(s)
if type(records) is not list:
raise InvalidKeyFileException(
'Key file\'s top-level element must be a JSON list.')
for key_record in records:
if type(key_record) is not dict:
raise InvalidKeyFileException(
'Key file records must be JSON objects, but [{0}] found.'.format(
json.dumps(key_record)))
if set(key_record.keys()) != EXPECTED_RECORD_KEY_KEYS:
raise InvalidKeyFileException(
'Record [{0}] has incorrect json keys; [{1}] expected'.format(
json.dumps(key_record),
','.join(EXPECTED_RECORD_KEY_KEYS)))
pattern = UriPattern(key_record['uri'])
try:
state[pattern] = CsekKeyBase.MakeKey(
key_material=key_record['key'], key_type=key_record['key-type'],
allow_rsa_encrypted=allow_rsa_encrypted)
except InvalidKeyExceptionNoContext as e:
raise InvalidKeyException(key=e.key, key_id=pattern, issue=e.issue)
except ValueError as e:
raise InvalidKeyFileException(*e.args)
assert type(state) is dict
return state
def __len__(self):
return len(self.state)
def LookupKey(self, resource, raise_if_missing=False):
"""Search for the unique key corresponding to a given resource.
Args:
resource: the resource to find a key for.
raise_if_missing: bool, raise an exception if the resource is not found.
Returns: CsekKeyBase, corresponding to the resource, or None if not found
and not raise_if_missing.
Raises:
InvalidKeyFileException: if there are two records matching the resource.
MissingCsekKeyException: if raise_if_missing and no key is found
        for the provided resource.
"""
assert type(self.state) is dict
search_state = (None, None)
for pat, key in self.state.iteritems():
if pat.Matches(resource):
# TODO(user) what's the best thing to do if there are multiple
# matches?
if search_state[0]:
          raise InvalidKeyFileException(
'Uri patterns [{0}] and [{1}] both match '
'resource [{2}]. Bailing out.'.format(
search_state[0], pat, str(resource)))
search_state = (pat, key)
if raise_if_missing and (search_state[1] is None):
raise MissingCsekKeyException(resource)
return search_state[1]
def __init__(self, json_string, allow_rsa_encrypted=False):
self.state = CsekKeyStore._ParseAndValidate(json_string,
allow_rsa_encrypted)
# Functions below make it easy for clients to operate on values that possibly
# either CsekKeyStores or None or else CsekKeyBases or None. Fellow functional
# programming geeks: basically we're faking the Maybe monad.
def MaybeToMessage(csek_key_or_none, compute):
return csek_key_or_none.ToMessage(compute) if csek_key_or_none else None
def MaybeLookupKey(csek_keys_or_none, resource):
if csek_keys_or_none and resource:
return csek_keys_or_none.LookupKey(resource)
return None
def MaybeLookupKeyMessage(csek_keys_or_none, resource, compute_client):
maybe_key = MaybeLookupKey(csek_keys_or_none, resource)
return MaybeToMessage(maybe_key, compute_client)
def MaybeLookupKeys(csek_keys_or_none, resources):
return [MaybeLookupKey(csek_keys_or_none, r) for r in resources]
def MaybeLookupKeyMessages(csek_keys_or_none, resources, compute_client):
return [MaybeToMessage(k, compute_client) for k in
MaybeLookupKeys(csek_keys_or_none, resources)]
def MaybeLookupKeysByUri(csek_keys_or_none, parser, uris):
return MaybeLookupKeys(
csek_keys_or_none,
[(parser.Parse(u) if u else None) for u in uris])
def MaybeLookupKeyMessagesByUri(csek_keys_or_none, parser,
uris, compute_client):
return [MaybeToMessage(k, compute_client) for k in
MaybeLookupKeysByUri(csek_keys_or_none, parser, uris)]
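# Illustrative CSEK key file (assumed example values) in the layout that
# CsekKeyStore._ParseAndValidate expects -- a JSON list of {uri, key, key-type} records:
#   [
#     {
#       "uri": "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/my-disk",
#       "key": "<44-character base64 string ending in '='>",
#       "key-type": "raw"
#     }
#   ]
# A store built from such a file can then be queried, e.g.
#   csek_keys = CsekKeyStore.FromFile('example-keys.json', allow_rsa_encrypted=False)
#   key = csek_keys.LookupKey(disk_resource, raise_if_missing=True)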
| <filename>lib/googlecloudsdk/api_lib/compute/csek_utils.py
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for managing customer supplied encryption keys."""
import abc
import base64
import json
import re
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import exceptions as core_exceptions
CSEK_HELP_URL = ('https://cloud.google.com/compute/docs/disks/'
'customer-supplied-encryption')
EXPECTED_RECORD_KEY_KEYS = set(['uri', 'key', 'key-type'])
BASE64_RAW_KEY_LENGTH_IN_CHARS = 44
BASE64_RSA_ENCRYPTED_KEY_LENGTH_IN_CHARS = 344
class InvalidKeyFileException(core_exceptions.Error):
"""There's a problem in a CSEK file."""
def __init__(self, base_message):
super(InvalidKeyFileException, self).__init__(
'{0}'.format(base_message))
# TODO(user) Update this message to include
# a link to friendly documentation.
class BadPatternException(InvalidKeyFileException):
"""A (e.g.) url pattern is bad and why."""
def __init__(self, pattern_type, pattern):
self.pattern_type = pattern_type
self.pattern = pattern
super(BadPatternException, self).__init__(
'Invalid value for [{0}] pattern: [{1}]'.format(
self.pattern_type,
self.pattern))
class InvalidKeyExceptionNoContext(InvalidKeyFileException):
"""Indicate that a particular key is bad and why."""
def __init__(self, key, issue):
self.key = key
self.issue = issue
super(InvalidKeyExceptionNoContext, self).__init__(
'Invalid key, [{0}] : {1}'.format(
self.key,
self.issue))
class InvalidKeyException(InvalidKeyFileException):
"""Indicate that a particular key is bad, why, and where."""
def __init__(self, key, key_id, issue):
self.key = key
self.key_id = key_id
self.issue = issue
super(InvalidKeyException, self).__init__(
'Invalid key, [{0}], for [{1}]: {2}'.format(
self.key,
self.key_id,
self.issue))
def ValidateKey(base64_encoded_string, expected_key_length):
"""ValidateKey(s, k) returns None or raises InvalidKeyExceptionNoContext."""
if expected_key_length < 1:
    raise ValueError('ValidateKey requires expected_key_length >= 1. Got {0}'
.format(expected_key_length))
if len(base64_encoded_string) != expected_key_length:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key should contain {0} characters (including padding), '
'but is [{1}] characters long.'.format(
expected_key_length,
len(base64_encoded_string)))
if base64_encoded_string[-1] != '=':
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Bad padding. Keys should end with an \'=\' character.')
try:
base64_encoded_string_as_str = base64_encoded_string.encode('ascii')
except UnicodeDecodeError:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key contains non-ascii characters.')
if not re.match(r'^[a-zA-Z0-9+/=]*$', base64_encoded_string_as_str):
raise InvalidKeyExceptionNoContext(
base64_encoded_string_as_str,
'Key contains unexpected characters. Base64 encoded strings '
'contain only letters (upper or lower case), numbers, '
'plusses \'+\', slashes \'/\', or equality signs \'=\'.')
try:
base64.b64decode(base64_encoded_string_as_str)
except TypeError as t:
raise InvalidKeyExceptionNoContext(
base64_encoded_string,
'Key is not valid base64: [{0}].'.format(t.message))
class CsekKeyBase(object):
"""A class representing for Csek keys."""
__metaclass__ = abc.ABCMeta
def __init__(self, key_material):
ValidateKey(key_material, expected_key_length=self.GetKeyLength())
self._key_material = key_material
@staticmethod
def MakeKey(key_material, key_type, allow_rsa_encrypted=False):
"""Make a CSEK key.
Args:
key_material: str, the key material for this key
key_type: str, the type of this key
allow_rsa_encrypted: bool, whether the key is allowed to be RSA-wrapped
Returns:
CsekRawKey or CsekRsaEncryptedKey derived from the given key material and
type.
Raises:
BadKeyTypeException: if the key is not a valid key type
"""
if key_type == 'raw':
return CsekRawKey(key_material)
if key_type == 'rsa-encrypted':
if allow_rsa_encrypted:
return CsekRsaEncryptedKey(key_material)
raise BadKeyTypeException(
key_type,
'this feature is only allowed in the alpha and beta versions of this '
'command.')
raise BadKeyTypeException(key_type)
@abc.abstractmethod
def GetKeyLength(self):
raise NotImplementedError('GetKeyLength() must be overridden.')
@abc.abstractmethod
def ToMessage(self):
raise NotImplementedError('ToMessage() must be overridden.')
@property
def key_material(self):
return self._key_material
class CsekRawKey(CsekKeyBase):
"""Class representing raw Csek keys."""
def GetKeyLength(self):
return BASE64_RAW_KEY_LENGTH_IN_CHARS
def ToMessage(self, compute_client):
return compute_client.MESSAGES_MODULE.CustomerEncryptionKey(
rawKey=str(self.key_material))
class CsekRsaEncryptedKey(CsekKeyBase):
"""Class representing rsa encrypted Csek keys."""
def GetKeyLength(self):
return BASE64_RSA_ENCRYPTED_KEY_LENGTH_IN_CHARS
def ToMessage(self, compute_client):
return compute_client.MESSAGES_MODULE.CustomerEncryptionKey(
rsaEncryptedKey=str(self.key_material))
class BadKeyTypeException(InvalidKeyFileException):
"""A key type is bad and why."""
def __init__(self, key_type, explanation=''):
self.key_type = key_type
msg = 'Invalid key type [{0}]'.format(self.key_type)
if explanation:
msg += ': ' + explanation
msg += '.'
super(BadKeyTypeException, self).__init__(msg)
class MissingCsekKeyException(exceptions.ToolException):
def __init__(self, resource):
super(MissingCsekKeyException, self).__init__(
'Key required for resource [{0}], but none found.'.format(resource))
def AddCsekKeyArgs(parser, flags_about_creation=True):
"""Adds arguments related to csek keys."""
csek_key_file = parser.add_argument(
'--csek-key-file',
help='Path to a CSEK key file',
metavar='FILE')
csek_key_file.detailed_help = (
'Path to a Customer-Supplied Encryption Key (CSEK) key file, mapping '
'Google Compute Engine resources to user managed keys to be used when '
'creating, mounting, or snapshotting disks. '
'See {0} for more details.').format(CSEK_HELP_URL)
# TODO(user)
# Argument - indicates the key file should be read from stdin.'
if flags_about_creation:
require_csek_key_create = parser.add_argument(
'--require-csek-key-create',
action='store_true',
default=True,
help='Create resources protected by csek key.')
require_csek_key_create.detailed_help = (
'Refuse to create resources not protected by a user managed key in the '
'key file when --csek-key-file is given. This behavior is enabled by '
'default to prevent incorrect gcloud invocations from accidentally '
'creating resources with no user managed key. Disabling the check '
'allows creation of some resources without a matching '
'Customer-Supplied Encryption Key in the supplied --csek-key-file. '
'See {0} for more details').format(CSEK_HELP_URL)
class UriPattern(object):
"""A uri-based pattern that maybe be matched against resource objects."""
def __init__(self, path_as_string):
if not path_as_string.startswith('http'):
raise BadPatternException('uri', path_as_string)
self._path_as_string = path_as_string
def Matches(self, resource):
"""Tests if its argument matches the pattern."""
return self._path_as_string == resource.SelfLink()
def __str__(self):
return 'Uri Pattern: ' + self._path_as_string
class CsekKeyStore(object):
"""Represents a map from resource patterns to keys."""
# Members
# self._state: dictionary from UriPattern to an instance of (a subclass of)
# CsekKeyBase
@staticmethod
def FromFile(fname, allow_rsa_encrypted):
"""FromFile loads a CsekKeyStore from a file.
Args:
fname: str, the name of a file intended to contain a well-formed key file
allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted'
Returns:
A CsekKeyStore, if found
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
with open(fname) as infile:
content = infile.read()
return CsekKeyStore(content, allow_rsa_encrypted)
@staticmethod
  def FromArgs(args, allow_rsa_encrypted=False):
    """FromArgs attempts to load a CsekKeyStore from a command's args.
Args:
args: CLI args with a csek_key_file field set
allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted'
Returns:
A CsekKeyStore, if a valid key file name is provided as csek_key_file
None, if args.csek_key_file is None
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
if args.csek_key_file is None:
return None
return CsekKeyStore.FromFile(args.csek_key_file, allow_rsa_encrypted)
@staticmethod
  def _ParseAndValidate(s, allow_rsa_encrypted=False):
    """_ParseAndValidate(s) interprets s as a csek key file.
Args:
s: str, an input to parse
allow_rsa_encrypted: bool, whether to allow RSA-wrapped keys
Returns:
a valid state object
Raises:
InvalidKeyFileException: if the input doesn't parse or is not well-formed.
"""
assert type(s) is str
state = {}
try:
records = json.loads(s)
if type(records) is not list:
raise InvalidKeyFileException(
'Key file\'s top-level element must be a JSON list.')
for key_record in records:
if type(key_record) is not dict:
raise InvalidKeyFileException(
'Key file records must be JSON objects, but [{0}] found.'.format(
json.dumps(key_record)))
if set(key_record.keys()) != EXPECTED_RECORD_KEY_KEYS:
raise InvalidKeyFileException(
'Record [{0}] has incorrect json keys; [{1}] expected'.format(
json.dumps(key_record),
','.join(EXPECTED_RECORD_KEY_KEYS)))
pattern = UriPattern(key_record['uri'])
try:
state[pattern] = CsekKeyBase.MakeKey(
key_material=key_record['key'], key_type=key_record['key-type'],
allow_rsa_encrypted=allow_rsa_encrypted)
except InvalidKeyExceptionNoContext as e:
raise InvalidKeyException(key=e.key, key_id=pattern, issue=e.issue)
except ValueError as e:
raise InvalidKeyFileException(*e.args)
assert type(state) is dict
return state
def __len__(self):
return len(self.state)
def LookupKey(self, resource, raise_if_missing=False):
"""Search for the unique key corresponding to a given resource.
Args:
resource: the resource to find a key for.
raise_if_missing: bool, raise an exception if the resource is not found.
Returns: CsekKeyBase, corresponding to the resource, or None if not found
and not raise_if_missing.
Raises:
InvalidKeyFileException: if there are two records matching the resource.
MissingCsekKeyException: if raise_if_missing and no key is found
        for the provided resource.
"""
assert type(self.state) is dict
search_state = (None, None)
for pat, key in self.state.iteritems():
if pat.Matches(resource):
# TODO(user) what's the best thing to do if there are multiple
# matches?
if search_state[0]:
          raise InvalidKeyFileException(
'Uri patterns [{0}] and [{1}] both match '
'resource [{2}]. Bailing out.'.format(
search_state[0], pat, str(resource)))
search_state = (pat, key)
if raise_if_missing and (search_state[1] is None):
raise MissingCsekKeyException(resource)
return search_state[1]
def __init__(self, json_string, allow_rsa_encrypted=False):
self.state = CsekKeyStore._ParseAndValidate(json_string,
allow_rsa_encrypted)
# Functions below make it easy for clients to operate on values that possibly
# either CsekKeyStores or None or else CsekKeyBases or None. Fellow functional
# programming geeks: basically we're faking the Maybe monad.
def MaybeToMessage(csek_key_or_none, compute):
return csek_key_or_none.ToMessage(compute) if csek_key_or_none else None
def MaybeLookupKey(csek_keys_or_none, resource):
if csek_keys_or_none and resource:
return csek_keys_or_none.LookupKey(resource)
return None
def MaybeLookupKeyMessage(csek_keys_or_none, resource, compute_client):
maybe_key = MaybeLookupKey(csek_keys_or_none, resource)
return MaybeToMessage(maybe_key, compute_client)
def MaybeLookupKeys(csek_keys_or_none, resources):
return [MaybeLookupKey(csek_keys_or_none, r) for r in resources]
def MaybeLookupKeyMessages(csek_keys_or_none, resources, compute_client):
return [MaybeToMessage(k, compute_client) for k in
MaybeLookupKeys(csek_keys_or_none, resources)]
def MaybeLookupKeysByUri(csek_keys_or_none, parser, uris):
return MaybeLookupKeys(
csek_keys_or_none,
[(parser.Parse(u) if u else None) for u in uris])
def MaybeLookupKeyMessagesByUri(csek_keys_or_none, parser,
uris, compute_client):
return [MaybeToMessage(k, compute_client) for k in
MaybeLookupKeysByUri(csek_keys_or_none, parser, uris)]
| en | 0.775307 | # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utility functions for managing customer supplied encryption keys. There's a problem in a CSEK file. # TODO(user) Update this message to include # a lint to friendly documentation. A (e.g.) url pattern is bad and why. Indicate that a particular key is bad and why. Indicate that a particular key is bad, why, and where. ValidateKey(s, k) returns None or raises InvalidKeyExceptionNoContext. A class representing for Csek keys. Make a CSEK key. Args: key_material: str, the key material for this key key_type: str, the type of this key allow_rsa_encrypted: bool, whether the key is allowed to be RSA-wrapped Returns: CsekRawKey or CsekRsaEncryptedKey derived from the given key material and type. Raises: BadKeyTypeException: if the key is not a valid key type Class representing raw Csek keys. Class representing rsa encrypted Csek keys. A key type is bad and why. Adds arguments related to csek keys. # TODO(user) # Argument - indicates the key file should be read from stdin.' A uri-based pattern that maybe be matched against resource objects. Tests if its argument matches the pattern. Represents a map from resource patterns to keys. # Members # self._state: dictionary from UriPattern to an instance of (a subclass of) # CsekKeyBase FromFile loads a CsekKeyStore from a file. Args: fname: str, the name of a file intended to contain a well-formed key file allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted' Returns: A CsekKeyStore, if found Raises: exceptions.BadFileException: there's a problem reading fname exceptions.InvalidKeyFileException: the key file failed to parse or was otherwise invalid FromFile attempts to load a CsekKeyStore from a command's args. Args: args: CLI args with a csek_key_file field set allow_rsa_encrypted: bool, whether to allow keys of type 'rsa-encrypted' Returns: A CsekKeyStore, if a valid key file name is provided as csek_key_file None, if args.csek_key_file is None Raises: exceptions.BadFileException: there's a problem reading fname exceptions.InvalidKeyFileException: the key file failed to parse or was otherwise invalid _ParseAndValidate(s) inteprets s as a csek key file. Args: s: str, an input to parse allow_rsa_encrypted: bool, whether to allow RSA-wrapped keys Returns: a valid state object Raises: InvalidKeyFileException: if the input doesn't parse or is not well-formed. Search for the unique key corresponding to a given resource. Args: resource: the resource to find a key for. raise_if_missing: bool, raise an exception if the resource is not found. Returns: CsekKeyBase, corresponding to the resource, or None if not found and not raise_if_missing. Raises: InvalidKeyFileException: if there are two records matching the resource. MissingCsekKeyException: if raise_if_missing and no key is found for the provided resoure. # TODO(user) what's the best thing to do if there are multiple # matches? 
# Functions below make it easy for clients to operate on values that possibly # either CsekKeyStores or None or else CsekKeyBases or None. Fellow functional # programming geeks: basically we're faking the Maybe monad. | 2.272708 | 2 |
geneticAlgorithms/sistemaEquacoesLineares.py | PauloBernardo/InteligenciaComputacional | 0 | 6630179 | import random
POPULATION_SIZE = 200
GENES = '01'
def func1(x, y, z, w):
return pow(x, 2) + pow(y, 3) + pow(z, 4) - pow(w, 5)
def func2(x, y, z, w):
return pow(x, 2) + 3 * pow(z, 2) - w
def func3(x, y, z, w):
return pow(z, 5) - y - 10
def func4(x, y, z, w):
return pow(x, 4) - z + y * w
class Individual(object):
def __init__(self, chromosome):
self.chromosome = chromosome
self.fitness = self.cal_fitness()
@classmethod
def mutated_genes(cls):
global GENES
gene = random.choice(GENES)
return gene
@classmethod
def create_gnome(cls):
gnome_len = 64
return [cls.mutated_genes() for _ in range(gnome_len)]
def mate(self, par2):
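        # Crossover note (added for clarity): each child gene comes from parent 1 with
        # probability 0.45, from parent 2 with probability 0.45, and is a freshly
        # mutated random gene with probability 0.10.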
child_chromosome = []
for gp1, gp2 in zip(self.chromosome, par2.chromosome):
prob = random.random()
if prob < 0.45:
child_chromosome.append(gp1)
elif prob < 0.90:
child_chromosome.append(gp2)
else:
child_chromosome.append(self.mutated_genes())
return Individual(child_chromosome)
def get_chromosome_value(self, position):
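        # Encoding note (added for clarity): each variable occupies 16 genes -- a sign
        # bit, 7 bits for the integer part and 8 bits for the fractional part (value/256).
        # For example, genes '0' + '0000010' + '10000000' decode to 2 + 128/256 = 2.5;
        # with the sign bit set, the integer part is negated before the fraction is
        # added, so '1' + '0000010' + '10000000' decodes to -2 + 0.5 = -1.5.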
if self.chromosome[16 * (position - 1)] == '1':
return -1 * int(''.join(self.chromosome[16 * (position - 1) + 1:position * 16 - 8]), 2) + int(''.join(self.chromosome[position * 16 - 8:position * 16]), 2) / 256
return int(''.join(self.chromosome[16 * (position - 1) + 1:position * 16 - 8]), 2) + int(''.join(self.chromosome[position * 16 - 8:position * 16]), 2) / 256
def cal_fitness(self):
sum = 0
sum += abs(func1(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func2(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func3(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func4(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
return sum
def main():
global POPULATION_SIZE
generation = 1
found = False
population = []
for _ in range(POPULATION_SIZE):
gnome = Individual.create_gnome()
population.append(Individual(gnome))
while not found and generation < 1000:
population = sorted(population, key=lambda x: x.fitness)
if population[0].fitness <= 0:
break
new_generation = []
s = int((10 * POPULATION_SIZE) / 100)
new_generation.extend(population[:s])
s = int((90 * POPULATION_SIZE) / 100)
for _ in range(s):
parent1 = random.choice(population[:50])
parent2 = random.choice(population[:50])
child = parent1.mate(parent2)
new_generation.append(child)
population = new_generation
print("Generation: {}\tString: {}\tFitness: {}". \
format(generation,
"".join(population[0].chromosome),
population[0].fitness))
generation += 1
# print(population[0].chromosome)
f1 = abs(func1(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f2 = abs(func2(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f3 = abs(func3(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f4 = abs(func4(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
print("x: %.4f" % population[0].get_chromosome_value(1), "y: %.4f" % population[0].get_chromosome_value(2),
"z: %.4f" % population[0].get_chromosome_value(3), "w: %.4f" % population[0].get_chromosome_value(4))
print(f1, f2, f3, f4, f1 + f2 + f3 + f4)
print("Generation: {}\tString: {}\tFitness: {}". \
format(generation,
"".join(population[0].chromosome),
"{:.8f}".format(population[0].fitness)))
if __name__ == '__main__':
main()
| import random
POPULATION_SIZE = 200
GENES = '01'
def func1(x, y, z, w):
return pow(x, 2) + pow(y, 3) + pow(z, 4) - pow(w, 5)
def func2(x, y, z, w):
return pow(x, 2) + 3 * pow(z, 2) - w
def func3(x, y, z, w):
return pow(z, 5) - y - 10
def func4(x, y, z, w):
return pow(x, 4) - z + y * w
class Individual(object):
def __init__(self, chromosome):
self.chromosome = chromosome
self.fitness = self.cal_fitness()
@classmethod
def mutated_genes(cls):
global GENES
gene = random.choice(GENES)
return gene
@classmethod
def create_gnome(cls):
gnome_len = 64
return [cls.mutated_genes() for _ in range(gnome_len)]
def mate(self, par2):
child_chromosome = []
for gp1, gp2 in zip(self.chromosome, par2.chromosome):
prob = random.random()
if prob < 0.45:
child_chromosome.append(gp1)
elif prob < 0.90:
child_chromosome.append(gp2)
else:
child_chromosome.append(self.mutated_genes())
return Individual(child_chromosome)
def get_chromosome_value(self, position):
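        # each value is encoded in 16 genes: 1 sign bit, 7 bits for the integer part,
        # and 8 bits for the fractional part (interpreted as n / 256)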
if self.chromosome[16 * (position - 1)] == '1':
return -1 * int(''.join(self.chromosome[16 * (position - 1) + 1:position * 16 - 8]), 2) + int(''.join(self.chromosome[position * 16 - 8:position * 16]), 2) / 256
return int(''.join(self.chromosome[16 * (position - 1) + 1:position * 16 - 8]), 2) + int(''.join(self.chromosome[position * 16 - 8:position * 16]), 2) / 256
def cal_fitness(self):
sum = 0
sum += abs(func1(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func2(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func3(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
sum += abs(func4(self.get_chromosome_value(1), self.get_chromosome_value(2), self.get_chromosome_value(3),
self.get_chromosome_value(4)))
return sum
def main():
global POPULATION_SIZE
generation = 1
found = False
population = []
for _ in range(POPULATION_SIZE):
gnome = Individual.create_gnome()
population.append(Individual(gnome))
while not found and generation < 1000:
population = sorted(population, key=lambda x: x.fitness)
if population[0].fitness <= 0:
break
new_generation = []
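        # elitism: carry the fittest 10% of the population unchanged into the next generation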
s = int((10 * POPULATION_SIZE) / 100)
new_generation.extend(population[:s])
s = int((90 * POPULATION_SIZE) / 100)
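        # breed the remaining 90% by mating parents drawn from the 50 fittest individuals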
for _ in range(s):
parent1 = random.choice(population[:50])
parent2 = random.choice(population[:50])
child = parent1.mate(parent2)
new_generation.append(child)
population = new_generation
print("Generation: {}\tString: {}\tFitness: {}". \
format(generation,
"".join(population[0].chromosome),
population[0].fitness))
generation += 1
# print(population[0].chromosome)
f1 = abs(func1(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f2 = abs(func2(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f3 = abs(func3(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
f4 = abs(func4(population[0].get_chromosome_value(1), population[0].get_chromosome_value(2),
population[0].get_chromosome_value(3), population[0].get_chromosome_value(4)))
print("x: %.4f" % population[0].get_chromosome_value(1), "y: %.4f" % population[0].get_chromosome_value(2),
"z: %.4f" % population[0].get_chromosome_value(3), "w: %.4f" % population[0].get_chromosome_value(4))
print(f1, f2, f3, f4, f1 + f2 + f3 + f4)
print("Generation: {}\tString: {}\tFitness: {}". \
format(generation,
"".join(population[0].chromosome),
"{:.8f}".format(population[0].fitness)))
if __name__ == '__main__':
main()
| en | 0.226516 | # print(population[0].chromosome) | 2.96425 | 3 |
src/monitor.py | NikolayStrekalov/pymon2 | 1 | 6630180 | <gh_stars>1-10
# coding: utf-8
import yaml
from tasks import PeriodicTask
from utils import execute_cmd
from reports import CheckResult, CheckResultCode, create_reporters
class ShellChecker(object):
CODE_MAP = {
'0': CheckResultCode.SUCCESS,
'1': CheckResultCode.INFO,
'2': CheckResultCode.FAILURE,
}
def __init__(self, name, shell_cmd, reporters):
self._name = name
self._reporters = reporters
self._shell_cmd = shell_cmd
def run_check(self):
output = execute_cmd(self.shell_cmd).strip()
result = self.check(output)
self.report(result)
def check(self, output):
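        # the shell command is expected to print "<code>;<message>[;<data>]", where code is 0, 1 or 2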
message_index = output.find(';')
data_index = output.find(';', message_index + 1)
data_index = data_index if data_index > 0 else len(output)
message = output[message_index + 1:data_index]
data = output[data_index + 1:]
code = self.CODE_MAP.get(output[:message_index], None)
if code is not None:
if code == CheckResultCode.FAILURE:
print 'FAIL', output
return CheckResult(self.name, message, data, code)
else:
# unknown check result
return None
def report(self, result):
for reporter in self.reporters:
reporter.report(result)
@property
def reporters(self):
return self._reporters
@property
def name(self):
return self._name
@property
def shell_cmd(self):
return self._shell_cmd
class ShellMonitor(PeriodicTask):
def __init__(self, period, name, shell_cmd, reporters):
super(ShellMonitor, self).__init__()
self._period = period
self._checker = ShellChecker(name, shell_cmd, reporters)
def start(self):
super(ShellMonitor, self).start(self._period)
def task(self):
self._checker.run_check()
def create_monitor(check, reporters):
reporters_for_check = [reporters[name] for name in check['reporters']]
return ShellMonitor(
check['period'], check['name'], check['command'],
reporters_for_check,
)
def monitors_from_yaml(filename):
with open(filename) as f:
config = yaml.safe_load(f)
reporter_configs = config.get('reporters')
if not reporter_configs:
raise ValueError('reporter_configs must be specified')
reporters = create_reporters(reporter_configs)
return [
create_monitor(check, reporters) for check in config['checks']
]
| # coding: utf-8
import yaml
from tasks import PeriodicTask
from utils import execute_cmd
from reports import CheckResult, CheckResultCode, create_reporters
class ShellChecker(object):
CODE_MAP = {
'0': CheckResultCode.SUCCESS,
'1': CheckResultCode.INFO,
'2': CheckResultCode.FAILURE,
}
def __init__(self, name, shell_cmd, reporters):
self._name = name
self._reporters = reporters
self._shell_cmd = shell_cmd
def run_check(self):
output = execute_cmd(self.shell_cmd).strip()
result = self.check(output)
self.report(result)
def check(self, output):
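        # the shell command is expected to print "<code>;<message>[;<data>]", where code is 0, 1 or 2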
message_index = output.find(';')
data_index = output.find(';', message_index + 1)
data_index = data_index if data_index > 0 else len(output)
message = output[message_index + 1:data_index]
data = output[data_index + 1:]
code = self.CODE_MAP.get(output[:message_index], None)
if code is not None:
if code == CheckResultCode.FAILURE:
print 'FAIL', output
return CheckResult(self.name, message, data, code)
else:
# unknown check result
return None
def report(self, result):
for reporter in self.reporters:
reporter.report(result)
@property
def reporters(self):
return self._reporters
@property
def name(self):
return self._name
@property
def shell_cmd(self):
return self._shell_cmd
class ShellMonitor(PeriodicTask):
def __init__(self, period, name, shell_cmd, reporters):
super(ShellMonitor, self).__init__()
self._period = period
self._checker = ShellChecker(name, shell_cmd, reporters)
def start(self):
super(ShellMonitor, self).start(self._period)
def task(self):
self._checker.run_check()
def create_monitor(check, reporters):
reporters_for_check = [reporters[name] for name in check['reporters']]
return ShellMonitor(
check['period'], check['name'], check['command'],
reporters_for_check,
)
def monitors_from_yaml(filename):
with open(filename) as f:
config = yaml.safe_load(f)
reporter_configs = config.get('reporters')
if not reporter_configs:
raise ValueError('reporter_configs must be specified')
reporters = create_reporters(reporter_configs)
return [
create_monitor(check, reporters) for check in config['checks']
] | en | 0.661119 | # coding: utf-8 # unknown check result | 2.461757 | 2 |
widgets/src/widgets/imageswitch/widget.py | builder08/enigma2-plugins_2 | 0 | 6630181 | from __future__ import print_function
from Plugins.Extensions.Widgets.Widget import Widget
from enigma import ePicLoad, ePixmap, getDesktop, eTimer
from Components.Pixmap import Pixmap
from twisted.web.client import downloadPage
from urllib import quote_plus
from os import remove as os_remove, mkdir as os_mkdir
from os.path import isdir as os_path_isdir, isfile as os_isfile
from Components.AVSwitch import AVSwitch
def getAspect():
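    # map the AVSwitch aspect-ratio setting to the (height, width) scaling pair used by ePicLoad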
val = AVSwitch().getAspectRatioSetting()
if val == 0 or val == 1:
r = (5 * 576, 4 * 720)
elif val == 2 or val == 3 or val == 6:
r = (16 * 720, 9 * 1280)
    elif val == 4 or val == 5:
        r = (16 * 576, 10 * 720)
    else:
        # fall back to 4:3 scaling for any unexpected aspect-ratio setting
        r = (5 * 576, 4 * 720)
    return r
class WebPixmap(Pixmap):
def __init__(self, default=None):
Pixmap.__init__(self)
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.setPixmapCB)
self.cachedir = "/tmp/"
self.default = default
def onShow(self):
Pixmap.onShow(self)
sc = getAspect()
resize = 1
background = '#ff000000'
self.picload.setPara((self.instance.size().width(), self.instance.size().height(), sc[0], sc[1], False, resize, background))
def load(self, url=None):
tmpfile = ''.join((self.cachedir, quote_plus(url), ''))
if os_path_isdir(self.cachedir) is False:
print("cachedir not existing, creating it")
os_mkdir(self.cachedir)
if os_isfile(tmpfile):
self.tmpfile = tmpfile
self.onLoadFinished(None)
elif url is not None:
self.tmpfile = tmpfile
head = {}
agt = "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.2) Gecko/2008091620 Firefox/3.0.2"
downloadPage(url, self.tmpfile, headers=head, agent=agt).addCallback(self.onLoadFinished).addErrback(self.onLoadFailed)
elif self.default:
self.picload.startDecode(self.default)
def onLoadFinished(self, result):
self.picload.startDecode(self.tmpfile)
def onLoadFailed(self, error):
print("WebPixmap:onLoadFAILED", error)
if self.default and self.instance:
print("showing 404", self.default)
self.picload.startDecode(self.default)
if os_isfile(self.tmpfile):
os_remove(self.tmpfile)
def setPixmapCB(self, picInfo=None):
if os_isfile(self.tmpfile):
os_remove(self.tmpfile)
ptr = self.picload.getData()
if ptr and self.instance:
self.instance.setPixmap(ptr)
class ImageswitchWidget(Widget):
def __init__(self, session):
        Widget.__init__(self, session, name="Image Switch Widget", description="Example of a simple Widget switching images from the web", version="0.1", author="3c5x9", homepage="cvs://schwerkraft")
self.elements["imageswitch_pixmap"] = WebPixmap()
self.Timer = eTimer()
self.Timer.callback.append(self.TimerFire)
self.last = False
def onLoadFinished(self, instance):
self.instance = instance
self.TimerFire()
def onClose(self):
self.Timer.stop()
def TimerFire(self):
if self.last:
self.getElement("imageswitch_pixmap").load("http://www.google.de/intl/de_de/images/logo.gif")
self.last = False
else:
self.getElement("imageswitch_pixmap").load("http://maps.google.de/intl/de_de/images/maps_small_horizontal_logo.png")
self.last = True
self.Timer.start(5000)
def get_widget(session):
return ImageswitchWidget(session)
| from __future__ import print_function
from Plugins.Extensions.Widgets.Widget import Widget
from enigma import ePicLoad, ePixmap, getDesktop, eTimer
from Components.Pixmap import Pixmap
from twisted.web.client import downloadPage
from urllib import quote_plus
from os import remove as os_remove, mkdir as os_mkdir
from os.path import isdir as os_path_isdir, isfile as os_isfile
from Components.AVSwitch import AVSwitch
def getAspect():
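    # map the AVSwitch aspect-ratio setting to the (height, width) scaling pair used by ePicLoad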
val = AVSwitch().getAspectRatioSetting()
if val == 0 or val == 1:
r = (5 * 576, 4 * 720)
elif val == 2 or val == 3 or val == 6:
r = (16 * 720, 9 * 1280)
    elif val == 4 or val == 5:
        r = (16 * 576, 10 * 720)
    else:
        # fall back to 4:3 scaling for any unexpected aspect-ratio setting
        r = (5 * 576, 4 * 720)
    return r
class WebPixmap(Pixmap):
def __init__(self, default=None):
Pixmap.__init__(self)
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.setPixmapCB)
self.cachedir = "/tmp/"
self.default = default
def onShow(self):
Pixmap.onShow(self)
sc = getAspect()
resize = 1
background = '#ff000000'
self.picload.setPara((self.instance.size().width(), self.instance.size().height(), sc[0], sc[1], False, resize, background))
def load(self, url=None):
tmpfile = ''.join((self.cachedir, quote_plus(url), ''))
if os_path_isdir(self.cachedir) is False:
print("cachedir not existing, creating it")
os_mkdir(self.cachedir)
if os_isfile(tmpfile):
self.tmpfile = tmpfile
self.onLoadFinished(None)
elif url is not None:
self.tmpfile = tmpfile
head = {}
agt = "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.2) Gecko/2008091620 Firefox/3.0.2"
downloadPage(url, self.tmpfile, headers=head, agent=agt).addCallback(self.onLoadFinished).addErrback(self.onLoadFailed)
elif self.default:
self.picload.startDecode(self.default)
def onLoadFinished(self, result):
self.picload.startDecode(self.tmpfile)
def onLoadFailed(self, error):
print("WebPixmap:onLoadFAILED", error)
if self.default and self.instance:
print("showing 404", self.default)
self.picload.startDecode(self.default)
if os_isfile(self.tmpfile):
os_remove(self.tmpfile)
def setPixmapCB(self, picInfo=None):
if os_isfile(self.tmpfile):
os_remove(self.tmpfile)
ptr = self.picload.getData()
if ptr and self.instance:
self.instance.setPixmap(ptr)
class ImageswitchWidget(Widget):
def __init__(self, session):
        Widget.__init__(self, session, name="Image Switch Widget", description="Example of a simple Widget switching images from the web", version="0.1", author="3c5x9", homepage="cvs://schwerkraft")
self.elements["imageswitch_pixmap"] = WebPixmap()
self.Timer = eTimer()
self.Timer.callback.append(self.TimerFire)
self.last = False
def onLoadFinished(self, instance):
self.instance = instance
self.TimerFire()
def onClose(self):
self.Timer.stop()
def TimerFire(self):
if self.last:
self.getElement("imageswitch_pixmap").load("http://www.google.de/intl/de_de/images/logo.gif")
self.last = False
else:
self.getElement("imageswitch_pixmap").load("http://maps.google.de/intl/de_de/images/maps_small_horizontal_logo.png")
self.last = True
self.Timer.start(5000)
def get_widget(session):
return ImageswitchWidget(session)
| none | 1 | 1.980533 | 2 |
|
lib/galaxy/datatypes/msa.py | lesperry/Metagenomics | 0 | 6630182 | import abc
import logging
import os
import re
from galaxy.datatypes.binary import Binary
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import build_sniff_from_prefix
from galaxy.datatypes.util import generic_util
from galaxy.util import (
nice_size,
unicodify,
)
log = logging.getLogger(__name__)
STOCKHOLM_SEARCH_PATTERN = re.compile(r'#\s+STOCKHOLM\s+1\.0')
@build_sniff_from_prefix
class InfernalCM(Text):
file_ext = "cm"
MetadataElement(name="number_of_models", default=0, desc="Number of covariance models",
readonly=True, visible=True, optional=True, no_value=0)
MetadataElement(name="cm_version", default="1/a", desc="Infernal Covariance Model version",
readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
if dataset.metadata.number_of_models == 1:
dataset.blurb = "1 model"
else:
dataset.blurb = "%s models" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'infernal_model.cm' )
>>> InfernalCM().sniff( fname )
True
>>> fname = get_test_fname( '2.txt' )
>>> InfernalCM().sniff( fname )
False
"""
return file_prefix.startswith("INFERNAL")
def set_meta(self, dataset, **kwd):
"""
Set the number of models and the version of CM file in dataset.
"""
dataset.metadata.number_of_models = generic_util.count_special_lines('^INFERNAL', dataset.file_name)
with open(dataset.file_name) as f:
first_line = f.readline()
if first_line.startswith("INFERNAL"):
dataset.metadata.cm_version = (first_line.split()[0]).replace('INFERNAL', '')
@build_sniff_from_prefix
class Hmmer(Text):
edam_data = "data_1364"
edam_format = "format_1370"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name)
dataset.blurb = "HMMER Database"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "HMMER database (%s)" % (nice_size(dataset.get_size()))
@abc.abstractmethod
def sniff_prefix(self, filename):
raise NotImplementedError
class Hmmer2(Hmmer):
edam_format = "format_3328"
file_ext = "hmm2"
def sniff_prefix(self, file_prefix):
"""HMMER2 files start with HMMER2.0
"""
return file_prefix.startswith('HMMER2.0')
class Hmmer3(Hmmer):
edam_format = "format_3329"
file_ext = "hmm3"
def sniff_prefix(self, file_prefix):
"""HMMER3 files start with HMMER3/f
"""
return file_prefix.startswith('HMMER3/f')
class HmmerPress(Binary):
"""Class for hmmpress database files."""
file_ext = 'hmmpress'
allow_datatype_change = False
composite_type = 'basic'
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text."""
if not dataset.dataset.purged:
dataset.peek = "HMMER Binary database"
dataset.blurb = "HMMER Binary database"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
"""Create HTML content, used for displaying peek."""
try:
return dataset.peek
except Exception:
return "HMMER3 database (multiple files)"
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
# Binary model
self.add_composite_file('model.hmm.h3m', is_binary=True)
# SSI index for binary model
self.add_composite_file('model.hmm.h3i', is_binary=True)
# Profiles (MSV part)
self.add_composite_file('model.hmm.h3f', is_binary=True)
# Profiles (remained)
self.add_composite_file('model.hmm.h3p', is_binary=True)
@build_sniff_from_prefix
class Stockholm_1_0(Text):
edam_data = "data_0863"
edam_format = "format_1961"
file_ext = "stockholm"
MetadataElement(name="number_of_models", default=0, desc="Number of multiple alignments", readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
if (dataset.metadata.number_of_models == 1):
dataset.blurb = "1 alignment"
else:
dataset.blurb = "%s alignments" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
return file_prefix.search(STOCKHOLM_SEARCH_PATTERN)
def set_meta(self, dataset, **kwd):
"""
Set the number of models in dataset.
"""
dataset.metadata.number_of_models = generic_util.count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', dataset.file_name)
def split(cls, input_datasets, subdir_generator_function, split_params):
"""
Split the input files by model records.
"""
if split_params is None:
return None
if len(input_datasets) > 1:
raise Exception("STOCKHOLM-file splitting does not support multiple files")
input_files = [ds.file_name for ds in input_datasets]
chunk_size = None
if split_params['split_mode'] == 'number_of_parts':
raise Exception('Split mode "%s" is currently not implemented for STOCKHOLM-files.' % split_params['split_mode'])
elif split_params['split_mode'] == 'to_size':
chunk_size = int(split_params['split_size'])
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
def _read_stockholm_records(filename):
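            # yield one Stockholm alignment at a time; each record ends with a line containing only '//'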
lines = []
with open(filename) as handle:
for line in handle:
lines.append(line)
if line.strip() == '//':
yield lines
lines = []
def _write_part_stockholm_file(accumulated_lines):
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_files[0]))
with open(part_path, 'w') as part_file:
part_file.writelines(accumulated_lines)
try:
stockholm_records = _read_stockholm_records(input_files[0])
stockholm_lines_accumulated = []
for counter, stockholm_record in enumerate(stockholm_records, start=1):
stockholm_lines_accumulated.extend(stockholm_record)
if counter % chunk_size == 0:
_write_part_stockholm_file(stockholm_lines_accumulated)
stockholm_lines_accumulated = []
if stockholm_lines_accumulated:
_write_part_stockholm_file(stockholm_lines_accumulated)
except Exception as e:
log.error('Unable to split files: %s', unicodify(e))
raise
split = classmethod(split)
@build_sniff_from_prefix
class MauveXmfa(Text):
file_ext = "xmfa"
MetadataElement(name="number_of_models", default=0, desc="Number of alignmened sequences", readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
if (dataset.metadata.number_of_models == 1):
dataset.blurb = "1 alignment"
else:
dataset.blurb = "%s alignments" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
return file_prefix.startswith('#FormatVersion Mauve1')
def set_meta(self, dataset, **kwd):
dataset.metadata.number_of_models = generic_util.count_special_lines('^#Sequence([[:digit:]]+)Entry', dataset.file_name)
| import abc
import logging
import os
import re
from galaxy.datatypes.binary import Binary
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import build_sniff_from_prefix
from galaxy.datatypes.util import generic_util
from galaxy.util import (
nice_size,
unicodify,
)
log = logging.getLogger(__name__)
STOCKHOLM_SEARCH_PATTERN = re.compile(r'#\s+STOCKHOLM\s+1\.0')
@build_sniff_from_prefix
class InfernalCM(Text):
file_ext = "cm"
MetadataElement(name="number_of_models", default=0, desc="Number of covariance models",
readonly=True, visible=True, optional=True, no_value=0)
MetadataElement(name="cm_version", default="1/a", desc="Infernal Covariance Model version",
readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
if dataset.metadata.number_of_models == 1:
dataset.blurb = "1 model"
else:
dataset.blurb = "%s models" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'infernal_model.cm' )
>>> InfernalCM().sniff( fname )
True
>>> fname = get_test_fname( '2.txt' )
>>> InfernalCM().sniff( fname )
False
"""
return file_prefix.startswith("INFERNAL")
def set_meta(self, dataset, **kwd):
"""
Set the number of models and the version of CM file in dataset.
"""
dataset.metadata.number_of_models = generic_util.count_special_lines('^INFERNAL', dataset.file_name)
with open(dataset.file_name) as f:
first_line = f.readline()
if first_line.startswith("INFERNAL"):
dataset.metadata.cm_version = (first_line.split()[0]).replace('INFERNAL', '')
@build_sniff_from_prefix
class Hmmer(Text):
edam_data = "data_1364"
edam_format = "format_1370"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name)
dataset.blurb = "HMMER Database"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "HMMER database (%s)" % (nice_size(dataset.get_size()))
@abc.abstractmethod
def sniff_prefix(self, filename):
raise NotImplementedError
class Hmmer2(Hmmer):
edam_format = "format_3328"
file_ext = "hmm2"
def sniff_prefix(self, file_prefix):
"""HMMER2 files start with HMMER2.0
"""
return file_prefix.startswith('HMMER2.0')
class Hmmer3(Hmmer):
edam_format = "format_3329"
file_ext = "hmm3"
def sniff_prefix(self, file_prefix):
"""HMMER3 files start with HMMER3/f
"""
return file_prefix.startswith('HMMER3/f')
class HmmerPress(Binary):
"""Class for hmmpress database files."""
file_ext = 'hmmpress'
allow_datatype_change = False
composite_type = 'basic'
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text."""
if not dataset.dataset.purged:
dataset.peek = "HMMER Binary database"
dataset.blurb = "HMMER Binary database"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
"""Create HTML content, used for displaying peek."""
try:
return dataset.peek
except Exception:
return "HMMER3 database (multiple files)"
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
# Binary model
self.add_composite_file('model.hmm.h3m', is_binary=True)
# SSI index for binary model
self.add_composite_file('model.hmm.h3i', is_binary=True)
# Profiles (MSV part)
self.add_composite_file('model.hmm.h3f', is_binary=True)
# Profiles (remained)
self.add_composite_file('model.hmm.h3p', is_binary=True)
@build_sniff_from_prefix
class Stockholm_1_0(Text):
edam_data = "data_0863"
edam_format = "format_1961"
file_ext = "stockholm"
MetadataElement(name="number_of_models", default=0, desc="Number of multiple alignments", readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
if (dataset.metadata.number_of_models == 1):
dataset.blurb = "1 alignment"
else:
dataset.blurb = "%s alignments" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
return file_prefix.search(STOCKHOLM_SEARCH_PATTERN)
def set_meta(self, dataset, **kwd):
"""
Set the number of models in dataset.
"""
dataset.metadata.number_of_models = generic_util.count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', dataset.file_name)
def split(cls, input_datasets, subdir_generator_function, split_params):
"""
Split the input files by model records.
"""
if split_params is None:
return None
if len(input_datasets) > 1:
raise Exception("STOCKHOLM-file splitting does not support multiple files")
input_files = [ds.file_name for ds in input_datasets]
chunk_size = None
if split_params['split_mode'] == 'number_of_parts':
raise Exception('Split mode "%s" is currently not implemented for STOCKHOLM-files.' % split_params['split_mode'])
elif split_params['split_mode'] == 'to_size':
chunk_size = int(split_params['split_size'])
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
def _read_stockholm_records(filename):
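            # yield one Stockholm alignment at a time; each record ends with a line containing only '//'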
lines = []
with open(filename) as handle:
for line in handle:
lines.append(line)
if line.strip() == '//':
yield lines
lines = []
def _write_part_stockholm_file(accumulated_lines):
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_files[0]))
with open(part_path, 'w') as part_file:
part_file.writelines(accumulated_lines)
try:
stockholm_records = _read_stockholm_records(input_files[0])
stockholm_lines_accumulated = []
for counter, stockholm_record in enumerate(stockholm_records, start=1):
stockholm_lines_accumulated.extend(stockholm_record)
if counter % chunk_size == 0:
_write_part_stockholm_file(stockholm_lines_accumulated)
stockholm_lines_accumulated = []
if stockholm_lines_accumulated:
_write_part_stockholm_file(stockholm_lines_accumulated)
except Exception as e:
log.error('Unable to split files: %s', unicodify(e))
raise
split = classmethod(split)
@build_sniff_from_prefix
class MauveXmfa(Text):
file_ext = "xmfa"
MetadataElement(name="number_of_models", default=0, desc="Number of alignmened sequences", readonly=True, visible=True, optional=True, no_value=0)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
if (dataset.metadata.number_of_models == 1):
dataset.blurb = "1 alignment"
else:
dataset.blurb = "%s alignments" % dataset.metadata.number_of_models
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disc'
def sniff_prefix(self, file_prefix):
return file_prefix.startswith('#FormatVersion Mauve1')
def set_meta(self, dataset, **kwd):
dataset.metadata.number_of_models = generic_util.count_special_lines('^#Sequence([[:digit:]]+)Entry', dataset.file_name)
| en | 0.576926 | >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'infernal_model.cm' ) >>> InfernalCM().sniff( fname ) True >>> fname = get_test_fname( '2.txt' ) >>> InfernalCM().sniff( fname ) False Set the number of models and the version of CM file in dataset. HMMER2 files start with HMMER2.0 HMMER3 files start with HMMER3/f Class for hmmpress database files. Set the peek and blurb text. Create HTML content, used for displaying peek. # Binary model # SSI index for binary model # Profiles (MSV part) # Profiles (remained) Set the number of models in dataset. #[[:space:]+]STOCKHOLM[[:space:]+]1.0', dataset.file_name) Split the input files by model records. #Sequence([[:digit:]]+)Entry', dataset.file_name) | 2.001092 | 2 |
navycut/core/app_config.py | FlaskAio/navycut | 4 | 6630183 | <gh_stars>1-10
from flask import Blueprint
from flask_express import FlaskExpress
from flask_bootstrap import Bootstrap
from importlib import import_module
from werkzeug.routing import RequestRedirect
from werkzeug.exceptions import MethodNotAllowed, NotFound
from ._serving import run_simple_wsgi
from ..http.request import Request
from ..http.response import Response
from ..errors.misc import ImportNameNotFoundError
from ..urls import MethodView, url
from ..utils import path
from ..utils.tools import snake_to_camel_case
import typing as t
if t.TYPE_CHECKING:
from ..middleware import MiddlewareMixin
from .. import urls
_basedir = path.abspath(__file__).parent.parent
class _BaseIndexView(MethodView):
"""
The default index view for a navycut project.
"""
def get(self):
return self.render("_index.html")
class Navycut(FlaskExpress):
"""
The base class of navycut project.
    It basically inherits its services from the Flask class.
    We have customized some of the core Flask features
    to provide this full-stack service.
"""
request_class = Request
response_class = Response
def __init__(self):
super(Navycut, self).__init__("app_default_name",
template_folder=_basedir / 'templates',
static_folder=str(_basedir / "static"),
static_url_path="/static")
self.__registeredSisterName:t.List['str'] = []
def _attach_settings_modules(self):
"""
attach all the available and required
settings features with the core app.
"""
from ..conf import settings
self.settings = settings
self._add_config(settings)
self._configure_core_features(settings)
self._perform_sister_registration(settings)
self._import_and_attach_base_urls(settings)
self._perform_middleware_registration(settings)
def _add_config(self, settings) -> None:
"""
add the required and default
configuration with the core app.
:param settings:
the settings object from the project directory.
"""
self.import_name = settings.IMPORT_NAME
self.project_name = settings.PROJECT_NAME
self.config['PROJECT_NAME'] = settings.PROJECT_NAME
self.config['IMPORT_NAME'] = settings.IMPORT_NAME
self.config["BASE_DIR"] = settings.BASE_DIR
self.config['SECRET_KEY'] = settings.SECRET_KEY
self.config['FLASK_ADMIN_FLUID_LAYOUT'] = True
self.config['FLASK_ADMIN_SWATCH'] = 'cerulean'
self.config['SETTINGS'] = settings
self._configure_database(settings)
self._configure_default_mailer(settings)
self.debugging(settings.DEBUG)
if settings.EXTRA_ARGS is not None:
self._add_extra_config(settings)
def _configure_database(self, settings) -> bool:
"""
configure the default database as per the
details provided from settings.py `DATABASE`
:param settings:
the default settings object from the project directory.
"""
db_engine_name:str = settings.DATABASE['engine']
db_engine_file_name, db_engine_type = db_engine_name.rsplit(".", 1)
db_engine_module:t.ModuleType = import_module(db_engine_file_name)
db_engineer:t.Callable[["Navycut", t.Dict[str, str]], None]
db_engineer = getattr(db_engine_module, db_engine_type)
db_engineer(self, settings.DATABASE["creds"])
return True
def _configure_default_mailer(self, settings):
"""
The default config function to take smtp creds
from settings file and attach with the navycut app.
:param settings:
the settings object from the project directory.
"""
self.config['MAIL_SERVER'] = settings.EMAIL_HOST
self.config['MAIL_PORT'] = settings.EMAIL_PORT
self.config['MAIL_USE_TLS'] = settings.EMAIL_USE_TLS
self.config['MAIL_USE_SSL'] = settings.EMAIL_USE_SSL
self.config['MAIL_USERNAME'] = settings.EMAIL_HOST_USER
self.config['MAIL_PASSWORD'] = settings.EMAIL_HOST_PASSWORD
self.config['MAIL_TIMEOUT'] = settings.EMAIL_TIMEOUT
self.config['MAIL_SSL_KEYFILE'] = settings.EMAIL_SSL_KEYFILE
self.config['MAIL_SSL_CERTFILE'] = settings.EMAIL_SSL_CERTFILE
self.config['MAIL_DEFAULT_SENDER'] = settings.DEFAULT_FROM_EMAIL
self.config['MAIL_BACKEND'] = settings.EMAIL_BACKEND
self.config['MAIL_FILE_PATH'] = settings.EMAIL_FILE_PATH
self.config['MAIL_USE_LOCALTIME'] = settings.EMAIL_USE_LOCALTIME
self.config['MAIL_DEFAULT_CHARSET'] = settings.EMAIL_DEFAULT_CHARSET
def _add_extra_config(self, settings) -> None:
"""
config the extra settings provided
from settings.py - `EXTRA_ARGS`.
:param settings:
the settings object from the project directory.
"""
for key, value in settings.EXTRA_ARGS.items():
self.config[key] = value
def _configure_core_features(self, settings):
"""
add all the core features of navycut app here.
"""
Bootstrap(self)
def _perform_sister_registration(self, settings):
"""
        attach the apps listed in the settings
        file to the core navycut app.
:param settings:
the settings object from the project directory.
"""
self._registerSister(settings.INSTALLED_APPS)
def _perform_middleware_registration(self, settings):
"""
attach the available middlewares on
settings file with the core navycut app.
:param settings:
the settings object from the project directory.
"""
self._registerMiddleware(settings.MIDDLEWARE)
def _get_view_function(self, url, method="GET") -> tuple:
"""
        get the view function for a particular url.
:param url:
the url whose view function you want to find out.
:param method:
the request method. default is `GET`
"""
adapter = self.url_map.bind('0.0.0.0')
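        # bind the url map to a dummy host so paths can be matched outside of a request context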
try:
match = adapter.match(url, method=method)
except RequestRedirect as e:
# recursively match redirects
return self._get_view_function(e.new_url, method)
except (MethodNotAllowed, NotFound):
# no match
return None
try:
# return the view function and arguments
return self.view_functions[match[0]], match[1]
except KeyError:
# no view is associated with the endpoint
return None
def _has_view_function(self, url, method="GET") -> bool:
"""
        check whether a view function is
        present for the provided url.
        :param url:
            the url whose view function you want to check, i.e. whether it is present or not.
:param method:
the request method. default is `GET`
"""
res = self._get_view_function(url, method)
if res:
return True
else:
return False
def _configure_default_index(self):
"""
        If the project directory doesn't contain a view
        for the index page and debug mode is enabled,
then navycut will show a default index page
with the help of this function.
"""
if self.debug is not False and not self._has_view_function("/"):
self.add_url_rule(rule="/", view_func=_BaseIndexView.as_view("index"), methods=['GET'])
else:
pass
def initIns(self, ins) -> bool:
"""
initialize the extra instances with the core app.
:param ins:
extra instance object.
"""
ins.init_app(self)
return True
def __is_sister_name_registered(self, str_sister_name:str) -> bool:
"""
This function will return True if the provided
sister is registered. Else return False.
:param `str_sister_name`: The name of the app sister.
"""
return True if str_sister_name in self.__registeredSisterName else False
def _get_proper_sister_name(self, sister_name:str) -> t.Optional[str]:
"""
This function will return the full name of the app sister.
:param `sister_name`: The name of the app sister.
"""
try:
if not sister_name.endswith("Sister"):
_pure_sister_name = sister_name.rsplit(".", 1)
if len(_pure_sister_name) == 1:
_pure_sister_name = _pure_sister_name[0]
else:
_pure_sister_name = _pure_sister_name[1]
sister_name = f"{sister_name}.sister.{snake_to_camel_case(_pure_sister_name)}Sister"
return sister_name
except:
return None
def _import_sister(self, sister_name:str) -> "AppSister":
"""
import the app by app_name.
:param app_name:
            string type full name of the app.
"""
try:
sister_name = self._get_proper_sister_name(sister_name)
sister_location, sister_class_name = sister_name.rsplit(".", 1)
sister_file = import_module(sister_location)
real_sister_class = getattr(sister_file, sister_class_name)
sister:"AppSister" = real_sister_class()
if getattr(sister, "import_name", None) is None:
sister.import_name = sister_file.__name__
return sister
except Exception as e:
raise AttributeError(f"{sister_name} not installed at {self.config.get('BASE_DIR')}. Dobule check the app name. is it really {sister_name} ?") from e
def _registerSister(self, _sisters:list):
"""
register all the sister apps present in the settings.py - `INSTALLED_APPS`.
:param _sisters:
the list containing the name of the apps.
"""
## here i need to add the urls too.
for str_sister in _sisters:
sister:t.Type["AppSister"] = self._import_sister(str_sister)
self.__registeredSisterName.append(sister.name)
sister.init() #init the core features of the sister app.
sister_power:t.Type["Blueprint"] = sister.get_sister_power()
if sister_power is not None:
self.register_blueprint(sister_power, url_prefix=sister.url_prefix)
def _import_middleware(self, mw_name:str) -> t.Type["MiddlewareMixin"]:
"""
import the middleware by middleware name.
:param mw_name:
            the string name of the middleware to import.
"""
mw_file, mw_class_name = tuple(mw_name.rsplit(".", 1))
mw_module = import_module(mw_file)
real_mw_class = getattr(mw_module, mw_class_name)
return real_mw_class
def _registerMiddleware(self, _mwList:t.List["str"]):
"""
register all the middlewares present at settings.py - `MIDDLEWARE`.
:param _mwList:
the list containing the name of the middlewares.
"""
for middleware in _mwList:
mw_class:t.Type["MiddlewareMixin"] = self._import_middleware(middleware)
mw_maker = getattr(mw_class, "__maker__")
mw_maker() # attach the request, response object with the middleware function.
self.before_first_request_funcs\
.append(mw_class._before_first_request)
self.after_request_funcs\
.setdefault(None, [])\
.append(mw_class._after_request)
self.before_request_funcs\
.setdefault(None, [])\
.append(mw_class._before_request)
self.teardown_request_funcs\
.setdefault(None, [])\
.append(mw_class._teardown_request)
def _import_and_attach_base_urls(self, settings) -> None:
"""
add the base urls to the app.
:param settings:
the settings object from the project directory.
"""
url_file:str = f"{settings.PROJECT_NAME}.urls"
urls:t.ModuleType = import_module(url_file)
urlpatterns = getattr(urls, 'urlpatterns')
self._add_url_pattern(urlpatterns)
return None
def _add_url_pattern(
self,
pattern_list:list,
) -> None:
"""
add the url pattern with the blueprint power object.
:param pattern_list:
the url_pattern list.
"""
methods=['GET','PUT', 'DELETE', 'POST', 'HEAD', 'OPTIONS']
for url_path in pattern_list:
if repr(url_path).startswith("path"):
self.add_url_rule(
rule=url_path.url_rule,
view_func=url_path.views.as_view(url_path.name),
methods=methods
)
elif repr(url_path).startswith("url"):
self.add_url_rule(
rule=url_path.url_rule,
endpoint= url_path.name,
view_func=url_path.views,
methods=methods
)
elif repr(url_path).startswith("include"):
if self.__is_sister_name_registered(url_path.sister_name) is True:
self._add_url_pattern(url_path.urlpatterns)
else:
pass
def debugging(self, flag:bool) -> None:
"""
        use this function to change the debugging mode
        of the core app.
make it `False` for production use.
:param flag:
your desired state of the app's debug feature.
"""
self.debug = flag
self.config['DEBUG'] =flag
def run(self, host:str="0.0.0.0", port:int=8888, **options) -> None:
return self.run_wsgi(host, port, **options)
def run_wsgi(self, host:str, port:int, **options) -> None:
"""
run the default wsgi server.
:param host:
the default hostname to run the interactive server.
:param port:
the default port number to run the interactive server.
:param options:
            other keyword arguments to provide more
options to the werkzeug run_simple server.
"""
self._configure_default_index()
use_reloader = use_debugger = self.config['DEBUG']
options.setdefault("threaded", True)
run_simple_wsgi(
host,
port,
self,
use_reloader=use_reloader,
use_debugger=use_debugger,
**options
)
def __repr__(self) -> str:
"""
The representation of the Navycut class.
"""
return self.import_name
class AppSister:
"""
The default class to create the
sister(side) app for navycut core app.
supported params are:
:param import_app_feature:
:type bool:
Default is False. If True then the app
        will try to import the default features, i.e. admin and models.
:param import_name:
:type t.Optional[str]:
the import_name parameter for the sister's blueprint object.
:param name:
:type t.Optional[str]:
the name parameter for the sister's blueprint object.
This is required if you turn the `import_app_feature` to True.
:param template_folder:
:type t.Optional[str]:
define the template folder for the sister app.
:param static_folder:
:type t.Optional[str]:
define the static folder for the sister app.
:param static_url_path:
:type t.Optional[str]:
define the url path for the static files.
:param url_prefix:
:type t.Optional[str]:
url_prefix for all the routes of a sister app.
:param extra_ins:
:type t.Optional[t.Tuple[object]]:
provide extra instances to init with main navycut app.
:param seize_power:
:type bool:
default is True, if True then the sister app will
not create any blueprint object. Please turn it to
False if you don't want to add any url patterns with your sister app.
:for example::
from navycut.core import AppSister
from .urls import urlpatterns
class CustomSister(AppSister):
import_name = __name__
name = "custom"
...
"""
import_app_feature:bool = False
import_name: t.Optional[str] = None
name: t.Optional[str] = None
template_folder: t.Optional[str] = None
static_folder: t.Optional[str] = None
static_url_path: t.Optional[str] = None
extra_ins:t.Optional[t.Tuple[object]] = None
url_prefix:t.Optional[str] = None
seize_power:bool = True
def init(self, **kwargs) -> None:
"""
start initializing the sister app features.
"""
from navycut.conf import settings
if self.import_name is None:
raise ImportNameNotFoundError("app_register")
if self.name is None:
self.name = "_".join(self.import_name.split("."))
if self.template_folder is not None:
kwargs.update(dict(template_folder=self.template_folder,))
else:
kwargs.update(dict(template_folder=settings.TEMPLATE_DIR))
if self.static_folder is not None:
kwargs.update(dict(static_folder=self.static_folder,))
if self.static_url_path is not None:
kwargs.update(dict(static_url_path=self.static_url_path,))
if self.url_prefix is not None:
kwargs.update(dict(url_prefix=self.url_prefix,))
if self.extra_ins is not None:
for ins in self.extra_ins:
app.initIns(ins)
if self.import_app_feature is True:
self.import_app_features()
# The default blueprint object for each sister app
self.power:t.Optional["Blueprint"] = self._create_power_object(**kwargs)
def _create_power_object(self, **kwargs) -> t.Optional["Blueprint"]:
if self.seize_power is not True:
power = Blueprint(self.name, self.import_name, **kwargs)
return power
else:
return None
def get_sister_power(self) -> t.Optional[Blueprint]:
"""
return the default blueprint
object(power) for the selected sister app.
"""
return self.power
def import_app_features(self) -> None:
"""
        To use this feature you must set the
        value of the name variable to match the app name,
otherwise it may not work properly.
"""
import_module(f"{self.name}.models", package=None)
import_module(f"{self.name}.admin", package=None)
    def register_blueprint(self, *args, **kwargs) -> None:
        """
        register extra blueprints with the core app.
        """
        app.register_blueprint(*args, **kwargs)
def __repr__(self):
"""
the representation of the AppSister class
"""
return f"<AppSister '{self.name}'>"
"""
create the default navycut app here.
"""
app:Navycut = Navycut() | from flask import Blueprint
from flask_express import FlaskExpress
from flask_bootstrap import Bootstrap
from importlib import import_module
from werkzeug.routing import RequestRedirect
from werkzeug.exceptions import MethodNotAllowed, NotFound
from ._serving import run_simple_wsgi
from ..http.request import Request
from ..http.response import Response
from ..errors.misc import ImportNameNotFoundError
from ..urls import MethodView, url
from ..utils import path
from ..utils.tools import snake_to_camel_case
import typing as t
if t.TYPE_CHECKING:
from ..middleware import MiddlewareMixin
from .. import urls
_basedir = path.abspath(__file__).parent.parent
class _BaseIndexView(MethodView):
"""
The default index view for a navycut project.
"""
def get(self):
return self.render("_index.html")
class Navycut(FlaskExpress):
"""
The base class of navycut project.
    It basically inherits its services from the Flask class.
    We have customized some of the core Flask features
    to provide this full-stack service.
"""
request_class = Request
response_class = Response
def __init__(self):
super(Navycut, self).__init__("app_default_name",
template_folder=_basedir / 'templates',
static_folder=str(_basedir / "static"),
static_url_path="/static")
self.__registeredSisterName:t.List['str'] = []
def _attach_settings_modules(self):
"""
attach all the available and required
settings features with the core app.
"""
from ..conf import settings
self.settings = settings
self._add_config(settings)
self._configure_core_features(settings)
self._perform_sister_registration(settings)
self._import_and_attach_base_urls(settings)
self._perform_middleware_registration(settings)
def _add_config(self, settings) -> None:
"""
add the required and default
configuration with the core app.
:param settings:
the settings object from the project directory.
"""
self.import_name = settings.IMPORT_NAME
self.project_name = settings.PROJECT_NAME
self.config['PROJECT_NAME'] = settings.PROJECT_NAME
self.config['IMPORT_NAME'] = settings.IMPORT_NAME
self.config["BASE_DIR"] = settings.BASE_DIR
self.config['SECRET_KEY'] = settings.SECRET_KEY
self.config['FLASK_ADMIN_FLUID_LAYOUT'] = True
self.config['FLASK_ADMIN_SWATCH'] = 'cerulean'
self.config['SETTINGS'] = settings
self._configure_database(settings)
self._configure_default_mailer(settings)
self.debugging(settings.DEBUG)
if settings.EXTRA_ARGS is not None:
self._add_extra_config(settings)
def _configure_database(self, settings) -> bool:
"""
configure the default database as per the
details provided from settings.py `DATABASE`
:param settings:
the default settings object from the project directory.
"""
db_engine_name:str = settings.DATABASE['engine']
db_engine_file_name, db_engine_type = db_engine_name.rsplit(".", 1)
db_engine_module:t.ModuleType = import_module(db_engine_file_name)
db_engineer:t.Callable[["Navycut", t.Dict[str, str]], None]
db_engineer = getattr(db_engine_module, db_engine_type)
db_engineer(self, settings.DATABASE["creds"])
return True
def _configure_default_mailer(self, settings):
"""
The default config function to take smtp creds
from settings file and attach with the navycut app.
:param settings:
the settings object from the project directory.
"""
self.config['MAIL_SERVER'] = settings.EMAIL_HOST
self.config['MAIL_PORT'] = settings.EMAIL_PORT
self.config['MAIL_USE_TLS'] = settings.EMAIL_USE_TLS
self.config['MAIL_USE_SSL'] = settings.EMAIL_USE_SSL
self.config['MAIL_USERNAME'] = settings.EMAIL_HOST_USER
self.config['MAIL_PASSWORD'] = settings.EMAIL_HOST_PASSWORD
self.config['MAIL_TIMEOUT'] = settings.EMAIL_TIMEOUT
self.config['MAIL_SSL_KEYFILE'] = settings.EMAIL_SSL_KEYFILE
self.config['MAIL_SSL_CERTFILE'] = settings.EMAIL_SSL_CERTFILE
self.config['MAIL_DEFAULT_SENDER'] = settings.DEFAULT_FROM_EMAIL
self.config['MAIL_BACKEND'] = settings.EMAIL_BACKEND
self.config['MAIL_FILE_PATH'] = settings.EMAIL_FILE_PATH
self.config['MAIL_USE_LOCALTIME'] = settings.EMAIL_USE_LOCALTIME
self.config['MAIL_DEFAULT_CHARSET'] = settings.EMAIL_DEFAULT_CHARSET
def _add_extra_config(self, settings) -> None:
"""
config the extra settings provided
from settings.py - `EXTRA_ARGS`.
:param settings:
the settings object from the project directory.
"""
for key, value in settings.EXTRA_ARGS.items():
self.config[key] = value
def _configure_core_features(self, settings):
"""
add all the core features of navycut app here.
"""
Bootstrap(self)
def _perform_sister_registration(self, settings):
"""
        attach the apps listed in the settings
        file to the core navycut app.
:param settings:
the settings object from the project directory.
"""
self._registerSister(settings.INSTALLED_APPS)
def _perform_middleware_registration(self, settings):
"""
attach the available middlewares on
settings file with the core navycut app.
:param settings:
the settings object from the project directory.
"""
self._registerMiddleware(settings.MIDDLEWARE)
def _get_view_function(self, url, method="GET") -> tuple:
"""
        get the view function for a particular url.
:param url:
the url whose view function you want to find out.
:param method:
the request method. default is `GET`
"""
adapter = self.url_map.bind('0.0.0.0')
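        # bind the url map to a dummy host so paths can be matched outside of a request context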
try:
match = adapter.match(url, method=method)
except RequestRedirect as e:
# recursively match redirects
return self._get_view_function(e.new_url, method)
except (MethodNotAllowed, NotFound):
# no match
return None
try:
# return the view function and arguments
return self.view_functions[match[0]], match[1]
except KeyError:
# no view is associated with the endpoint
return None
def _has_view_function(self, url, method="GET") -> bool:
"""
        check whether a view function is
        present for the provided url.
        :param url:
            the url whose view function you want to check, i.e. whether it is present or not.
:param method:
the request method. default is `GET`
"""
res = self._get_view_function(url, method)
if res:
return True
else:
return False
def _configure_default_index(self):
"""
        If the project directory doesn't contain a view
        for the index page and debug mode is enabled,
then navycut will show a default index page
with the help of this function.
"""
if self.debug is not False and not self._has_view_function("/"):
self.add_url_rule(rule="/", view_func=_BaseIndexView.as_view("index"), methods=['GET'])
else:
pass
def initIns(self, ins) -> bool:
"""
initialize the extra instances with the core app.
:param ins:
extra instance object.
"""
ins.init_app(self)
return True
def __is_sister_name_registered(self, str_sister_name:str) -> bool:
"""
This function will return True if the provided
sister is registered. Else return False.
:param `str_sister_name`: The name of the app sister.
"""
return True if str_sister_name in self.__registeredSisterName else False
def _get_proper_sister_name(self, sister_name:str) -> t.Optional[str]:
"""
This function will return the full name of the app sister.
:param `sister_name`: The name of the app sister.
"""
try:
if not sister_name.endswith("Sister"):
_pure_sister_name = sister_name.rsplit(".", 1)
if len(_pure_sister_name) == 1:
_pure_sister_name = _pure_sister_name[0]
else:
_pure_sister_name = _pure_sister_name[1]
sister_name = f"{sister_name}.sister.{snake_to_camel_case(_pure_sister_name)}Sister"
return sister_name
except:
return None
def _import_sister(self, sister_name:str) -> "AppSister":
"""
import the app by app_name.
:param app_name:
            string type full name of the app.
"""
try:
sister_name = self._get_proper_sister_name(sister_name)
sister_location, sister_class_name = sister_name.rsplit(".", 1)
sister_file = import_module(sister_location)
real_sister_class = getattr(sister_file, sister_class_name)
sister:"AppSister" = real_sister_class()
if getattr(sister, "import_name", None) is None:
sister.import_name = sister_file.__name__
return sister
except Exception as e:
raise AttributeError(f"{sister_name} not installed at {self.config.get('BASE_DIR')}. Dobule check the app name. is it really {sister_name} ?") from e
def _registerSister(self, _sisters:list):
"""
register all the sister apps present in the settings.py - `INSTALLED_APPS`.
:param _sisters:
the list containing the name of the apps.
"""
## here i need to add the urls too.
for str_sister in _sisters:
sister:t.Type["AppSister"] = self._import_sister(str_sister)
self.__registeredSisterName.append(sister.name)
sister.init() #init the core features of the sister app.
sister_power:t.Type["Blueprint"] = sister.get_sister_power()
if sister_power is not None:
self.register_blueprint(sister_power, url_prefix=sister.url_prefix)
def _import_middleware(self, mw_name:str) -> t.Type["MiddlewareMixin"]:
"""
import the middleware by middleware name.
:param mw_name:
            the string name of the middleware to import.
"""
mw_file, mw_class_name = tuple(mw_name.rsplit(".", 1))
mw_module = import_module(mw_file)
real_mw_class = getattr(mw_module, mw_class_name)
return real_mw_class
def _registerMiddleware(self, _mwList:t.List["str"]):
"""
register all the middlewares present at settings.py - `MIDDLEWARE`.
:param _mwList:
the list containing the name of the middlewares.
"""
for middleware in _mwList:
mw_class:t.Type["MiddlewareMixin"] = self._import_middleware(middleware)
mw_maker = getattr(mw_class, "__maker__")
mw_maker() # attach the request, response object with the middleware function.
self.before_first_request_funcs\
.append(mw_class._before_first_request)
self.after_request_funcs\
.setdefault(None, [])\
.append(mw_class._after_request)
self.before_request_funcs\
.setdefault(None, [])\
.append(mw_class._before_request)
self.teardown_request_funcs\
.setdefault(None, [])\
.append(mw_class._teardown_request)
def _import_and_attach_base_urls(self, settings) -> None:
"""
add the base urls to the app.
:param settings:
the settings object from the project directory.
"""
url_file:str = f"{settings.PROJECT_NAME}.urls"
urls:t.ModuleType = import_module(url_file)
urlpatterns = getattr(urls, 'urlpatterns')
self._add_url_pattern(urlpatterns)
return None
def _add_url_pattern(
self,
pattern_list:list,
) -> None:
"""
add the url pattern with the blueprint power object.
:param pattern_list:
the url_pattern list.
"""
methods=['GET','PUT', 'DELETE', 'POST', 'HEAD', 'OPTIONS']
for url_path in pattern_list:
if repr(url_path).startswith("path"):
self.add_url_rule(
rule=url_path.url_rule,
view_func=url_path.views.as_view(url_path.name),
methods=methods
)
elif repr(url_path).startswith("url"):
self.add_url_rule(
rule=url_path.url_rule,
endpoint= url_path.name,
view_func=url_path.views,
methods=methods
)
elif repr(url_path).startswith("include"):
if self.__is_sister_name_registered(url_path.sister_name) is True:
self._add_url_pattern(url_path.urlpatterns)
else:
pass
def debugging(self, flag:bool) -> None:
"""
        use this function to change the debugging mode
        of the core app.
make it `False` for production use.
:param flag:
your desired state of the app's debug feature.
"""
self.debug = flag
self.config['DEBUG'] =flag
def run(self, host:str="0.0.0.0", port:int=8888, **options) -> None:
return self.run_wsgi(host, port, **options)
def run_wsgi(self, host:str, port:int, **options) -> None:
"""
run the default wsgi server.
:param host:
the default hostname to run the interactive server.
:param port:
the default port number to run the interactive server.
:param options:
            other keyword arguments to provide more
options to the werkzeug run_simple server.
"""
self._configure_default_index()
use_reloader = use_debugger = self.config['DEBUG']
options.setdefault("threaded", True)
run_simple_wsgi(
host,
port,
self,
use_reloader=use_reloader,
use_debugger=use_debugger,
**options
)
def __repr__(self) -> str:
"""
The representation of the Navycut class.
"""
return self.import_name
class AppSister:
"""
The default class to create the
sister(side) app for navycut core app.
supported params are:
:param import_app_feature:
:type bool:
Default is False. If True then the app
        will try to import the default features, i.e. admin and models.
:param import_name:
:type t.Optional[str]:
the import_name parameter for the sister's blueprint object.
:param name:
:type t.Optional[str]:
the name parameter for the sister's blueprint object.
This is required if you turn the `import_app_feature` to True.
:param template_folder:
:type t.Optional[str]:
define the template folder for the sister app.
:param static_folder:
:type t.Optional[str]:
define the static folder for the sister app.
:param static_url_path:
:type t.Optional[str]:
define the url path for the static files.
:param url_prefix:
:type t.Optional[str]:
url_prefix for all the routes of a sister app.
:param extra_ins:
:type t.Optional[t.Tuple[object]]:
provide extra instances to init with main navycut app.
:param seize_power:
:type bool:
        default is True. If True the sister app will
        not create any blueprint object; set it to
        False if you want to register url patterns with your sister app.
:for example::
from navycut.core import AppSister
from .urls import urlpatterns
class CustomSister(AppSister):
import_name = __name__
name = "custom"
...
"""
import_app_feature:bool = False
import_name: t.Optional[str] = None
name: t.Optional[str] = None
template_folder: t.Optional[str] = None
static_folder: t.Optional[str] = None
static_url_path: t.Optional[str] = None
extra_ins:t.Optional[t.Tuple[object]] = None
url_prefix:t.Optional[str] = None
seize_power:bool = True
def init(self, **kwargs) -> None:
"""
start initializing the sister app features.
"""
from navycut.conf import settings
if self.import_name is None:
raise ImportNameNotFoundError("app_register")
if self.name is None:
self.name = "_".join(self.import_name.split("."))
if self.template_folder is not None:
kwargs.update(dict(template_folder=self.template_folder,))
else:
kwargs.update(dict(template_folder=settings.TEMPLATE_DIR))
if self.static_folder is not None:
kwargs.update(dict(static_folder=self.static_folder,))
if self.static_url_path is not None:
kwargs.update(dict(static_url_path=self.static_url_path,))
if self.url_prefix is not None:
kwargs.update(dict(url_prefix=self.url_prefix,))
if self.extra_ins is not None:
for ins in self.extra_ins:
app.initIns(ins)
if self.import_app_feature is True:
self.import_app_features()
# The default blueprint object for each sister app
self.power:t.Optional["Blueprint"] = self._create_power_object(**kwargs)
def _create_power_object(self, **kwargs) -> t.Optional["Blueprint"]:
if self.seize_power is not True:
power = Blueprint(self.name, self.import_name, **kwargs)
return power
else:
return None
def get_sister_power(self) -> t.Optional[Blueprint]:
"""
return the default blueprint
object(power) for the selected sister app.
"""
return self.power
def import_app_features(self) -> None:
"""
        To use this feature you must set the
        `name` variable to the app's name,
        otherwise it may not work properly.
"""
import_module(f"{self.name}.models", package=None)
import_module(f"{self.name}.admin", package=None)
def register_blueprint(self, *wargs, **kwargs) -> None:
"""
        register extra blueprints with the core app.
"""
app.register_blueprint(*wargs, **kwargs)
def __repr__(self):
"""
the representation of the AppSister class
"""
return f"<AppSister '{self.name}'>"
"""
create the default navycut app here.
"""
app:Navycut = Navycut() | en | 0.748096 | The default index view for a navycut project. The base class of navycut project. It's basically inheritaing the services from the class Flask. We have customized some the core flask features to provide this huge and fullstack service. attach all the available and required settings features with the core app. add the required and default configuration with the core app. :param settings: the settings object from the project directory. configure the default database as per the details provided from settings.py `DATABASE` :param settings: the default settings object from the project directory. The default config function to take smtp creds from settings file and attach with the navycut app. :param settings: the settings object from the project directory. config the extra settings provided from settings.py - `EXTRA_ARGS`. :param settings: the settings object from the project directory. add all the core features of navycut app here. attach the available apps on seetings file with the core navycut app. :param settings: the settings object from the project directory. attach the available middlewares on settings file with the core navycut app. :param settings: the settings object from the project directory. get the view function for a particulat url. :param url: the url whose view function you want to find out. :param method: the request method. default is `GET` # recursively match redirects # no match # return the view function and arguments # no view is associated with the endpoint check wheather a view fucntion is present or not for the provided url. :param url: the url whose view function you want to check i.e present or not. :param method: the request method. default is `GET` If the project directory dosen't contain the view for the index page and teh debug is in true mode, then navycut will show a default index page with the help of this function. initialize the extra instances with the core app. :param ins: extra instance object. This function will return True if the provided sister is registered. Else return False. :param `str_sister_name`: The name of the app sister. This function will return the full name of the app sister. :param `sister_name`: The name of the app sister. import the app by app_name. :param app_name: string type full name fo the app. register all the sister apps present in the settings.py - `INSTALLED_APPS`. :param _sisters: the list containing the name of the apps. ## here i need to add the urls too. #init the core features of the sister app. import the middleware by middleware name. :param mw_name: teh str name of teh middleware, want to import. register all the middlewares present at settings.py - `MIDDLEWARE`. :param _mwList: the list containing the name of the middlewares. # attach the request, response object with the middleware function. add the base urls to the app. :param settings: the settings object from the project directory. add the url pattern with the blueprint power object. :param pattern_list: the url_pattern list. to change the debugging mode of the core app then please play with this function. make it `False` for production use. :param flag: your desired state of the app's debug feature. run the default wsgi server. :param host: the default hostname to run the interactive server. :param port: the default port number to run the interactive server. :param options: other kwargs type vaule to provide more options to the werkzeug run_simple server. The representation of the Navycut class. 
The default class to create the sister(side) app for navycut core app. supported params are: :param import_app_feature: :type bool: Default is False. If True then the app will try to import the default fetaures, i.e admin and models. :param import_name: :type t.Optional[str]: the import_name parameter for the sister's blueprint object. :param name: :type t.Optional[str]: the name parameter for the sister's blueprint object. This is required if you turn the `import_app_feature` to True. :param template_folder: :type t.Optional[str]: define the template folder for the sister app. :param static_folder: :type t.Optional[str]: define the static folder for the sister app. :param static_url_path: :type t.Optional[str]: define the url path for the static files. :param url_prefix: :type t.Optional[str]: url_prefix for all the routes of a sister app. :param extra_ins: :type t.Optional[t.Tuple[object]]: provide extra instances to init with main navycut app. :param seize_power: :type bool: default is True, if True then the sister app will not create any blueprint object. Please turn it to False if you don't want to add any url patterns with your sister app. :for example:: from navycut.core import AppSister from .urls import urlpatterns class CustomSister(AppSister): import_name = __name__ name = "custom" ... start initializing the sister app features. # The default blueprint object for each sister app return the default blueprint object(power) for the selected sister app. To use this feature you must need to set the value of name variable same as the app name, otherwise it may not work properly. register extra blueprints with the coer app. the representation of the AppSister class create the default navycut app here. | 2.214588 | 2 |
python_magnetgeo/Helix.py | ValletRomain/python_magnetgeo | 0 | 6630184 | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
Provides definition for Helix:
* Geom data: r, z
* Model Axi: definition of helical cut (provided from MagnetTools)
* Model 3D: actual 3D CAD
* Shape: definition of Shape eventually added to the helical cut
"""
import json
import yaml
from . import deserialize
from . import Shape
from . import ModelAxi
from . import Model3D
class Helix(yaml.YAMLObject):
"""
name :
r :
z :
cutwidth:
dble :
odd :
axi :
m3d :
shape :
"""
yaml_tag = 'Helix'
def __init__(self, name, r=[], z=[], cutwidth=0.0, odd=False, dble=False, axi=ModelAxi.ModelAxi(), m3d=Model3D.Model3D(), shape=Shape.Shape()):
"""
initialize object
"""
self.name = name
self.dble = dble
self.odd = odd
self.r = r
self.z = z
self.cutwidth = cutwidth
self.axi = axi
self.m3d = m3d
self.shape = shape
def __repr__(self):
"""
representation of object
"""
return "%s(name=%r, odd=%r, dble=%r, r=%r, z=%r, cutwidth=%r, axi=%r, m3d=%r, shape=%r)" % \
(self.__class__.__name__,
self.name,
self.odd,
self.dble,
self.r,
self.z,
self.cutwidth,
self.axi,
self.m3d,
self.shape
)
def dump(self):
"""
dump object to file
"""
try:
ostream = open(self.name + '.yaml', 'w')
yaml.dump(self, stream=ostream)
ostream.close()
except:
raise Exception("Failed to Helix dump")
def load(self):
"""
load object from file
"""
data = None
try:
istream = open(self.name + '.yaml', 'r')
            data = yaml.load(stream=istream, Loader=yaml.FullLoader)  # explicit Loader required by PyYAML >= 5.1
istream.close()
except:
raise Exception("Failed to load Helix data %s.yaml"%self.name)
self.name = data.name
self.dble = data.dble
self.odd = data.odd
self.r = data.r
self.z = data.z
self.cutwidth = data.cutwidth
self.axi = data.axi
self.m3d = data.m3d
self.shape = data.shape
def to_json(self):
"""
convert from yaml to json
"""
return json.dumps(self, default=deserialize.serialize_instance, sort_keys=True, indent=4)
def from_json(self, string):
"""
convert from json to yaml
"""
return json.loads(string, object_hook=deserialize.unserialize_object)
def write_to_json(self):
"""
write from json file
"""
ostream = open(self.name + '.json', 'w')
jsondata = self.to_json()
ostream.write(str(jsondata))
ostream.close()
def read_from_json(self):
"""
read from json file
"""
istream = open(self.name + '.json', 'r')
jsondata = self.from_json(istream.read())
print (type(jsondata))
istream.close()
def get_Nturns(self):
"""
returns the number of turn
"""
return self.axi.get_Nturns()
def gmsh(self, debug=False):
"""
create gmsh geometry
"""
import gmsh
# TODO get axi model
gmsh_ids = []
x = self.r[0]
dr = self.r[1] - self.r[0]
y = -self.axi.h
_id = gmsh.model.occ.addRectangle(self.r[0], self.z[0], 0, dr, y-self.z[0])
gmsh_ids.append(_id)
for i, (n, pitch) in enumerate(zip(self.axi.turns, self.axi.pitch)):
dz = n * pitch
_id = gmsh.model.occ.addRectangle(x, y, 0, dr, dz)
gmsh_ids.append(_id)
y += dz
_id = gmsh.model.occ.addRectangle(self.r[0], y, 0, dr, self.z[1]-y)
gmsh_ids.append(_id)
if debug:
print("gmsh_ids:", len(gmsh_ids))
for i in gmsh_ids:
print(i)
return gmsh_ids
def gmsh_bcs(self, name, ids, debug=False):
"""
        retrieve ids for BCs in gmsh geometry
"""
import gmsh
# set physical name
for i,id in enumerate(ids):
ps = gmsh.model.addPhysicalGroup(2, [id])
gmsh.model.setPhysicalName(2, ps, "%s_Cu%d" % (name, i))
# get BC ids
gmsh.option.setNumber("Geometry.OCCBoundsUseStl", 1)
eps = 1.e-3
# TODO: if z[xx] < 0 multiply by 1+eps to get a min by 1-eps to get a max
zmin = self.z[0]* (1+eps)
zmax = self.z[1]* (1+eps)
ov = gmsh.model.getEntitiesInBoundingBox(self.r[0]* (1-eps), zmin, 0,
self.r[0]* (1+eps), zmax, 0, 1)
r0_bc_ids = [tag for (dim,tag) in ov]
ov = gmsh.model.getEntitiesInBoundingBox(self.r[1]* (1-eps), zmin, 0,
self.r[1]* (1+eps), zmax, 0, 1)
r1_bc_ids = [tag for (dim,tag) in ov]
return (r0_bc_ids, r1_bc_ids)
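# Editor's illustrative sketch (not part of the original source): a rough
# outline of how gmsh() and gmsh_bcs() above might be driven together.  The
# numeric values are placeholders and the synchronize() step is assumed to be
# needed before querying the OCC entities.
def _example_helix_gmsh() -> None:  # never called; illustration only
    import gmsh
    gmsh.initialize()
    helix = Helix("H1", r=[38.6, 48.4], z=[-180.0, 180.0], cutwidth=0.2)
    ids = helix.gmsh()                      # axisymmetric rectangles per turn
    gmsh.model.occ.synchronize()
    r0_ids, r1_ids = helix.gmsh_bcs("H1", ids)
    gmsh.finalize()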
def Helix_constructor(loader, node):
"""
build an helix object
"""
values = loader.construct_mapping(node)
name = values["name"]
r = values["r"]
z = values["z"]
odd = values["odd"]
dble = values["dble"]
cutwidth = values["cutwidth"]
axi = values["axi"]
m3d = values["m3d"]
shape = values["shape"]
return Helix(name, r, z, cutwidth, odd, dble, axi, m3d, shape)
yaml.add_constructor(u'!Helix', Helix_constructor)
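# Editor's illustrative sketch (not part of the original source): exercising
# the serialization helpers defined above.  The numeric values are placeholders.
def _example_helix_io() -> None:  # never called; illustration only
    helix = Helix("H1", r=[38.6, 48.4], z=[-180.0, 180.0], cutwidth=0.2)
    helix.dump()               # writes H1.yaml in the current directory
    helix.write_to_json()      # writes H1.json via to_json()
    print(helix.get_Nturns())  # delegates to the embedded ModelAxi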
| #!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
Provides definition for Helix:
* Geom data: r, z
* Model Axi: definition of helical cut (provided from MagnetTools)
* Model 3D: actual 3D CAD
* Shape: definition of Shape eventually added to the helical cut
"""
import json
import yaml
from . import deserialize
from . import Shape
from . import ModelAxi
from . import Model3D
class Helix(yaml.YAMLObject):
"""
name :
r :
z :
cutwidth:
dble :
odd :
axi :
m3d :
shape :
"""
yaml_tag = 'Helix'
def __init__(self, name, r=[], z=[], cutwidth=0.0, odd=False, dble=False, axi=ModelAxi.ModelAxi(), m3d=Model3D.Model3D(), shape=Shape.Shape()):
"""
initialize object
"""
self.name = name
self.dble = dble
self.odd = odd
self.r = r
self.z = z
self.cutwidth = cutwidth
self.axi = axi
self.m3d = m3d
self.shape = shape
def __repr__(self):
"""
representation of object
"""
return "%s(name=%r, odd=%r, dble=%r, r=%r, z=%r, cutwidth=%r, axi=%r, m3d=%r, shape=%r)" % \
(self.__class__.__name__,
self.name,
self.odd,
self.dble,
self.r,
self.z,
self.cutwidth,
self.axi,
self.m3d,
self.shape
)
def dump(self):
"""
dump object to file
"""
try:
ostream = open(self.name + '.yaml', 'w')
yaml.dump(self, stream=ostream)
ostream.close()
except:
raise Exception("Failed to Helix dump")
def load(self):
"""
load object from file
"""
data = None
try:
istream = open(self.name + '.yaml', 'r')
            data = yaml.load(stream=istream, Loader=yaml.FullLoader)  # explicit Loader required by PyYAML >= 5.1
istream.close()
except:
raise Exception("Failed to load Helix data %s.yaml"%self.name)
self.name = data.name
self.dble = data.dble
self.odd = data.odd
self.r = data.r
self.z = data.z
self.cutwidth = data.cutwidth
self.axi = data.axi
self.m3d = data.m3d
self.shape = data.shape
def to_json(self):
"""
convert from yaml to json
"""
return json.dumps(self, default=deserialize.serialize_instance, sort_keys=True, indent=4)
def from_json(self, string):
"""
convert from json to yaml
"""
return json.loads(string, object_hook=deserialize.unserialize_object)
def write_to_json(self):
"""
write from json file
"""
ostream = open(self.name + '.json', 'w')
jsondata = self.to_json()
ostream.write(str(jsondata))
ostream.close()
def read_from_json(self):
"""
read from json file
"""
istream = open(self.name + '.json', 'r')
jsondata = self.from_json(istream.read())
print (type(jsondata))
istream.close()
def get_Nturns(self):
"""
returns the number of turn
"""
return self.axi.get_Nturns()
def gmsh(self, debug=False):
"""
create gmsh geometry
"""
import gmsh
# TODO get axi model
gmsh_ids = []
x = self.r[0]
dr = self.r[1] - self.r[0]
y = -self.axi.h
_id = gmsh.model.occ.addRectangle(self.r[0], self.z[0], 0, dr, y-self.z[0])
gmsh_ids.append(_id)
for i, (n, pitch) in enumerate(zip(self.axi.turns, self.axi.pitch)):
dz = n * pitch
_id = gmsh.model.occ.addRectangle(x, y, 0, dr, dz)
gmsh_ids.append(_id)
y += dz
_id = gmsh.model.occ.addRectangle(self.r[0], y, 0, dr, self.z[1]-y)
gmsh_ids.append(_id)
if debug:
print("gmsh_ids:", len(gmsh_ids))
for i in gmsh_ids:
print(i)
return gmsh_ids
def gmsh_bcs(self, name, ids, debug=False):
"""
        retrieve ids for BCs in gmsh geometry
"""
import gmsh
# set physical name
for i,id in enumerate(ids):
ps = gmsh.model.addPhysicalGroup(2, [id])
gmsh.model.setPhysicalName(2, ps, "%s_Cu%d" % (name, i))
# get BC ids
gmsh.option.setNumber("Geometry.OCCBoundsUseStl", 1)
eps = 1.e-3
# TODO: if z[xx] < 0 multiply by 1+eps to get a min by 1-eps to get a max
zmin = self.z[0]* (1+eps)
zmax = self.z[1]* (1+eps)
ov = gmsh.model.getEntitiesInBoundingBox(self.r[0]* (1-eps), zmin, 0,
self.r[0]* (1+eps), zmax, 0, 1)
r0_bc_ids = [tag for (dim,tag) in ov]
ov = gmsh.model.getEntitiesInBoundingBox(self.r[1]* (1-eps), zmin, 0,
self.r[1]* (1+eps), zmax, 0, 1)
r1_bc_ids = [tag for (dim,tag) in ov]
return (r0_bc_ids, r1_bc_ids)
def Helix_constructor(loader, node):
"""
build an helix object
"""
values = loader.construct_mapping(node)
name = values["name"]
r = values["r"]
z = values["z"]
odd = values["odd"]
dble = values["dble"]
cutwidth = values["cutwidth"]
axi = values["axi"]
m3d = values["m3d"]
shape = values["shape"]
return Helix(name, r, z, cutwidth, odd, dble, axi, m3d, shape)
yaml.add_constructor(u'!Helix', Helix_constructor)
| en | 0.662393 | #!/usr/bin/env python3 #-*- coding:utf-8 -*- Provides definition for Helix: * Geom data: r, z * Model Axi: definition of helical cut (provided from MagnetTools) * Model 3D: actual 3D CAD * Shape: definition of Shape eventually added to the helical cut name : r : z : cutwidth: dble : odd : axi : m3d : shape : initialize object representation of object dump object to file load object from file convert from yaml to json convert from json to yaml write from json file read from json file returns the number of turn create gmsh geometry # TODO get axi model retreive ids for bcs in gmsh geometry # set physical name # get BC ids # TODO: if z[xx] < 0 multiply by 1+eps to get a min by 1-eps to get a max build an helix object | 2.836571 | 3 |
gaia-sdk-python/gaia_sdk/api/SkillRef.py | leftshiftone/gaia-sdk | 0 | 6630185 | import json
import rx
import rx.operators as ops
from rx.core.typing import Observable, Scheduler
from gaia_sdk.http import GaiaStreamClient
class SkillProvisionCanceledResponse:
def __init__(self, reference: str):
self._reference = reference
@property
def reference(self) -> str:
return self._reference
class SkillBuildResponse:
def __init__(self, reference):
self._reference = reference
@property
def reference(self) -> str:
return self._reference
class SkillEvaluation:
def __init__(self, response: dict):
self._response = response
@property
def response(self):
return self._response
class SkillRef:
def __init__(self, uri: str, client: GaiaStreamClient, scheduler: Scheduler):
self._uri = uri
self._client = client
self._scheduler = scheduler
def start(self) -> Observable[any]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/start"),
self._scheduler) \
.pipe(
ops.map(lambda r: {})
)
def stop(self) -> Observable[any]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/stop"),
self._scheduler) \
.pipe(
ops.map(lambda r: {})
)
def logs(self, number_of_lines: int) -> Observable[str]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri, 'numberOfLines': number_of_lines},
url_postfix="/skill/logs"), self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.flat_map(lambda r: rx.from_iterable(r['logLines']))
)
def cancel(self) -> Observable[SkillProvisionCanceledResponse]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/cancel"),
self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.map(lambda r: SkillProvisionCanceledResponse(r["reference"]))
)
def build(self) -> Observable[SkillBuildResponse]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/build"),
self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.map(lambda r: SkillBuildResponse(r["reference"]))
)
def evaluate(self, payload: dict, contract: str = None) -> Observable[SkillEvaluation]:
request = {'uri': self._uri, 'payload': payload}
if contract is not None:
request['contract'] = contract
return rx.from_callable(
lambda: self._client.post_json(request, url_postfix="/skill/evaluate"),
self._scheduler) \
.pipe(
ops.map(lambda response: json.loads(response.content)),
ops.map(lambda d: SkillEvaluation(d)))
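# Editor's illustrative sketch (not part of the original source): subscribing
# to the observables returned by the methods above.  How the client and
# scheduler are obtained in a real application is an assumption; only the
# SkillRef API itself comes from this file.
def _example_skill_ref(client: GaiaStreamClient, scheduler: Scheduler) -> None:
    skill = SkillRef("gaia://tenant/skill", client, scheduler)
    skill.build().subscribe(lambda r: print("build reference:", r.reference))
    skill.evaluate({"text": "hello"}).subscribe(
        lambda e: print("evaluation:", e.response)
    )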
| import json
import rx
import rx.operators as ops
from rx.core.typing import Observable, Scheduler
from gaia_sdk.http import GaiaStreamClient
class SkillProvisionCanceledResponse:
def __init__(self, reference: str):
self._reference = reference
@property
def reference(self) -> str:
return self._reference
class SkillBuildResponse:
def __init__(self, reference):
self._reference = reference
@property
def reference(self) -> str:
return self._reference
class SkillEvaluation:
def __init__(self, response: dict):
self._response = response
@property
def response(self):
return self._response
class SkillRef:
def __init__(self, uri: str, client: GaiaStreamClient, scheduler: Scheduler):
self._uri = uri
self._client = client
self._scheduler = scheduler
def start(self) -> Observable[any]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/start"),
self._scheduler) \
.pipe(
ops.map(lambda r: {})
)
def stop(self) -> Observable[any]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/stop"),
self._scheduler) \
.pipe(
ops.map(lambda r: {})
)
def logs(self, number_of_lines: int) -> Observable[str]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri, 'numberOfLines': number_of_lines},
url_postfix="/skill/logs"), self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.flat_map(lambda r: rx.from_iterable(r['logLines']))
)
def cancel(self) -> Observable[SkillProvisionCanceledResponse]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/cancel"),
self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.map(lambda r: SkillProvisionCanceledResponse(r["reference"]))
)
def build(self) -> Observable[SkillBuildResponse]:
return rx.from_callable(lambda: self._client.post_json({'uri': self._uri}, url_postfix="/skill/build"),
self._scheduler) \
.pipe(
ops.map(lambda r: json.loads(r.content)),
ops.map(lambda r: SkillBuildResponse(r["reference"]))
)
def evaluate(self, payload: dict, contract: str = None) -> Observable[SkillEvaluation]:
request = {'uri': self._uri, 'payload': payload}
if contract is not None:
request['contract'] = contract
return rx.from_callable(
lambda: self._client.post_json(request, url_postfix="/skill/evaluate"),
self._scheduler) \
.pipe(
ops.map(lambda response: json.loads(response.content)),
ops.map(lambda d: SkillEvaluation(d)))
| none | 1 | 2.419362 | 2 |
|
regtests/go/generics_subclasses.py | gython/Gython | 65 | 6630186 | '''
generics classes with common base.
'''
class A:
def __init__(self, x:int):
int self.x = x
def method1(self) -> int:
return self.x
class B(A):
def method1(self) ->int:
return self.x * 2
class C(A):
def method1(self) ->int:
return self.x + 200
def my_generic( g:A ) ->int:
return g.method1()
def main():
a = A( 100 )
b = B( 100 )
c = C( 100 )
x = my_generic( a )
TestError(a.x == x )
y = my_generic( b )
TestError( y==200 )
z = my_generic( c )
TestError( z==300 )
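# Editor's note (not part of the original source): in plain CPython the same
# dynamic dispatch works without the type annotations -- my_generic(a),
# my_generic(b) and my_generic(c) return 100, 200 and 300 respectively; the
# typed base class A is presumably what lets the Go backend accept all three
# subclasses through a single parameter.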
| '''
generics classes with common base.
'''
class A:
def __init__(self, x:int):
int self.x = x
def method1(self) -> int:
return self.x
class B(A):
def method1(self) ->int:
return self.x * 2
class C(A):
def method1(self) ->int:
return self.x + 200
def my_generic( g:A ) ->int:
return g.method1()
def main():
a = A( 100 )
b = B( 100 )
c = C( 100 )
x = my_generic( a )
TestError(a.x == x )
y = my_generic( b )
TestError( y==200 )
z = my_generic( c )
TestError( z==300 )
| en | 0.902121 | generics classes with common base. | 3.865931 | 4 |
Kattis/dasblinkenlights.py | ruidazeng/online-judge | 0 | 6630187 | <reponame>ruidazeng/online-judge
p, q, s = map(int, input().split())
for x in range(1, s + 1):
if x % p == 0 and x % q == 0:
print("yes")
quit()
print("no") | p, q, s = map(int, input().split())
for x in range(1, s + 1):
if x % p == 0 and x % q == 0:
print("yes")
quit()
print("no") | none | 1 | 3.349272 | 3 |
|
podman/domain/containers_create.py | alvistack/containers-podman-py | 0 | 6630188 | """Mixin to provide Container create() method."""
import copy
import logging
import re
from contextlib import suppress
from typing import Any, Dict, List, MutableMapping, Union
from podman import api
from podman.domain.containers import Container
from podman.domain.images import Image
from podman.domain.pods import Pod
from podman.errors import ImageNotFound
logger = logging.getLogger("podman.containers")
class CreateMixin: # pylint: disable=too-few-public-methods
"""Class providing create method for ContainersManager."""
def create(
self, image: Union[Image, str], command: Union[str, List[str], None] = None, **kwargs
) -> Container:
"""Create a container.
Args:
image: Image to run.
command: Command to run in the container.
Keyword Args:
auto_remove (bool): Enable auto-removal of the container on daemon side when the
container's process exits.
blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight)
in the form of: [{"Path": "device_path", "Weight": weight}].
blkio_weight (int): Block IO weight (relative weight), accepts a weight value
between 10 and 1000.
cap_add (List[str]): Add kernel capabilities. For example: ["SYS_ADMIN", "MKNOD"]
cap_drop (List[str]): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs (Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution (0-3, 0,1).
Only effective on NUMA systems.
detach (bool): Run container in the background and return a Container object.
device_cgroup_rules (List[str]): A list of cgroup rules to apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device in the form of:
`[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (List[str): Expose host devices to the container, as a List[str] in the form
<path_on_host>:<path_in_container>:<cgroup_permissions>.
For example:
/dev/sda:/dev/xvda:rwm allows the container to have read-write access to the
host's /dev/sda via a node named /dev/xvda inside the container.
dns (List[str]): Set custom DNS servers.
dns_opt (List[str]): Additional options to be added to the container's resolv.conf file.
dns_search (List[str]): DNS search domains.
domainname (Union[str, List[str]]): Set custom DNS search domains.
entrypoint (Union[str, List[str]]): The entrypoint for the container.
environment (Union[Dict[str, str], List[str]): Environment variables to set inside
the container, as a dictionary or a List[str] in the format
["SOMEVARIABLE=xxx", "SOMEOTHERVARIABLE=xyz"].
extra_hosts (Dict[str, str]): Additional hostnames to resolve inside the container,
as a mapping of hostname to IP address.
group_add (List[str]): List of additional group names and/or IDs that the container
process will run as.
healthcheck (Dict[str,Any]): Specify a test to perform to check that the
container is healthy.
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (Union[Dict[str, str], List[str]): A dictionary of name-value labels (e.g.
{"label1": "value1", "label2": "value2"}) or a list of names of labels to set
with empty values (e.g. ["label1", "label2"])
links (Optional[Dict[str, str]]): Mapping of links using the {'container': 'alias'}
format. The alias is optional. Containers declared in this dict will be linked to
the new container using the provided alias. Default: None.
log_config (LogConfig): Logging configuration.
lxc_config (Dict[str, str]): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (Union[int, str]): Memory limit. Accepts float values (which represent the
memory limit of the created container in bytes) or a string with a units
identification char (100000b, 1000k, 128m, 1g). If a string is specified without
a units character, bytes are assumed as an intended unit.
mem_reservation (Union[int, str]): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number
between 0 and 100.
memswap_limit (Union[int, str]): Maximum amount of memory + swap a container is allowed
to consume.
mounts (List[Mount]): Specification for mounts to be added to the container. More
powerful alternative to volumes. Each item in the list is expected to be a
Mount object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected to at creation time.
You can connect to additional networks using Network.connect.
Incompatible with network_mode.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- bridge: Create a new network stack for the container
on the bridge network.
- none: No networking for this container.
- container:<name|id>: Reuse another container's network
stack.
- host: Use the host network stack.
Incompatible with network.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given to the container in
order to tune OOM killer preferences.
pid_mode (str): If set to host, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set -1 for unlimited.
platform (str): Platform in the format os[/arch[/variant]]. Only used if the method
needs to pull the requested image.
ports (Dict[str, Union[int, Tuple[str, int], List[int]]]): Ports to bind inside
the container.
The keys of the dictionary are the ports to bind inside the container, either as an
integer or a string in the form port/protocol, where the protocol is either
tcp, udp, or sctp.
The values of the dictionary are the corresponding ports to open on the host,
which can be either:
- The port number, as an integer.
For example: {'2222/tcp': 3333} will expose port 2222 inside the container
as port 3333 on the host.
- None, to assign a random host port.
For example: {'2222/tcp': None}.
- A tuple of (address, port) if you want to specify the host interface.
For example: {'1111/tcp': ('127.0.0.1', 1111)}.
- A list of integers, if you want to bind multiple host ports to a single container
port.
For example: {'1111/tcp': [1234, 4567]}.
For example: {'9090': 7878, '10932/tcp': '8781',
"8989/tcp": ("127.0.0.1", 9091)}
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read only.
remove (bool): Remove the container when it has finished running. Default: False.
restart_policy (Dict[str, Union[str, int]]): Restart the container when it exits.
Configured as a dictionary with keys:
- Name: One of on-failure, or always.
- MaximumRetryCount: Number of times to restart the container on failure.
For example: {"Name": "on-failure", "MaximumRetryCount": 5}
runtime (str): Runtime to use with this container.
security_opt (List[str]): A List[str]ing values to customize labels for MLS systems,
such as SELinux.
shm_size (Union[str, int]): Size of /dev/shm (e.g. 1G).
stdin_open (bool): Keep STDIN open even if not attached.
stdout (bool): Return logs from STDOUT when detach=False. Default: True.
stderr (bool): Return logs from STDERR when detach=False. Default: False.
stop_signal (str): The stop signal to use to stop the container (e.g. SIGINT).
storage_opt (Dict[str, str]): Storage driver options per container as a
key-value mapping.
stream (bool): If true and detach is false, return a log generator instead of a string.
Ignored if detach is true. Default: False.
sysctls (Dict[str, str]): Kernel parameters to set in the container.
tmpfs (Dict[str, str]): Temporary filesystems to mount, as a dictionary mapping a
path inside the container to options for that path.
For example: {'/mnt/vol2': '', '/mnt/vol1': 'size=3G,uid=1000'}
tty (bool): Allocate a pseudo-TTY.
ulimits (List[Ulimit]): Ulimits to set inside the container.
use_config_proxy (bool): If True, and if the docker client configuration
file (~/.config/containers/config.json by default) contains a proxy configuration,
the corresponding environment variables will be set in the container being built.
user (Union[str, int]): Username or UID to run commands as inside the container.
userns_mode (str): Sets the user namespace mode for the container when user namespace
remapping option is enabled. Supported values are: host
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: host
version (str): The version of the API to use. Set to auto to automatically detect
the server's version. Default: 3.0.0
volume_driver (str): The name of a volume driver/plugin.
volumes (Dict[str, Dict[str, str]]): A dictionary to configure volumes mounted inside
the container. The key is either the host path or a volume name, and the value is
a dictionary with the keys:
- bind: The path to mount the volume inside the container
- mode: Either rw to mount the volume read/write, or ro to mount it read-only.
For example:
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
volumes_from (List[str]): List of container names or IDs to get volumes from.
working_dir (str): Path to the working directory.
Raises:
ImageNotFound: when Image not found by Podman service
APIError: when Podman service reports an error
"""
if isinstance(image, Image):
image = image.id
payload = {"image": image, "command": command}
payload.update(kwargs)
payload = self._render_payload(payload)
payload = api.prepare_body(payload)
response = self.client.post(
"/containers/create", headers={"content-type": "application/json"}, data=payload
)
response.raise_for_status(not_found=ImageNotFound)
body = response.json()
return self.get(body["Id"])
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
@staticmethod
def _render_payload(kwargs: MutableMapping[str, Any]) -> Dict[str, Any]:
"""Map create/run kwargs into body parameters."""
args = copy.copy(kwargs)
if "links" in args:
if len(args["links"]) > 0:
raise ValueError("'links' are not supported by Podman service.")
del args["links"]
# Ignore these keywords
for key in (
"cpu_count",
"cpu_percent",
"nano_cpus",
"platform", # used by caller
"remove", # used by caller
"stderr", # used by caller
"stdout", # used by caller
"stream", # used by caller
"detach", # used by caller
"volume_driver",
):
with suppress(KeyError):
del args[key]
# These keywords are not supported for various reasons.
unsupported_keys = set(args.keys()).intersection(
(
"blkio_weight",
"blkio_weight_device", # FIXME In addition to device Major/Minor include path
"device_cgroup_rules", # FIXME Where to map for Podman API?
"device_read_bps", # FIXME In addition to device Major/Minor include path
"device_read_iops", # FIXME In addition to device Major/Minor include path
"device_requests", # FIXME In addition to device Major/Minor include path
"device_write_bps", # FIXME In addition to device Major/Minor include path
"device_write_iops", # FIXME In addition to device Major/Minor include path
"devices", # FIXME In addition to device Major/Minor include path
"domainname",
"network_disabled", # FIXME Where to map for Podman API?
"storage_opt", # FIXME Where to map for Podman API?
"tmpfs", # FIXME Where to map for Podman API?
)
)
if len(unsupported_keys) > 0:
raise TypeError(
f"""Keyword(s) '{" ,".join(unsupported_keys)}' are"""
f""" currently not supported by Podman API."""
)
def pop(k):
return args.pop(k, None)
def to_bytes(size: Union[int, str, None]) -> Union[int, None]:
"""
Converts str or int to bytes.
Input can be in the following forms :
0) None - e.g. None -> returns None
1) int - e.g. 100 == 100 bytes
2) str - e.g. '100' == 100 bytes
3) str with suffix - available suffixes:
b | B - bytes
k | K = kilobytes
m | M = megabytes
g | G = gigabytes
e.g. '100m' == 104857600 bytes
"""
size_type = type(size)
if size is None:
return size
if size_type is int:
return size
if size_type is str:
try:
return int(size)
except ValueError as bad_size:
mapping = {'b': 0, 'k': 1, 'm': 2, 'g': 3}
mapping_regex = ''.join(mapping.keys())
search = re.search(rf'^(\d+)([{mapping_regex}])$', size.lower())
if search:
return int(search.group(1)) * (1024 ** mapping[search.group(2)])
raise TypeError(
f"Passed string size {size} should be in format\\d+[bBkKmMgG] (e.g."
" '100m')"
) from bad_size
else:
raise TypeError(
f"Passed size {size} should be a type of unicode, str "
f"or int (found : {size_type})"
)
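        # Editor's note (not part of the original source): example conversions
        # performed by to_bytes() above:
        #   to_bytes(None)   -> None
        #   to_bytes(1024)   -> 1024
        #   to_bytes("100")  -> 100
        #   to_bytes("100m") -> 104857600   (100 * 1024**2)
        #   to_bytes("1g")   -> 1073741824  (1024**3)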
# Transform keywords into parameters
params = {
"aliases": pop("aliases"), # TODO document, podman only
"annotations": pop("annotations"), # TODO document, podman only
"apparmor_profile": pop("apparmor_profile"), # TODO document, podman only
"cap_add": pop("cap_add"),
"cap_drop": pop("cap_drop"),
"cgroup_parent": pop("cgroup_parent"),
"cgroups_mode": pop("cgroups_mode"), # TODO document, podman only
"cni_networks": [pop("network")],
"command": args.pop("command", args.pop("cmd", None)),
"conmon_pid_file": pop("conmon_pid_file"), # TODO document, podman only
"containerCreateCommand": pop("containerCreateCommand"), # TODO document, podman only
"dns_options": pop("dns_opt"),
"dns_search": pop("dns_search"),
"dns_server": pop("dns"),
"entrypoint": pop("entrypoint"),
"env": pop("environment"),
"env_host": pop("env_host"), # TODO document, podman only
"expose": {},
"groups": pop("group_add"),
"healthconfig": pop("healthcheck"),
"hostadd": [],
"hostname": pop("hostname"),
"httpproxy": pop("use_config_proxy"),
"idmappings": pop("idmappings"), # TODO document, podman only
"image": pop("image"),
"image_volume_mode": pop("image_volume_mode"), # TODO document, podman only
"image_volumes": pop("image_volumes"), # TODO document, podman only
"init": pop("init"),
"init_path": pop("init_path"),
"isolation": pop("isolation"),
"labels": pop("labels"),
"log_configuration": {},
"lxc_config": pop("lxc_config"),
"mask": pop("masked_paths"),
"mounts": [],
"name": pop("name"),
"namespace": pop("namespace"), # TODO What is this for?
"network_options": pop("network_options"), # TODO document, podman only
"no_new_privileges": pop("no_new_privileges"), # TODO document, podman only
"oci_runtime": pop("runtime"),
"oom_score_adj": pop("oom_score_adj"),
"overlay_volumes": pop("overlay_volumes"), # TODO document, podman only
"portmappings": [],
"privileged": pop("privileged"),
"procfs_opts": pop("procfs_opts"), # TODO document, podman only
"publish_image_ports": pop("publish_all_ports"),
"r_limits": [],
"raw_image_name": pop("raw_image_name"), # TODO document, podman only
"read_only_filesystem": pop("read_only"),
"remove": args.pop("remove", args.pop("auto_remove", None)),
"resource_limits": {},
"rootfs": pop("rootfs"),
"rootfs_propagation": pop("rootfs_propagation"),
"sdnotifyMode": pop("sdnotifyMode"), # TODO document, podman only
"seccomp_policy": pop("seccomp_policy"), # TODO document, podman only
"seccomp_profile_path": pop("seccomp_profile_path"), # TODO document, podman only
"secrets": pop("secrets"), # TODO document, podman only
"selinux_opts": pop("security_opt"),
"shm_size": to_bytes(pop("shm_size")),
"static_ip": pop("static_ip"), # TODO document, podman only
"static_ipv6": pop("static_ipv6"), # TODO document, podman only
"static_mac": pop("mac_address"),
"stdin": pop("stdin_open"),
"stop_signal": pop("stop_signal"),
"stop_timeout": pop("stop_timeout"), # TODO document, podman only
"sysctl": pop("sysctls"),
"systemd": pop("systemd"), # TODO document, podman only
"terminal": pop("tty"),
"timezone": pop("timezone"),
"umask": pop("umask"), # TODO document, podman only
"unified": pop("unified"), # TODO document, podman only
"unmask": pop("unmasked_paths"), # TODO document, podman only
"use_image_hosts": pop("use_image_hosts"), # TODO document, podman only
"use_image_resolve_conf": pop("use_image_resolve_conf"), # TODO document, podman only
"user": pop("user"),
"version": pop("version"),
"volumes": [],
"volumes_from": pop("volumes_from"),
"work_dir": pop("working_dir"),
}
for item in args.pop("exposed_ports", []):
port, protocol = item.split("/")
params["expose"][int(port)] = protocol
for hostname, ip in args.pop("extra_hosts", {}).items():
params["hostadd"].append(f"{hostname}:{ip}")
if "log_config" in args:
params["log_configuration"]["driver"] = args["log_config"].get("Type")
if "Config" in args["log_config"]:
params["log_configuration"]["path"] = args["log_config"]["Config"].get("path")
params["log_configuration"]["size"] = args["log_config"]["Config"].get("size")
params["log_configuration"]["options"] = args["log_config"]["Config"].get(
"options"
)
args.pop("log_config")
for item in args.pop("mounts", []):
mount_point = {
"destination": item.get("target"),
"options": [],
"source": item.get("source"),
"type": item.get("type"),
}
options = []
if "read_only" in item:
options.append("ro")
if "consistency" in item:
options.append(f"consistency={item['consistency']}")
if "mode" in item:
options.append(f"mode={item['mode']}")
if "propagation" in item:
options.append(item["propagation"])
if "size" in item:
options.append(f"size={item['size']}")
mount_point["options"] = options
params["mounts"].append(mount_point)
if "pod" in args:
pod = args.pop("pod")
if isinstance(pod, Pod):
pod = pod.id
params["pod"] = pod # TODO document, podman only
for container, host in args.pop("ports", {}).items():
if "/" in container:
container_port, protocol = container.split("/")
else:
container_port, protocol = container, "tcp"
port_map = {"container_port": int(container_port), "protocol": protocol}
if host is None:
pass
elif isinstance(host, int) or isinstance(host, str) and host.isdigit():
port_map["host_port"] = int(host)
elif isinstance(host, tuple):
port_map["host_ip"] = host[0]
port_map["host_port"] = int(host[1])
elif isinstance(host, list):
raise ValueError(
"Podman API does not support multiple port bound to a single host port."
)
else:
raise ValueError(f"'ports' value of '{host}' is not supported.")
params["portmappings"].append(port_map)
if "restart_policy" in args:
params["restart_policy"] = args["restart_policy"].get("Name")
params["restart_tries"] = args["restart_policy"].get("MaximumRetryCount")
args.pop("restart_policy")
params["resource_limits"]["pids"] = {"limit": args.pop("pids_limit", None)}
params["resource_limits"]["cpu"] = {
"cpus": args.pop("cpuset_cpus", None),
"mems": args.pop("cpuset_mems", None),
"period": args.pop("cpu_period", None),
"quota": args.pop("cpu_quota", None),
"realtimePeriod": args.pop("cpu_rt_period", None),
"realtimeRuntime": args.pop("cpu_rt_runtime", None),
"shares": args.pop("cpu_shares", None),
}
params["resource_limits"]["memory"] = {
"disableOOMKiller": args.pop("oom_kill_disable", None),
"kernel": to_bytes(args.pop("kernel_memory", None)),
"kernelTCP": args.pop("kernel_memory_tcp", None),
"limit": to_bytes(args.pop("mem_limit", None)),
"reservation": to_bytes(args.pop("mem_reservation", None)),
"swap": args.pop("memswap_limit", None),
"swappiness": args.pop("mem_swappiness", None),
"useHierarchy": args.pop("mem_use_hierarchy", None),
}
for item in args.pop("ulimits", []):
params["r_limits"].append(
{
"type": item["Name"],
"hard": item["Hard"],
"soft": item["Soft"],
}
)
for item in args.pop("volumes", {}).items():
key, value = item
volume = {
"Name": key,
"Dest": value["bind"],
"Options": [value["mode"]] if "mode" in value else [],
}
params["volumes"].append(volume)
if "cgroupns" in args:
params["cgroupns"] = {"nsmode": args.pop("cgroupns")}
if "ipc_mode" in args:
params["ipcns"] = {"nsmode": args.pop("ipc_mode")}
if "network_mode" in args:
params["netns"] = {"nsmode": args.pop("network_mode")}
if "pid_mode" in args:
params["pidns"] = {"nsmode": args.pop("pid_mode")}
if "userns_mode" in args:
params["userns"] = {"nsmode": args.pop("userns_mode")}
if "uts_mode" in args:
params["utsns"] = {"nsmode": args.pop("uts_mode")}
if len(args) > 0:
raise TypeError(
"Unknown keyword argument(s): " + " ,".join(f"'{k}'" for k in args.keys())
)
return params
| """Mixin to provide Container create() method."""
import copy
import logging
import re
from contextlib import suppress
from typing import Any, Dict, List, MutableMapping, Union
from podman import api
from podman.domain.containers import Container
from podman.domain.images import Image
from podman.domain.pods import Pod
from podman.errors import ImageNotFound
logger = logging.getLogger("podman.containers")
class CreateMixin: # pylint: disable=too-few-public-methods
"""Class providing create method for ContainersManager."""
def create(
self, image: Union[Image, str], command: Union[str, List[str], None] = None, **kwargs
) -> Container:
"""Create a container.
Args:
image: Image to run.
command: Command to run in the container.
Keyword Args:
auto_remove (bool): Enable auto-removal of the container on daemon side when the
container's process exits.
blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight)
in the form of: [{"Path": "device_path", "Weight": weight}].
blkio_weight (int): Block IO weight (relative weight), accepts a weight value
between 10 and 1000.
cap_add (List[str]): Add kernel capabilities. For example: ["SYS_ADMIN", "MKNOD"]
cap_drop (List[str]): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs (Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution (0-3, 0,1).
Only effective on NUMA systems.
detach (bool): Run container in the background and return a Container object.
device_cgroup_rules (List[str]): A list of cgroup rules to apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device in the form of:
`[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (List[str): Expose host devices to the container, as a List[str] in the form
<path_on_host>:<path_in_container>:<cgroup_permissions>.
For example:
/dev/sda:/dev/xvda:rwm allows the container to have read-write access to the
host's /dev/sda via a node named /dev/xvda inside the container.
dns (List[str]): Set custom DNS servers.
dns_opt (List[str]): Additional options to be added to the container's resolv.conf file.
dns_search (List[str]): DNS search domains.
domainname (Union[str, List[str]]): Set custom DNS search domains.
entrypoint (Union[str, List[str]]): The entrypoint for the container.
environment (Union[Dict[str, str], List[str]): Environment variables to set inside
the container, as a dictionary or a List[str] in the format
["SOMEVARIABLE=xxx", "SOMEOTHERVARIABLE=xyz"].
extra_hosts (Dict[str, str]): Additional hostnames to resolve inside the container,
as a mapping of hostname to IP address.
group_add (List[str]): List of additional group names and/or IDs that the container
process will run as.
healthcheck (Dict[str,Any]): Specify a test to perform to check that the
container is healthy.
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (Union[Dict[str, str], List[str]): A dictionary of name-value labels (e.g.
{"label1": "value1", "label2": "value2"}) or a list of names of labels to set
with empty values (e.g. ["label1", "label2"])
links (Optional[Dict[str, str]]): Mapping of links using the {'container': 'alias'}
format. The alias is optional. Containers declared in this dict will be linked to
the new container using the provided alias. Default: None.
log_config (LogConfig): Logging configuration.
lxc_config (Dict[str, str]): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (Union[int, str]): Memory limit. Accepts float values (which represent the
memory limit of the created container in bytes) or a string with a units
identification char (100000b, 1000k, 128m, 1g). If a string is specified without
a units character, bytes are assumed as an intended unit.
mem_reservation (Union[int, str]): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number
between 0 and 100.
memswap_limit (Union[int, str]): Maximum amount of memory + swap a container is allowed
to consume.
mounts (List[Mount]): Specification for mounts to be added to the container. More
powerful alternative to volumes. Each item in the list is expected to be a
Mount object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected to at creation time.
You can connect to additional networks using Network.connect.
Incompatible with network_mode.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- bridge: Create a new network stack for the container
on the bridge network.
- none: No networking for this container.
- container:<name|id>: Reuse another container's network
stack.
- host: Use the host network stack.
Incompatible with network.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given to the container in
order to tune OOM killer preferences.
pid_mode (str): If set to host, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set -1 for unlimited.
platform (str): Platform in the format os[/arch[/variant]]. Only used if the method
needs to pull the requested image.
ports (Dict[str, Union[int, Tuple[str, int], List[int]]]): Ports to bind inside
the container.
The keys of the dictionary are the ports to bind inside the container, either as an
integer or a string in the form port/protocol, where the protocol is either
tcp, udp, or sctp.
The values of the dictionary are the corresponding ports to open on the host,
which can be either:
- The port number, as an integer.
For example: {'2222/tcp': 3333} will expose port 2222 inside the container
as port 3333 on the host.
- None, to assign a random host port.
For example: {'2222/tcp': None}.
- A tuple of (address, port) if you want to specify the host interface.
For example: {'1111/tcp': ('127.0.0.1', 1111)}.
- A list of integers, if you want to bind multiple host ports to a single container
port.
For example: {'1111/tcp': [1234, 4567]}.
For example: {'9090': 7878, '10932/tcp': '8781',
"8989/tcp": ("127.0.0.1", 9091)}
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read only.
remove (bool): Remove the container when it has finished running. Default: False.
restart_policy (Dict[str, Union[str, int]]): Restart the container when it exits.
Configured as a dictionary with keys:
- Name: One of on-failure, or always.
- MaximumRetryCount: Number of times to restart the container on failure.
For example: {"Name": "on-failure", "MaximumRetryCount": 5}
runtime (str): Runtime to use with this container.
security_opt (List[str]): A List[str]ing values to customize labels for MLS systems,
such as SELinux.
shm_size (Union[str, int]): Size of /dev/shm (e.g. 1G).
stdin_open (bool): Keep STDIN open even if not attached.
stdout (bool): Return logs from STDOUT when detach=False. Default: True.
stderr (bool): Return logs from STDERR when detach=False. Default: False.
stop_signal (str): The stop signal to use to stop the container (e.g. SIGINT).
storage_opt (Dict[str, str]): Storage driver options per container as a
key-value mapping.
stream (bool): If true and detach is false, return a log generator instead of a string.
Ignored if detach is true. Default: False.
sysctls (Dict[str, str]): Kernel parameters to set in the container.
tmpfs (Dict[str, str]): Temporary filesystems to mount, as a dictionary mapping a
path inside the container to options for that path.
For example: {'/mnt/vol2': '', '/mnt/vol1': 'size=3G,uid=1000'}
tty (bool): Allocate a pseudo-TTY.
ulimits (List[Ulimit]): Ulimits to set inside the container.
use_config_proxy (bool): If True, and if the docker client configuration
file (~/.config/containers/config.json by default) contains a proxy configuration,
the corresponding environment variables will be set in the container being built.
user (Union[str, int]): Username or UID to run commands as inside the container.
userns_mode (str): Sets the user namespace mode for the container when user namespace
remapping option is enabled. Supported values are: host
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: host
version (str): The version of the API to use. Set to auto to automatically detect
the server's version. Default: 3.0.0
volume_driver (str): The name of a volume driver/plugin.
volumes (Dict[str, Dict[str, str]]): A dictionary to configure volumes mounted inside
the container. The key is either the host path or a volume name, and the value is
a dictionary with the keys:
- bind: The path to mount the volume inside the container
- mode: Either rw to mount the volume read/write, or ro to mount it read-only.
For example:
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
volumes_from (List[str]): List of container names or IDs to get volumes from.
working_dir (str): Path to the working directory.
Raises:
ImageNotFound: when Image not found by Podman service
APIError: when Podman service reports an error
"""
if isinstance(image, Image):
image = image.id
payload = {"image": image, "command": command}
payload.update(kwargs)
payload = self._render_payload(payload)
payload = api.prepare_body(payload)
response = self.client.post(
"/containers/create", headers={"content-type": "application/json"}, data=payload
)
response.raise_for_status(not_found=ImageNotFound)
body = response.json()
return self.get(body["Id"])
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
@staticmethod
def _render_payload(kwargs: MutableMapping[str, Any]) -> Dict[str, Any]:
"""Map create/run kwargs into body parameters."""
args = copy.copy(kwargs)
if "links" in args:
if len(args["links"]) > 0:
raise ValueError("'links' are not supported by Podman service.")
del args["links"]
# Ignore these keywords
for key in (
"cpu_count",
"cpu_percent",
"nano_cpus",
"platform", # used by caller
"remove", # used by caller
"stderr", # used by caller
"stdout", # used by caller
"stream", # used by caller
"detach", # used by caller
"volume_driver",
):
with suppress(KeyError):
del args[key]
# These keywords are not supported for various reasons.
unsupported_keys = set(args.keys()).intersection(
(
"blkio_weight",
"blkio_weight_device", # FIXME In addition to device Major/Minor include path
"device_cgroup_rules", # FIXME Where to map for Podman API?
"device_read_bps", # FIXME In addition to device Major/Minor include path
"device_read_iops", # FIXME In addition to device Major/Minor include path
"device_requests", # FIXME In addition to device Major/Minor include path
"device_write_bps", # FIXME In addition to device Major/Minor include path
"device_write_iops", # FIXME In addition to device Major/Minor include path
"devices", # FIXME In addition to device Major/Minor include path
"domainname",
"network_disabled", # FIXME Where to map for Podman API?
"storage_opt", # FIXME Where to map for Podman API?
"tmpfs", # FIXME Where to map for Podman API?
)
)
if len(unsupported_keys) > 0:
raise TypeError(
f"""Keyword(s) '{" ,".join(unsupported_keys)}' are"""
f""" currently not supported by Podman API."""
)
def pop(k):
return args.pop(k, None)
def to_bytes(size: Union[int, str, None]) -> Union[int, None]:
"""
Converts str or int to bytes.
Input can be in the following forms :
0) None - e.g. None -> returns None
1) int - e.g. 100 == 100 bytes
2) str - e.g. '100' == 100 bytes
3) str with suffix - available suffixes:
b | B - bytes
k | K = kilobytes
m | M = megabytes
g | G = gigabytes
e.g. '100m' == 104857600 bytes
"""
size_type = type(size)
if size is None:
return size
if size_type is int:
return size
if size_type is str:
try:
return int(size)
except ValueError as bad_size:
mapping = {'b': 0, 'k': 1, 'm': 2, 'g': 3}
mapping_regex = ''.join(mapping.keys())
search = re.search(rf'^(\d+)([{mapping_regex}])$', size.lower())
if search:
return int(search.group(1)) * (1024 ** mapping[search.group(2)])
raise TypeError(
f"Passed string size {size} should be in format\\d+[bBkKmMgG] (e.g."
" '100m')"
) from bad_size
else:
raise TypeError(
f"Passed size {size} should be a type of unicode, str "
f"or int (found : {size_type})"
)
# Transform keywords into parameters
params = {
"aliases": pop("aliases"), # TODO document, podman only
"annotations": pop("annotations"), # TODO document, podman only
"apparmor_profile": pop("apparmor_profile"), # TODO document, podman only
"cap_add": pop("cap_add"),
"cap_drop": pop("cap_drop"),
"cgroup_parent": pop("cgroup_parent"),
"cgroups_mode": pop("cgroups_mode"), # TODO document, podman only
"cni_networks": [pop("network")],
"command": args.pop("command", args.pop("cmd", None)),
"conmon_pid_file": pop("conmon_pid_file"), # TODO document, podman only
"containerCreateCommand": pop("containerCreateCommand"), # TODO document, podman only
"dns_options": pop("dns_opt"),
"dns_search": pop("dns_search"),
"dns_server": pop("dns"),
"entrypoint": pop("entrypoint"),
"env": pop("environment"),
"env_host": pop("env_host"), # TODO document, podman only
"expose": {},
"groups": pop("group_add"),
"healthconfig": pop("healthcheck"),
"hostadd": [],
"hostname": pop("hostname"),
"httpproxy": pop("use_config_proxy"),
"idmappings": pop("idmappings"), # TODO document, podman only
"image": pop("image"),
"image_volume_mode": pop("image_volume_mode"), # TODO document, podman only
"image_volumes": pop("image_volumes"), # TODO document, podman only
"init": pop("init"),
"init_path": pop("init_path"),
"isolation": pop("isolation"),
"labels": pop("labels"),
"log_configuration": {},
"lxc_config": pop("lxc_config"),
"mask": pop("masked_paths"),
"mounts": [],
"name": pop("name"),
"namespace": pop("namespace"), # TODO What is this for?
"network_options": pop("network_options"), # TODO document, podman only
"no_new_privileges": pop("no_new_privileges"), # TODO document, podman only
"oci_runtime": pop("runtime"),
"oom_score_adj": pop("oom_score_adj"),
"overlay_volumes": pop("overlay_volumes"), # TODO document, podman only
"portmappings": [],
"privileged": pop("privileged"),
"procfs_opts": pop("procfs_opts"), # TODO document, podman only
"publish_image_ports": pop("publish_all_ports"),
"r_limits": [],
"raw_image_name": pop("raw_image_name"), # TODO document, podman only
"read_only_filesystem": pop("read_only"),
"remove": args.pop("remove", args.pop("auto_remove", None)),
"resource_limits": {},
"rootfs": pop("rootfs"),
"rootfs_propagation": pop("rootfs_propagation"),
"sdnotifyMode": pop("sdnotifyMode"), # TODO document, podman only
"seccomp_policy": pop("seccomp_policy"), # TODO document, podman only
"seccomp_profile_path": pop("seccomp_profile_path"), # TODO document, podman only
"secrets": pop("secrets"), # TODO document, podman only
"selinux_opts": pop("security_opt"),
"shm_size": to_bytes(pop("shm_size")),
"static_ip": pop("static_ip"), # TODO document, podman only
"static_ipv6": pop("static_ipv6"), # TODO document, podman only
"static_mac": pop("mac_address"),
"stdin": pop("stdin_open"),
"stop_signal": pop("stop_signal"),
"stop_timeout": pop("stop_timeout"), # TODO document, podman only
"sysctl": pop("sysctls"),
"systemd": pop("systemd"), # TODO document, podman only
"terminal": pop("tty"),
"timezone": pop("timezone"),
"umask": pop("umask"), # TODO document, podman only
"unified": pop("unified"), # TODO document, podman only
"unmask": pop("unmasked_paths"), # TODO document, podman only
"use_image_hosts": pop("use_image_hosts"), # TODO document, podman only
"use_image_resolve_conf": pop("use_image_resolve_conf"), # TODO document, podman only
"user": pop("user"),
"version": pop("version"),
"volumes": [],
"volumes_from": pop("volumes_from"),
"work_dir": pop("working_dir"),
}
for item in args.pop("exposed_ports", []):
port, protocol = item.split("/")
params["expose"][int(port)] = protocol
for hostname, ip in args.pop("extra_hosts", {}).items():
params["hostadd"].append(f"{hostname}:{ip}")
if "log_config" in args:
params["log_configuration"]["driver"] = args["log_config"].get("Type")
if "Config" in args["log_config"]:
params["log_configuration"]["path"] = args["log_config"]["Config"].get("path")
params["log_configuration"]["size"] = args["log_config"]["Config"].get("size")
params["log_configuration"]["options"] = args["log_config"]["Config"].get(
"options"
)
args.pop("log_config")
for item in args.pop("mounts", []):
mount_point = {
"destination": item.get("target"),
"options": [],
"source": item.get("source"),
"type": item.get("type"),
}
options = []
if "read_only" in item:
options.append("ro")
if "consistency" in item:
options.append(f"consistency={item['consistency']}")
if "mode" in item:
options.append(f"mode={item['mode']}")
if "propagation" in item:
options.append(item["propagation"])
if "size" in item:
options.append(f"size={item['size']}")
mount_point["options"] = options
params["mounts"].append(mount_point)
if "pod" in args:
pod = args.pop("pod")
if isinstance(pod, Pod):
pod = pod.id
params["pod"] = pod # TODO document, podman only
for container, host in args.pop("ports", {}).items():
if "/" in container:
container_port, protocol = container.split("/")
else:
container_port, protocol = container, "tcp"
port_map = {"container_port": int(container_port), "protocol": protocol}
if host is None:
pass
elif isinstance(host, int) or isinstance(host, str) and host.isdigit():
port_map["host_port"] = int(host)
elif isinstance(host, tuple):
port_map["host_ip"] = host[0]
port_map["host_port"] = int(host[1])
elif isinstance(host, list):
raise ValueError(
"Podman API does not support multiple port bound to a single host port."
)
else:
raise ValueError(f"'ports' value of '{host}' is not supported.")
params["portmappings"].append(port_map)
if "restart_policy" in args:
params["restart_policy"] = args["restart_policy"].get("Name")
params["restart_tries"] = args["restart_policy"].get("MaximumRetryCount")
args.pop("restart_policy")
params["resource_limits"]["pids"] = {"limit": args.pop("pids_limit", None)}
params["resource_limits"]["cpu"] = {
"cpus": args.pop("cpuset_cpus", None),
"mems": args.pop("cpuset_mems", None),
"period": args.pop("cpu_period", None),
"quota": args.pop("cpu_quota", None),
"realtimePeriod": args.pop("cpu_rt_period", None),
"realtimeRuntime": args.pop("cpu_rt_runtime", None),
"shares": args.pop("cpu_shares", None),
}
params["resource_limits"]["memory"] = {
"disableOOMKiller": args.pop("oom_kill_disable", None),
"kernel": to_bytes(args.pop("kernel_memory", None)),
"kernelTCP": args.pop("kernel_memory_tcp", None),
"limit": to_bytes(args.pop("mem_limit", None)),
"reservation": to_bytes(args.pop("mem_reservation", None)),
"swap": args.pop("memswap_limit", None),
"swappiness": args.pop("mem_swappiness", None),
"useHierarchy": args.pop("mem_use_hierarchy", None),
}
for item in args.pop("ulimits", []):
params["r_limits"].append(
{
"type": item["Name"],
"hard": item["Hard"],
"soft": item["Soft"],
}
)
for item in args.pop("volumes", {}).items():
key, value = item
volume = {
"Name": key,
"Dest": value["bind"],
"Options": [value["mode"]] if "mode" in value else [],
}
params["volumes"].append(volume)
if "cgroupns" in args:
params["cgroupns"] = {"nsmode": args.pop("cgroupns")}
if "ipc_mode" in args:
params["ipcns"] = {"nsmode": args.pop("ipc_mode")}
if "network_mode" in args:
params["netns"] = {"nsmode": args.pop("network_mode")}
if "pid_mode" in args:
params["pidns"] = {"nsmode": args.pop("pid_mode")}
if "userns_mode" in args:
params["userns"] = {"nsmode": args.pop("userns_mode")}
if "uts_mode" in args:
params["utsns"] = {"nsmode": args.pop("uts_mode")}
if len(args) > 0:
raise TypeError(
"Unknown keyword argument(s): " + " ,".join(f"'{k}'" for k in args.keys())
)
return params
| en | 0.701232 | Mixin to provide Container create() method. # pylint: disable=too-few-public-methods Class providing create method for ContainersManager. Create a container. Args: image: Image to run. command: Command to run in the container. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{"Path": "device_path", "Weight": weight}]. blkio_weight (int): Block IO weight (relative weight), accepts a weight value between 10 and 1000. cap_add (List[str]): Add kernel capabilities. For example: ["SYS_ADMIN", "MKNOD"] cap_drop (List[str]): Drop kernel capabilities. cgroup_parent (str): Override the default parent cgroup. cpu_count (int): Number of usable CPUs (Windows only). cpu_percent (int): Usable percentage of the available CPUs (Windows only). cpu_period (int): The length of a CPU period in microseconds. cpu_quota (int): Microseconds of CPU time that the container can get in a CPU period. cpu_rt_period (int): Limit CPU real-time period in microseconds. cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds. cpu_shares (int): CPU shares (relative weight). cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1). cpuset_mems (str): Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. detach (bool): Run container in the background and return a Container object. device_cgroup_rules (List[str]): A list of cgroup rules to apply to the container. device_read_bps: Limit read rate (bytes per second) from a device in the form of: `[{"Path": "device_path", "Rate": rate}]` device_read_iops: Limit read rate (IO per second) from a device. device_write_bps: Limit write rate (bytes per second) from a device. device_write_iops: Limit write rate (IO per second) from a device. devices (List[str): Expose host devices to the container, as a List[str] in the form <path_on_host>:<path_in_container>:<cgroup_permissions>. For example: /dev/sda:/dev/xvda:rwm allows the container to have read-write access to the host's /dev/sda via a node named /dev/xvda inside the container. dns (List[str]): Set custom DNS servers. dns_opt (List[str]): Additional options to be added to the container's resolv.conf file. dns_search (List[str]): DNS search domains. domainname (Union[str, List[str]]): Set custom DNS search domains. entrypoint (Union[str, List[str]]): The entrypoint for the container. environment (Union[Dict[str, str], List[str]): Environment variables to set inside the container, as a dictionary or a List[str] in the format ["SOMEVARIABLE=xxx", "SOMEOTHERVARIABLE=xyz"]. extra_hosts (Dict[str, str]): Additional hostnames to resolve inside the container, as a mapping of hostname to IP address. group_add (List[str]): List of additional group names and/or IDs that the container process will run as. healthcheck (Dict[str,Any]): Specify a test to perform to check that the container is healthy. hostname (str): Optional hostname for the container. init (bool): Run an init inside the container that forwards signals and reaps processes init_path (str): Path to the docker-init binary ipc_mode (str): Set the IPC mode for the container. isolation (str): Isolation technology to use. Default: `None`. kernel_memory (int or str): Kernel memory limit labels (Union[Dict[str, str], List[str]): A dictionary of name-value labels (e.g. 
{"label1": "value1", "label2": "value2"}) or a list of names of labels to set with empty values (e.g. ["label1", "label2"]) links (Optional[Dict[str, str]]): Mapping of links using the {'container': 'alias'} format. The alias is optional. Containers declared in this dict will be linked to the new container using the provided alias. Default: None. log_config (LogConfig): Logging configuration. lxc_config (Dict[str, str]): LXC config. mac_address (str): MAC address to assign to the container. mem_limit (Union[int, str]): Memory limit. Accepts float values (which represent the memory limit of the created container in bytes) or a string with a units identification char (100000b, 1000k, 128m, 1g). If a string is specified without a units character, bytes are assumed as an intended unit. mem_reservation (Union[int, str]): Memory soft limit. mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number between 0 and 100. memswap_limit (Union[int, str]): Maximum amount of memory + swap a container is allowed to consume. mounts (List[Mount]): Specification for mounts to be added to the container. More powerful alternative to volumes. Each item in the list is expected to be a Mount object. name (str): The name for this container. nano_cpus (int): CPU quota in units of 1e-9 CPUs. network (str): Name of the network this container will be connected to at creation time. You can connect to additional networks using Network.connect. Incompatible with network_mode. network_disabled (bool): Disable networking. network_mode (str): One of: - bridge: Create a new network stack for the container on the bridge network. - none: No networking for this container. - container:<name|id>: Reuse another container's network stack. - host: Use the host network stack. Incompatible with network. oom_kill_disable (bool): Whether to disable OOM killer. oom_score_adj (int): An integer value containing the score given to the container in order to tune OOM killer preferences. pid_mode (str): If set to host, use the host PID namespace inside the container. pids_limit (int): Tune a container's pids limit. Set -1 for unlimited. platform (str): Platform in the format os[/arch[/variant]]. Only used if the method needs to pull the requested image. ports (Dict[str, Union[int, Tuple[str, int], List[int]]]): Ports to bind inside the container. The keys of the dictionary are the ports to bind inside the container, either as an integer or a string in the form port/protocol, where the protocol is either tcp, udp, or sctp. The values of the dictionary are the corresponding ports to open on the host, which can be either: - The port number, as an integer. For example: {'2222/tcp': 3333} will expose port 2222 inside the container as port 3333 on the host. - None, to assign a random host port. For example: {'2222/tcp': None}. - A tuple of (address, port) if you want to specify the host interface. For example: {'1111/tcp': ('127.0.0.1', 1111)}. - A list of integers, if you want to bind multiple host ports to a single container port. For example: {'1111/tcp': [1234, 4567]}. For example: {'9090': 7878, '10932/tcp': '8781', "8989/tcp": ("127.0.0.1", 9091)} privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read only. remove (bool): Remove the container when it has finished running. Default: False. restart_policy (Dict[str, Union[str, int]]): Restart the container when it exits. 
Configured as a dictionary with keys: - Name: One of on-failure, or always. - MaximumRetryCount: Number of times to restart the container on failure. For example: {"Name": "on-failure", "MaximumRetryCount": 5} runtime (str): Runtime to use with this container. security_opt (List[str]): A List[str]ing values to customize labels for MLS systems, such as SELinux. shm_size (Union[str, int]): Size of /dev/shm (e.g. 1G). stdin_open (bool): Keep STDIN open even if not attached. stdout (bool): Return logs from STDOUT when detach=False. Default: True. stderr (bool): Return logs from STDERR when detach=False. Default: False. stop_signal (str): The stop signal to use to stop the container (e.g. SIGINT). storage_opt (Dict[str, str]): Storage driver options per container as a key-value mapping. stream (bool): If true and detach is false, return a log generator instead of a string. Ignored if detach is true. Default: False. sysctls (Dict[str, str]): Kernel parameters to set in the container. tmpfs (Dict[str, str]): Temporary filesystems to mount, as a dictionary mapping a path inside the container to options for that path. For example: {'/mnt/vol2': '', '/mnt/vol1': 'size=3G,uid=1000'} tty (bool): Allocate a pseudo-TTY. ulimits (List[Ulimit]): Ulimits to set inside the container. use_config_proxy (bool): If True, and if the docker client configuration file (~/.config/containers/config.json by default) contains a proxy configuration, the corresponding environment variables will be set in the container being built. user (Union[str, int]): Username or UID to run commands as inside the container. userns_mode (str): Sets the user namespace mode for the container when user namespace remapping option is enabled. Supported values are: host uts_mode (str): Sets the UTS namespace mode for the container. Supported values are: host version (str): The version of the API to use. Set to auto to automatically detect the server's version. Default: 3.0.0 volume_driver (str): The name of a volume driver/plugin. volumes (Dict[str, Dict[str, str]]): A dictionary to configure volumes mounted inside the container. The key is either the host path or a volume name, and the value is a dictionary with the keys: - bind: The path to mount the volume inside the container - mode: Either rw to mount the volume read/write, or ro to mount it read-only. For example: {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'}, '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}} volumes_from (List[str]): List of container names or IDs to get volumes from. working_dir (str): Path to the working directory. Raises: ImageNotFound: when Image not found by Podman service APIError: when Podman service reports an error # pylint: disable=too-many-locals,too-many-statements,too-many-branches Map create/run kwargs into body parameters. # Ignore these keywords # used by caller # used by caller # used by caller # used by caller # used by caller # used by caller # These keywords are not supported for various reasons. # FIXME In addition to device Major/Minor include path # FIXME Where to map for Podman API? # FIXME In addition to device Major/Minor include path # FIXME In addition to device Major/Minor include path # FIXME In addition to device Major/Minor include path # FIXME In addition to device Major/Minor include path # FIXME In addition to device Major/Minor include path # FIXME In addition to device Major/Minor include path # FIXME Where to map for Podman API? # FIXME Where to map for Podman API? # FIXME Where to map for Podman API? 
Keyword(s) '{" ,".join(unsupported_keys)}' are currently not supported by Podman API. Converts str or int to bytes. Input can be in the following forms : 0) None - e.g. None -> returns None 1) int - e.g. 100 == 100 bytes 2) str - e.g. '100' == 100 bytes 3) str with suffix - available suffixes: b | B - bytes k | K = kilobytes m | M = megabytes g | G = gigabytes e.g. '100m' == 104857600 bytes # Transform keywords into parameters # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO What is this for? # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only # TODO document, podman only | 2.315223 | 2 |
CAM.py | Harry24k/CAM | 1 | 6630189 | import numpy as np
import torch
import torch.nn as nn
import torchvision.utils
r"""
Implementation of CAM
Arguments:
model (nn.Module): a model with one GAP(Global Average Pooling) and one FC(Fully-Connected).
images (torch.tensor): input images of (batch_size, n_channel, height, width).
last_conv_name (str) : the name of the last convolutional layer of the model.
fc_name (str) : the name of the last fully-connected layer of the model.
label (list): According to the label, activated area will be changed.
* Default : None (It will be automatically determined by predicted label)
* Warning : It has to be same size as the batch_size of the input images.
normalize (Bool) : Normalized output will be returned if it is True.
* Default : True (The output have a value between 0 and 255)
resize (Bool) : Resized output will be returned.
* Default : True (The output will be resized as same as the input images)
.. note:: it is modified from "https://github.com/metalbubble/CAM/blob/master/pytorch_CAM.py"
"""
def CAM(model, images, last_conv_name, fc_name, label=None, normalize=True, resize=True) :
device = next(model.parameters()).device
size = images.shape[-2:]
    # Get the output of the very last conv layer
last_conv_features = []
def hook_feature(module, input, output):
last_conv_features.append(output.data)
    # Call hook_feature every time inception5b produces an output
model._modules.get(last_conv_name).register_forward_hook(hook_feature)
    # Get the weights of the FC layer
params = dict(getattr(model, fc_name).named_parameters())
weight_softmax = params['weight'].data
    # Run the forward pass in eval mode
model.eval()
feature = model(images.to(device))
    # Get the predictions
_, pre = feature.max(dim=1)
conv_feature = last_conv_features[0]
b, nc, h, w = conv_feature.shape
if label is None :
label = pre
cam = torch.bmm(weight_softmax[label].reshape(b, 1, nc), conv_feature.reshape((b, nc, h*w)))
cam = cam.reshape(b, 1, h, w)
# Min-Max Normalization
if normalize :
cam = (cam - cam.min()) / (cam.max()- cam.min())
cam = (255 * cam).int().float()
# Resize
if resize :
cam = nn.UpsamplingBilinear2d(size=size)(cam)
return cam, pre | import numpy as np
import torch
import torch.nn as nn
import torchvision.utils
r"""
Implementation of CAM
Arguments:
model (nn.Module): a model with one GAP(Global Average Pooling) and one FC(Fully-Connected).
images (torch.tensor): input images of (batch_size, n_channel, height, width).
last_conv_name (str) : the name of the last convolutional layer of the model.
fc_name (str) : the name of the last fully-connected layer of the model.
label (list): According to the label, activated area will be changed.
* Default : None (It will be automatically determined by predicted label)
* Warning : It has to be same size as the batch_size of the input images.
normalize (Bool) : Normalized output will be returned if it is True.
* Default : True (The output have a value between 0 and 255)
resize (Bool) : Resized output will be returned.
* Default : True (The output will be resized as same as the input images)
.. note:: it is modified from "https://github.com/metalbubble/CAM/blob/master/pytorch_CAM.py"
"""
def CAM(model, images, last_conv_name, fc_name, label=None, normalize=True, resize=True) :
device = next(model.parameters()).device
size = images.shape[-2:]
    # Get the output of the very last conv layer
last_conv_features = []
def hook_feature(module, input, output):
last_conv_features.append(output.data)
    # Call hook_feature every time inception5b produces an output
model._modules.get(last_conv_name).register_forward_hook(hook_feature)
    # Get the weights of the FC layer
params = dict(getattr(model, fc_name).named_parameters())
weight_softmax = params['weight'].data
    # Run the forward pass in eval mode
model.eval()
feature = model(images.to(device))
    # Get the predictions
_, pre = feature.max(dim=1)
conv_feature = last_conv_features[0]
b, nc, h, w = conv_feature.shape
if label is None :
label = pre
cam = torch.bmm(weight_softmax[label].reshape(b, 1, nc), conv_feature.reshape((b, nc, h*w)))
cam = cam.reshape(b, 1, h, w)
# Min-Max Normalization
if normalize :
cam = (cam - cam.min()) / (cam.max()- cam.min())
cam = (255 * cam).int().float()
# Resize
if resize :
cam = nn.UpsamplingBilinear2d(size=size)(cam)
return cam, pre | en | 0.734249 | Implementation of CAM Arguments: model (nn.Module): a model with one GAP(Global Average Pooling) and one FC(Fully-Connected). images (torch.tensor): input images of (batch_size, n_channel, height, width). last_conv_name (str) : the name of the last convolutional layer of the model. fc_name (str) : the name of the last fully-connected layer of the model. label (list): According to the label, activated area will be changed. * Default : None (It will be automatically determined by predicted label) * Warning : It has to be same size as the batch_size of the input images. normalize (Bool) : Normalized output will be returned if it is True. * Default : True (The output have a value between 0 and 255) resize (Bool) : Resized output will be returned. * Default : True (The output will be resized as same as the input images) .. note:: it is modified from "https://github.com/metalbubble/CAM/blob/master/pytorch_CAM.py" # 가장 마지막 Conv Layer의 Output 가져오기 # inception5b가 Output을 출력할 때마다 hook_feature을 호출 # FC Layer의 weight을 가져오기 # eval 모드에서 forward 진행 # 예측값 가져오기 # Min-Max Normalization # Resize | 2.980266 | 3 |
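A minimal usage sketch for the CAM() helper defined above, assuming it is in scope and using torchvision's resnet18 (its last convolutional block is named 'layer4' and its classifier 'fc'); the random input batch is only for illustration:

import torch
import torchvision.models as models

model = models.resnet18()                     # load pretrained weights in real use
images = torch.randn(4, 3, 224, 224)          # fake batch of 4 RGB images
cams, preds = CAM(model, images, last_conv_name="layer4", fc_name="fc")
print(cams.shape, preds.shape)                # torch.Size([4, 1, 224, 224]) torch.Size([4])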
poseidon/base/MQHelper.py | peterkang2001/Poseidon | 2 | 6630190 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: kangliang
date: 2019-05-08
"""
import logging
import base64
from poseidon.api.RequestsHelper import Requests
class MessageQueue:
request = Requests()
def getRabbitMqConfig(self, configKey):
if True:
mqConfig = ""
else:
mqConfig = None
logging.error("请输入正确的configKey")
logging.info("获取mq配置信息:{0}".format(mqConfig))
return mqConfig
def cleanRabbitMqQueueMessages(self, configKey):
"""
        Delete all messages from the specified message queue
        :param configKey: the message-queue key in the base.py configuration file
:return:
"""
configInfo = self.getRabbitMqConfig(configKey)
userName = configInfo.get("username" , False)
password = configInfo.get("password" , False)
server = configInfo.get("server" , False)
port = configInfo.get("port" , False)
vhost = configInfo.get("vhost" , False)
queue = configInfo.get("queue" , False)
headers = ['authorization:{}'.format(self.getBasicAuth(userName, password))]
url = "http://{0}:{1}/api/queues/{2}/{3}/contents".format(server, port, vhost, queue)
# resp = self.request.sendRequest(url=url, method="DELETE", headers=headers,needJson=False, httpStatusExp=204)
resp = self.request.sendRequest(url=url, method="DELETE", headers=headers,needJson=False)
        # Commented out for now because curl does not support status_code yet
        # if resp.status_code == 204: # a 204 status code means the request succeeded
        # logging.info('status_code is 204, RabbitMq queue cleared successfully')
# return True
# else:
        # logging.info('status_code is {0}, failed to clear the RabbitMq queue', resp.status_code)
# return False
def getRabbitMqQueueMessages(self, configKey):
"""
        Return the messages stored in the message queue.
        Notes:
        1. An empty list is returned when there are no messages in the queue.
        2. Calling this method does not affect the messages in the queue.
:return:list
"""
configInfo = self.getRabbitMqConfig(configKey)
userName = configInfo.get("username", False)
password = configInfo.get("password", False)
server = configInfo.get("server", False)
port = configInfo.get("port", False)
vhost = configInfo.get("vhost", False)
queue = configInfo.get("queue", False)
headers = ['authorization:{}'.format(self.getBasicAuth(userName, password))]
url = "http://{0}:{1}/api/queues/{2}/{3}/get".format(server, port, vhost, queue)
# data = {"count": 5000, "requeue": True, "encoding": "auto", "truncate": 50000} # 旧版本MQ
data = {"count":5,"ackmode":"ack_requeue_true","encoding":"auto","truncate":50000} # 新版本MQ
resp = self.request.sendRequest(url=url, method="POST", headers=headers, data=data, needJson=True, httpStatusExp=200)
return resp
def getBasicAuth(self, userName, password):
"""
        Return the BasicAuth value
:param userName:
:param password:
:return:
"""
token = "{0}:{1}".format(userName, password)
base64string = base64.encodestring(token.encode(encoding="utf-8"))[:-1]
authheader = "Basic %s" % base64string.decode(encoding="utf-8")
return authheader
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: kangliang
date: 2019-05-08
"""
import logging
import base64
from poseidon.api.RequestsHelper import Requests
class MessageQueue:
request = Requests()
def getRabbitMqConfig(self, configKey):
if True:
mqConfig = ""
else:
mqConfig = None
logging.error("请输入正确的configKey")
logging.info("获取mq配置信息:{0}".format(mqConfig))
return mqConfig
def cleanRabbitMqQueueMessages(self, configKey):
"""
        Delete all messages from the specified message queue
        :param configKey: the message-queue key in the base.py configuration file
:return:
"""
configInfo = self.getRabbitMqConfig(configKey)
userName = configInfo.get("username" , False)
password = configInfo.get("password" , False)
server = configInfo.get("server" , False)
port = configInfo.get("port" , False)
vhost = configInfo.get("vhost" , False)
queue = configInfo.get("queue" , False)
headers = ['authorization:{}'.format(self.getBasicAuth(userName, password))]
url = "http://{0}:{1}/api/queues/{2}/{3}/contents".format(server, port, vhost, queue)
# resp = self.request.sendRequest(url=url, method="DELETE", headers=headers,needJson=False, httpStatusExp=204)
resp = self.request.sendRequest(url=url, method="DELETE", headers=headers,needJson=False)
        # Commented out for now because curl does not support status_code yet
        # if resp.status_code == 204: # a 204 status code means the request succeeded
        # logging.info('status_code is 204, RabbitMq queue cleared successfully')
# return True
# else:
        # logging.info('status_code is {0}, failed to clear the RabbitMq queue', resp.status_code)
# return False
def getRabbitMqQueueMessages(self, configKey):
"""
        Return the messages stored in the message queue.
        Notes:
        1. An empty list is returned when there are no messages in the queue.
        2. Calling this method does not affect the messages in the queue.
:return:list
"""
configInfo = self.getRabbitMqConfig(configKey)
userName = configInfo.get("username", False)
password = configInfo.get("password", False)
server = configInfo.get("server", False)
port = configInfo.get("port", False)
vhost = configInfo.get("vhost", False)
queue = configInfo.get("queue", False)
headers = ['authorization:{}'.format(self.getBasicAuth(userName, password))]
url = "http://{0}:{1}/api/queues/{2}/{3}/get".format(server, port, vhost, queue)
# data = {"count": 5000, "requeue": True, "encoding": "auto", "truncate": 50000} # 旧版本MQ
data = {"count":5,"ackmode":"ack_requeue_true","encoding":"auto","truncate":50000} # 新版本MQ
resp = self.request.sendRequest(url=url, method="POST", headers=headers, data=data, needJson=True, httpStatusExp=200)
return resp
def getBasicAuth(self, userName, password):
"""
        Return the BasicAuth value
:param userName:
:param password:
:return:
"""
token = "{0}:{1}".format(userName, password)
base64string = base64.encodestring(token.encode(encoding="utf-8"))[:-1]
authheader = "Basic %s" % base64string.decode(encoding="utf-8")
return authheader
| zh | 0.365511 | #!/usr/bin/env python # -*- coding: utf-8 -*- Author: kangliang date: 2019-05-08 删除指定消息队列中的所有数据 :param configKey: 在base.py中配置文件中关于消息队列的key :return: # resp = self.request.sendRequest(url=url, method="DELETE", headers=headers,needJson=False, httpStatusExp=204) # 由于curl暂时不支持status_code 先注释掉 # if resp.status_code == 204: # 返回状态为204表示请求成功 # logging.info('status_code is 204, 清除RabbitMq消息队列成功') # return True # else: # logging.info('status_code is {0}, 清除RabbitMq消息队列失败', resp.status_code) # return False 返回存在消息队列中的数据, 注意事项: 1.当消息队列中没有数据会返回一个空list 2.调用此方法不会影响消息队列中的数据 :return:list # data = {"count": 5000, "requeue": True, "encoding": "auto", "truncate": 50000} # 旧版本MQ # 新版本MQ 返回BasicAuth的值 :param userName: :param password: :return: | 2.428764 | 2 |
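One portability note on getBasicAuth() above: base64.encodestring() was deprecated in Python 3 and removed in Python 3.9. A rough replacement sketch using base64.b64encode() (not part of the original module; the credentials are placeholders):

import base64

def get_basic_auth(username, password):
    token = "{0}:{1}".format(username, password)
    encoded = base64.b64encode(token.encode("utf-8")).decode("utf-8")
    return "Basic %s" % encoded

# get_basic_auth("guest", "guest") -> 'Basic Z3Vlc3Q6Z3Vlc3Q='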
AbhishekManjaro/weather.py | munagekar/MJRDarkConcky | 3 | 6630191 | <gh_stars>1-10
import urllib,json
url = "https://query.yahooapis.com/v1/public/yql?format=json&q=select+title%2C+units.temperature%2C+item.forecast%0Afrom+weather.forecast%0Awhere+woeid+in+%28select+woeid+from+geo.places+where+text%3D%22Katraj%2C+India%22%29%0Aand+u+%3D+%27C%27%0Alimit+5%0A%7C%0Asort%28field%3D%22item.forecast.date%22%2C+descending%3D%22false%22%29%0A%3B"
response = urllib.urlopen(url)
data =json.loads(response.read())
file = open('conkyweather.txt','w')
for entryitem in ["day","code","high","low"]:
for i in range(4):
file.write(data["query"]["results"]["channel"][i]['item']['forecast'][entryitem]+"\n")
file.close()
| import urllib,json
url = "https://query.yahooapis.com/v1/public/yql?format=json&q=select+title%2C+units.temperature%2C+item.forecast%0Afrom+weather.forecast%0Awhere+woeid+in+%28select+woeid+from+geo.places+where+text%3D%22Katraj%2C+India%22%29%0Aand+u+%3D+%27C%27%0Alimit+5%0A%7C%0Asort%28field%3D%22item.forecast.date%22%2C+descending%3D%22false%22%29%0A%3B"
response = urllib.urlopen(url)
data =json.loads(response.read())
file = open('conkyweather.txt','w')
for entryitem in ["day","code","high","low"]:
for i in range(4):
file.write(data["query"]["results"]["channel"][i]['item']['forecast'][entryitem]+"\n")
file.close() | none | 1 | 2.916437 | 3 |
|
Documentation/ManualSource/wikicmd/MoinMoin/action/twikidraw.py | sleyzerzon/soar | 1 | 6630192 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - twikidraw
This action is used to call twikidraw
@copyright: 2001 by <NAME> (<EMAIL>),
2001-2004 by <NAME> <<EMAIL>>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE (<EMAIL>),
2007-2008 MoinMoin:ThomasWaldmann,
2005-2009 MoinMoin:ReimarBauer,
@license: GNU GPL, see COPYING for details.
"""
import os, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import wikiutil, config
from MoinMoin.action import AttachFile, do_show
from MoinMoin.action.AttachFile import _write_stream
from MoinMoin.security.textcha import TextCha
action_name = __name__.split('.')[-1]
def gedit_drawing(self, url, text, **kw):
# This is called for displaying a drawing image by gui editor.
_ = self.request.getText
# TODO: this 'text' argument is kind of superfluous, replace by using alt=... kw arg
# ToDo: make this clickable for the gui editor
if 'alt' not in kw or not kw['alt']:
kw['alt'] = text
# we force the title here, needed later for html>wiki converter
kw['title'] = "drawing:%s" % wikiutil.quoteWikinameURL(url)
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request)
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
kw['src'] = ci.member_url('drawing.png')
return self.image(**kw)
def attachment_drawing(self, url, text, **kw):
# This is called for displaying a clickable drawing image by text_html formatter.
# XXX text arg is unused!
_ = self.request.getText
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request, do='modify')
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
title = _('Edit drawing %(filename)s (opens in new window)') % {'filename': self.text(containername)}
kw['src'] = src = ci.member_url('drawing.png')
kw['css'] = 'drawing'
try:
mapfile = ci.get('drawing.map')
map = mapfile.read()
mapfile.close()
map = map.decode(config.charset)
except (KeyError, IOError, OSError):
map = u''
if map:
# we have a image map. inline it and add a map ref to the img tag
# we have also to set a unique ID
mapid = u'ImageMapOf%s%s' % (self.request.uid_generator(pagename), drawing)
map = map.replace(u'%MAPNAME%', mapid)
# add alt and title tags to areas
map = re.sub(ur'href\s*=\s*"((?!%TWIKIDRAW%).+?)"', ur'href="\1" alt="\1" title="\1"', map)
map = map.replace(u'%TWIKIDRAW%"', u'%s" alt="%s" title="%s"' % (
wikiutil.escape(drawing_url, 1), title, title))
# unxml, because 4.01 concrete will not validate />
map = map.replace(u'/>', u'>')
title = _('Clickable drawing: %(filename)s') % {'filename': self.text(containername)}
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
kw['usemap'] = '#'+mapid
return self.url(1, drawing_url) + map + self.image(**kw) + self.url(0)
else:
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
return self.url(1, drawing_url) + self.image(**kw) + self.url(0)
class TwikiDraw(object):
""" twikidraw action """
def __init__(self, request, pagename, target):
self.request = request
self.pagename = pagename
self.target = target
def save(self):
request = self.request
_ = request.getText
if not wikiutil.checkTicket(request, request.args.get('ticket', '')):
return _('Please use the interactive user interface to use action %(actionname)s!') % {'actionname': 'twikidraw.save' }
pagename = self.pagename
target = self.target
if not request.user.may.write(pagename):
return _('You are not allowed to save a drawing on this page.')
if not target:
return _("Empty target name given.")
file_upload = request.files.get('filepath')
if not file_upload:
# This might happen when trying to upload file names
# with non-ascii characters on Safari.
return _("No file content. Delete non ASCII characters from the file name and try again.")
filename = request.form['filename']
basepath, basename = os.path.split(filename)
basename, ext = os.path.splitext(basename)
ci = AttachFile.ContainerItem(request, pagename, target)
filecontent = file_upload.stream
content_length = None
if ext == '.draw': # TWikiDraw POSTs this first
AttachFile._addLogEntry(request, 'ATTDRW', pagename, target)
ci.truncate()
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.replace("\r", "")
elif ext == '.map':
# touch attachment directory to invalidate cache if new map is saved
attach_dir = AttachFile.getAttachDir(request, pagename)
os.utime(attach_dir, None)
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.strip()
else:
#content_length = file_upload.content_length
# XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj,
# without reading it into memory completely:
filecontent = filecontent.read()
ci.put('drawing' + ext, filecontent, content_length)
def render(self):
request = self.request
_ = request.getText
pagename = self.pagename
target = self.target
if not request.user.may.read(pagename):
return _('You are not allowed to view attachments of this page.')
if not target:
return _("Empty target name given.")
ci = AttachFile.ContainerItem(request, pagename, target)
if ci.exists():
drawurl = ci.member_url('drawing.draw')
pngurl = ci.member_url('drawing.png')
else:
drawurl = 'drawing.draw'
pngurl = 'drawing.png'
pageurl = request.href(pagename)
saveurl = request.href(pagename, action=action_name, do='save', target=target,
ticket=wikiutil.createTicket(request))
helpurl = request.href("HelpOnActions/AttachFile")
html = """
<p>
<applet code="CH.ifa.draw.twiki.TWikiDraw.class"
archive="%(htdocs)s/applets/TWikiDrawPlugin/twikidraw.jar" width="640" height="480">
<param name="drawpath" value="%(drawurl)s">
<param name="pngpath" value="%(pngurl)s">
<param name="savepath" value="%(saveurl)s">
<param name="basename" value="%(basename)s">
<param name="viewpath" value="%(pageurl)s">
<param name="helppath" value="%(helpurl)s">
<strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
htdocs=request.cfg.url_prefix_static,
basename=wikiutil.escape(target, 1),
drawurl=wikiutil.escape(drawurl, 1),
pngurl=wikiutil.escape(pngurl, 1),
pageurl=wikiutil.escape(pageurl, 1),
saveurl=wikiutil.escape(saveurl, 1),
helpurl=wikiutil.escape(helpurl, 1),
)
title = "%s %s:%s" % (_("Edit drawing"), pagename, target)
request.theme.send_title(title, page=request.page, pagename=pagename)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.rawHTML(html))
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def execute(pagename, request):
target = request.values.get('target')
twd = TwikiDraw(request, pagename, target)
do = request.values.get('do')
if do == 'save':
msg = twd.save()
else:
msg = twd.render()
if msg:
request.theme.add_msg(msg, 'error')
do_show(pagename, request)
| # -*- coding: iso-8859-1 -*-
"""
MoinMoin - twikidraw
This action is used to call twikidraw
@copyright: 2001 by <NAME> (<EMAIL>),
2001-2004 by <NAME> <<EMAIL>>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE (<EMAIL>),
2007-2008 MoinMoin:ThomasWaldmann,
2005-2009 MoinMoin:ReimarBauer,
@license: GNU GPL, see COPYING for details.
"""
import os, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import wikiutil, config
from MoinMoin.action import AttachFile, do_show
from MoinMoin.action.AttachFile import _write_stream
from MoinMoin.security.textcha import TextCha
action_name = __name__.split('.')[-1]
def gedit_drawing(self, url, text, **kw):
# This is called for displaying a drawing image by gui editor.
_ = self.request.getText
# TODO: this 'text' argument is kind of superfluous, replace by using alt=... kw arg
# ToDo: make this clickable for the gui editor
if 'alt' not in kw or not kw['alt']:
kw['alt'] = text
# we force the title here, needed later for html>wiki converter
kw['title'] = "drawing:%s" % wikiutil.quoteWikinameURL(url)
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request)
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
kw['src'] = ci.member_url('drawing.png')
return self.image(**kw)
def attachment_drawing(self, url, text, **kw):
# This is called for displaying a clickable drawing image by text_html formatter.
# XXX text arg is unused!
_ = self.request.getText
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request, do='modify')
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
title = _('Edit drawing %(filename)s (opens in new window)') % {'filename': self.text(containername)}
kw['src'] = src = ci.member_url('drawing.png')
kw['css'] = 'drawing'
try:
mapfile = ci.get('drawing.map')
map = mapfile.read()
mapfile.close()
map = map.decode(config.charset)
except (KeyError, IOError, OSError):
map = u''
if map:
# we have a image map. inline it and add a map ref to the img tag
# we have also to set a unique ID
mapid = u'ImageMapOf%s%s' % (self.request.uid_generator(pagename), drawing)
map = map.replace(u'%MAPNAME%', mapid)
# add alt and title tags to areas
map = re.sub(ur'href\s*=\s*"((?!%TWIKIDRAW%).+?)"', ur'href="\1" alt="\1" title="\1"', map)
map = map.replace(u'%TWIKIDRAW%"', u'%s" alt="%s" title="%s"' % (
wikiutil.escape(drawing_url, 1), title, title))
# unxml, because 4.01 concrete will not validate />
map = map.replace(u'/>', u'>')
title = _('Clickable drawing: %(filename)s') % {'filename': self.text(containername)}
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
kw['usemap'] = '#'+mapid
return self.url(1, drawing_url) + map + self.image(**kw) + self.url(0)
else:
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
return self.url(1, drawing_url) + self.image(**kw) + self.url(0)
class TwikiDraw(object):
""" twikidraw action """
def __init__(self, request, pagename, target):
self.request = request
self.pagename = pagename
self.target = target
def save(self):
request = self.request
_ = request.getText
if not wikiutil.checkTicket(request, request.args.get('ticket', '')):
return _('Please use the interactive user interface to use action %(actionname)s!') % {'actionname': 'twikidraw.save' }
pagename = self.pagename
target = self.target
if not request.user.may.write(pagename):
return _('You are not allowed to save a drawing on this page.')
if not target:
return _("Empty target name given.")
file_upload = request.files.get('filepath')
if not file_upload:
# This might happen when trying to upload file names
# with non-ascii characters on Safari.
return _("No file content. Delete non ASCII characters from the file name and try again.")
filename = request.form['filename']
basepath, basename = os.path.split(filename)
basename, ext = os.path.splitext(basename)
ci = AttachFile.ContainerItem(request, pagename, target)
filecontent = file_upload.stream
content_length = None
if ext == '.draw': # TWikiDraw POSTs this first
AttachFile._addLogEntry(request, 'ATTDRW', pagename, target)
ci.truncate()
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.replace("\r", "")
elif ext == '.map':
# touch attachment directory to invalidate cache if new map is saved
attach_dir = AttachFile.getAttachDir(request, pagename)
os.utime(attach_dir, None)
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.strip()
else:
#content_length = file_upload.content_length
# XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj,
# without reading it into memory completely:
filecontent = filecontent.read()
ci.put('drawing' + ext, filecontent, content_length)
def render(self):
request = self.request
_ = request.getText
pagename = self.pagename
target = self.target
if not request.user.may.read(pagename):
return _('You are not allowed to view attachments of this page.')
if not target:
return _("Empty target name given.")
ci = AttachFile.ContainerItem(request, pagename, target)
if ci.exists():
drawurl = ci.member_url('drawing.draw')
pngurl = ci.member_url('drawing.png')
else:
drawurl = 'drawing.draw'
pngurl = 'drawing.png'
pageurl = request.href(pagename)
saveurl = request.href(pagename, action=action_name, do='save', target=target,
ticket=wikiutil.createTicket(request))
helpurl = request.href("HelpOnActions/AttachFile")
html = """
<p>
<applet code="CH.ifa.draw.twiki.TWikiDraw.class"
archive="%(htdocs)s/applets/TWikiDrawPlugin/twikidraw.jar" width="640" height="480">
<param name="drawpath" value="%(drawurl)s">
<param name="pngpath" value="%(pngurl)s">
<param name="savepath" value="%(saveurl)s">
<param name="basename" value="%(basename)s">
<param name="viewpath" value="%(pageurl)s">
<param name="helppath" value="%(helpurl)s">
<strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
htdocs=request.cfg.url_prefix_static,
basename=wikiutil.escape(target, 1),
drawurl=wikiutil.escape(drawurl, 1),
pngurl=wikiutil.escape(pngurl, 1),
pageurl=wikiutil.escape(pageurl, 1),
saveurl=wikiutil.escape(saveurl, 1),
helpurl=wikiutil.escape(helpurl, 1),
)
title = "%s %s:%s" % (_("Edit drawing"), pagename, target)
request.theme.send_title(title, page=request.page, pagename=pagename)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.rawHTML(html))
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def execute(pagename, request):
target = request.values.get('target')
twd = TwikiDraw(request, pagename, target)
do = request.values.get('do')
if do == 'save':
msg = twd.save()
else:
msg = twd.render()
if msg:
request.theme.add_msg(msg, 'error')
do_show(pagename, request)
| en | 0.697322 | # -*- coding: iso-8859-1 -*- MoinMoin - twikidraw This action is used to call twikidraw @copyright: 2001 by <NAME> (<EMAIL>), 2001-2004 by <NAME> <<EMAIL>>, 2005 MoinMoin:AlexanderSchremmer, 2005 DiegoOngaro at ETSZONE (<EMAIL>), 2007-2008 MoinMoin:ThomasWaldmann, 2005-2009 MoinMoin:ReimarBauer, @license: GNU GPL, see COPYING for details. # This is called for displaying a drawing image by gui editor. # TODO: this 'text' argument is kind of superfluous, replace by using alt=... kw arg # ToDo: make this clickable for the gui editor # we force the title here, needed later for html>wiki converter # TODO: we need a new "drawimg" in similar grey style and size # This is called for displaying a clickable drawing image by text_html formatter. # XXX text arg is unused! # TODO: we need a new "drawimg" in similar grey style and size # we have a image map. inline it and add a map ref to the img tag # we have also to set a unique ID # add alt and title tags to areas # unxml, because 4.01 concrete will not validate /> twikidraw action # This might happen when trying to upload file names # with non-ascii characters on Safari. # TWikiDraw POSTs this first # read file completely into memory # touch attachment directory to invalidate cache if new map is saved # read file completely into memory #content_length = file_upload.content_length # XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj, # without reading it into memory completely: <p> <applet code="CH.ifa.draw.twiki.TWikiDraw.class" archive="%(htdocs)s/applets/TWikiDrawPlugin/twikidraw.jar" width="640" height="480"> <param name="drawpath" value="%(drawurl)s"> <param name="pngpath" value="%(pngurl)s"> <param name="savepath" value="%(saveurl)s"> <param name="basename" value="%(basename)s"> <param name="viewpath" value="%(pageurl)s"> <param name="helppath" value="%(helpurl)s"> <strong>NOTE:</strong> You need a Java enabled browser to edit the drawing. </applet> </p> | 2.429481 | 2 |
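The image-map rewriting in attachment_drawing() is the least obvious part of the action; the standalone sketch below replays the same substitutions on an invented map string (Python 3 syntax, so the u/ur string prefixes are dropped):

import re

sample_map = '<map name="%MAPNAME%"><area href="SomePage" shape="rect" coords="0,0,10,10" /></map>'
mapid = "ImageMapOfExamplePage"
rewritten = sample_map.replace("%MAPNAME%", mapid)
# add alt and title attributes to every area that is not the %TWIKIDRAW% placeholder
rewritten = re.sub(r'href\s*=\s*"((?!%TWIKIDRAW%).+?)"',
                   r'href="\1" alt="\1" title="\1"', rewritten)
# un-xml the self-closing tag so HTML 4.01 validates
rewritten = rewritten.replace("/>", ">")
print(rewritten)
# -> <map name="ImageMapOfExamplePage"><area href="SomePage" alt="SomePage" title="SomePage" shape="rect" coords="0,0,10,10" ></map>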
rpilcd2menu/rpilcd2menu.py | SNOC/rpilcd2 | 0 | 6630193 | import time
# OLED screen
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
# offscreen
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = 25
# Note the following are only used with SPI:
DC = 24
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# buttons
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN) # SW1
GPIO.setup(17, GPIO.IN) # SW2
GPIO.setup(18, GPIO.IN) # SW4
GPIO.setup(27, GPIO.IN) # SW3
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
padding = 2
top = padding
bottom = height-padding
x = padding
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Load default font.
font = ImageFont.load_default()
def drawMenuItem(str, selected, x, y, displayWidth, imageWidth):
sizeX, sizeY = draw.textsize(str)
if selected :
draw.rectangle((x, y, x + sizeX, y + sizeY), outline=255, fill=255)
draw.text((x, y), str, font=font, fill=0)
else :
draw.text((x, y), str, font=font, fill=255)
if imageWidth > (displayWidth - x - sizeX) :
imageWidth = displayWidth - x - sizeX
return y + sizeY, imageWidth
selectedItem = 1
itemCount = 5
imageWidth = disp.width
updateMenu = True
updateDisplay = True
repeatDecounter=0
print('Press Ctrl-C to quit.')
while True:
if not GPIO.input(4): # SW1 - selection UP
if selectedItem > 1 :
selectedItem -= 1
updateMenu = True
repeatDecounter=15
elif not GPIO.input(27) : # SW3 - selection DOWN
if selectedItem < itemCount:
selectedItem += 1
updateMenu = True
repeatDecounter=15
elif not GPIO.input(18) : # SW4 - display an image named 1.png if item 1 is selected, 2.png if item 2 is selected, etc.
readImage = Image.open('{0}.png'.format(selectedItem)).convert('1')
aspectRatio = readImage.size[1] / (readImage.size[0] * 1.0)
if aspectRatio < 1.0 and readImage.size[0] > imageWidth :
readImage = readImage.resize((imageWidth, (int) (imageWidth * aspectRatio)), Image.ANTIALIAS)
elif readImage.size[1] > disp.height :
readImage = readImage.resize(((int) (disp.height / aspectRatio), disp.height), Image.ANTIALIAS)
hFill = (imageWidth - readImage.size[0]) / 2
vFill = (disp.height - readImage.size[1]) / 2
draw.rectangle((disp.width - imageWidth, 0, disp.width, disp.height), outline=0, fill=0)
image.paste(readImage, (disp.width - imageWidth + hFill, vFill, disp.width - imageWidth + hFill + readImage.size[0], vFill + readImage.size[1]))
updateDisplay = True
elif not GPIO.input(17) : # SW2
draw.rectangle((disp.width - imageWidth, 0, disp.width, disp.height), outline=0, fill=0)
updateDisplay = True
if updateMenu :
updateMenu = False
updateDisplay = True
# Draw a black filled box to clear the image.
draw.rectangle((0,0,disp.width - imageWidth,height), outline=0, fill=0)
# Draw menu
starty = top
starty, imageWidth = drawMenuItem('Chat 1', selectedItem == 1, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Chat 2', selectedItem == 2, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('SNOC', selectedItem == 3, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Yadom', selectedItem == 4, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Renard', selectedItem == 5, x, starty, disp.width, imageWidth)
if updateDisplay :
        # Display image
updateDisplay = False
disp.image(image)
disp.display()
# wait all buttons are released
while ((not GPIO.input(4)) or (not GPIO.input(27))) and (repeatDecounter != 0) :
time.sleep(0.05)
if repeatDecounter != 0:
repeatDecounter -= 1
while (not GPIO.input(18)) or (not GPIO.input(17)) :
time.sleep(0.05)
| import time
# OLED screen
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
# offscreen
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = 25
# Note the following are only used with SPI:
DC = 24
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# buttons
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN) # SW1
GPIO.setup(17, GPIO.IN) # SW2
GPIO.setup(18, GPIO.IN) # SW4
GPIO.setup(27, GPIO.IN) # SW3
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
padding = 2
top = padding
bottom = height-padding
x = padding
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Load default font.
font = ImageFont.load_default()
def drawMenuItem(str, selected, x, y, displayWidth, imageWidth):
sizeX, sizeY = draw.textsize(str)
if selected :
draw.rectangle((x, y, x + sizeX, y + sizeY), outline=255, fill=255)
draw.text((x, y), str, font=font, fill=0)
else :
draw.text((x, y), str, font=font, fill=255)
if imageWidth > (displayWidth - x - sizeX) :
imageWidth = displayWidth - x - sizeX
return y + sizeY, imageWidth
selectedItem = 1
itemCount = 5
imageWidth = disp.width
updateMenu = True
updateDisplay = True
repeatDecounter=0
print('Press Ctrl-C to quit.')
while True:
if not GPIO.input(4): # SW1 - selection UP
if selectedItem > 1 :
selectedItem -= 1
updateMenu = True
repeatDecounter=15
elif not GPIO.input(27) : # SW3 - selection DOWN
if selectedItem < itemCount:
selectedItem += 1
updateMenu = True
repeatDecounter=15
elif not GPIO.input(18) : # SW4 - display an image named 1.png if item 1 is selected, 2.png if item 2 is selected, etc.
readImage = Image.open('{0}.png'.format(selectedItem)).convert('1')
aspectRatio = readImage.size[1] / (readImage.size[0] * 1.0)
if aspectRatio < 1.0 and readImage.size[0] > imageWidth :
readImage = readImage.resize((imageWidth, (int) (imageWidth * aspectRatio)), Image.ANTIALIAS)
elif readImage.size[1] > disp.height :
readImage = readImage.resize(((int) (disp.height / aspectRatio), disp.height), Image.ANTIALIAS)
hFill = (imageWidth - readImage.size[0]) / 2
vFill = (disp.height - readImage.size[1]) / 2
draw.rectangle((disp.width - imageWidth, 0, disp.width, disp.height), outline=0, fill=0)
image.paste(readImage, (disp.width - imageWidth + hFill, vFill, disp.width - imageWidth + hFill + readImage.size[0], vFill + readImage.size[1]))
updateDisplay = True
elif not GPIO.input(17) : # SW2
draw.rectangle((disp.width - imageWidth, 0, disp.width, disp.height), outline=0, fill=0)
updateDisplay = True
if updateMenu :
updateMenu = False
updateDisplay = True
# Draw a black filled box to clear the image.
draw.rectangle((0,0,disp.width - imageWidth,height), outline=0, fill=0)
# Draw menu
starty = top
starty, imageWidth = drawMenuItem('Chat 1', selectedItem == 1, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Chat 2', selectedItem == 2, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('SNOC', selectedItem == 3, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Yadom', selectedItem == 4, x, starty, disp.width, imageWidth)
starty, imageWidth = drawMenuItem('Renard', selectedItem == 5, x, starty, disp.width, imageWidth)
if updateDisplay :
# Display imageFalse
updateDisplay = False
disp.image(image)
disp.display()
# wait all buttons are released
while ((not GPIO.input(4)) or (not GPIO.input(27))) and (repeatDecounter != 0) :
time.sleep(0.05)
if repeatDecounter != 0:
repeatDecounter -= 1
while (not GPIO.input(18)) or (not GPIO.input(17)) :
time.sleep(0.05)
| en | 0.736387 | # OLED screen # offscreen # Raspberry Pi pin configuration: # Note the following are only used with SPI: # 128x64 display with hardware SPI: # buttons # SW1 # SW2 # SW4 # SW3 # Initialize library. # Clear display. # Create blank image for drawing. # Make sure to create image with mode '1' for 1-bit color. # Get drawing object to draw on image. # Draw a black filled box to clear the image. # Load default font. # SW1 - selection UP # SW3 - selection DOWN # SW4 - display an image named 1.png if item 1 is selected, 2.png if item 2 is selected, etc. # SW2 # Draw a black filled box to clear the image. # Draw menu # Display imageFalse # wait all buttons are released | 2.76329 | 3 |
PathPlanning/BatchInformedRRTStar/batch_informed_rrtstar.py | tanishqjasoria/PythonRobotics | 7 | 6630194 | <filename>PathPlanning/BatchInformedRRTStar/batch_informed_rrtstar.py
"""
Batch Informed Trees based path planning:
Uses a heuristic to efficiently search increasingly dense
RGGs while reusing previous information. Provides faster
convergence that RRT*, Informed RRT* and other sampling based
methods.
Uses lazy connecting by combining sampling based methods and A*
like incremental graph search algorithms.
author: <NAME>(@karanchawla)
<NAME>(@Atsushi_twi)
Reference: https://arxiv.org/abs/1405.5848
"""
import random
import numpy as np
import math
import matplotlib.pyplot as plt
show_animation = True
class RTree(object):
# Class to represent the explicit tree created
# while sampling through the state space
def __init__(self, start=[0, 0], lowerLimit=[0, 0], upperLimit=[10, 10], resolution=1):
self.vertices = dict()
self.edges = []
self.start = start
self.lowerLimit = lowerLimit
self.upperLimit = upperLimit
self.dimension = len(lowerLimit)
self.num_cells = [0] * self.dimension
self.resolution = resolution
# compute the number of grid cells based on the limits and
# resolution given
for idx in range(self.dimension):
self.num_cells[idx] = np.ceil(
(upperLimit[idx] - lowerLimit[idx]) / resolution)
vertex_id = self.realWorldToNodeId(start)
self.vertices[vertex_id] = []
def getRootId(self):
# return the id of the root of the tree
return 0
def addVertex(self, vertex):
# add a vertex to the tree
vertex_id = self.realWorldToNodeId(vertex)
self.vertices[vertex_id] = []
return vertex_id
def addEdge(self, v, x):
# create an edge between v and x vertices
if (v, x) not in self.edges:
self.edges.append((v, x))
# since the tree is undirected
self.vertices[v].append(x)
self.vertices[x].append(v)
def realCoordsToGridCoord(self, real_coord):
# convert real world coordinates to grid space
# depends on the resolution of the grid
# the output is the same as real world coords if the resolution
# is set to 1
coord = [0] * self.dimension
for i in range(len(coord)):
start = self.lowerLimit[i] # start of the grid space
coord[i] = np.around((real_coord[i] - start) / self.resolution)
return coord
def gridCoordinateToNodeId(self, coord):
# This function maps a grid coordinate to a unique
# node id
nodeId = 0
for i in range(len(coord) - 1, -1, -1):
product = 1
for j in range(0, i):
product = product * self.num_cells[j]
nodeId = nodeId + coord[i] * product
return nodeId
def realWorldToNodeId(self, real_coord):
# first convert the given coordinates to grid space and then
# convert the grid space coordinates to a unique node id
return self.gridCoordinateToNodeId(self.realCoordsToGridCoord(real_coord))
def gridCoordToRealWorldCoord(self, coord):
# This function smaps a grid coordinate in discrete space
# to a configuration in the full configuration space
config = [0] * self.dimension
for i in range(0, len(coord)):
# start of the real world / configuration space
start = self.lowerLimit[i]
# step from the coordinate in the grid
grid_step = self.resolution * coord[i]
config[i] = start + grid_step
return config
def nodeIdToGridCoord(self, node_id):
# This function maps a node id to the associated
# grid coordinate
coord = [0] * len(self.lowerLimit)
for i in range(len(coord) - 1, -1, -1):
# Get the product of the grid space maximums
prod = 1
for j in range(0, i):
prod = prod * self.num_cells[j]
coord[i] = np.floor(node_id / prod)
node_id = node_id - (coord[i] * prod)
return coord
def nodeIdToRealWorldCoord(self, nid):
# This function maps a node in discrete space to a configuraiton
# in the full configuration space
return self.gridCoordToRealWorldCoord(self.nodeIdToGridCoord(nid))
# Uses Batch Informed Trees to find a path from start to goal
class BITStar(object):
def __init__(self, start, goal,
obstacleList, randArea, eta=2.0,
maxIter=80):
self.start = start
self.goal = goal
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.maxIter = maxIter
self.obstacleList = obstacleList
self.vertex_queue = []
self.edge_queue = []
self.samples = dict()
self.g_scores = dict()
self.f_scores = dict()
self.nodes = dict()
self.r = float('inf')
self.eta = eta # tunable parameter
self.unit_ball_measure = 1
self.old_vertices = []
# initialize tree
lowerLimit = [randArea[0], randArea[0]]
upperLimit = [randArea[1], randArea[1]]
self.tree = RTree(start=start, lowerLimit=lowerLimit,
upperLimit=upperLimit, resolution=0.01)
def setup_planning(self):
self.startId = self.tree.realWorldToNodeId(self.start)
self.goalId = self.tree.realWorldToNodeId(self.goal)
# add goal to the samples
self.samples[self.goalId] = self.goal
self.g_scores[self.goalId] = float('inf')
self.f_scores[self.goalId] = 0
# add the start id to the tree
self.tree.addVertex(self.start)
self.g_scores[self.startId] = 0
self.f_scores[self.startId] = self.computeHeuristicCost(
self.startId, self.goalId)
# max length we expect to find in our 'informed' sample space, starts as infinite
cBest = self.g_scores[self.goalId]
# Computing the sampling space
cMin = math.sqrt(pow(self.start[0] - self.goal[1], 2) +
pow(self.start[0] - self.goal[1], 2)) / 1.5
xCenter = np.matrix([[(self.start[0] + self.goal[0]) / 2.0],
[(self.goal[1] - self.start[1]) / 2.0], [0]])
a1 = np.matrix([[(self.goal[0] - self.start[0]) / cMin],
[(self.goal[1] - self.start[1]) / cMin], [0]])
etheta = math.atan2(a1[1], a1[0])
# first column of idenity matrix transposed
id1_t = np.matrix([1.0, 0.0, 0.0])
M = np.dot(a1, id1_t)
U, S, Vh = np.linalg.svd(M, 1, 1)
C = np.dot(np.dot(U, np.diag(
[1.0, 1.0, np.linalg.det(U) * np.linalg.det(np.transpose(Vh))])), Vh)
self.samples.update(self.informedSample(
200, cBest, cMin, xCenter, C))
return etheta, cMin, xCenter, C, cBest
def setup_sample(self, iterations, foundGoal, cMin, xCenter, C, cBest):
if len(self.vertex_queue) == 0 and len(self.edge_queue) == 0:
print("Batch: ", iterations)
# Using informed rrt star way of computing the samples
self.r = 2.0
if iterations != 0:
if foundGoal:
# a better way to do this would be to make number of samples
# a function of cMin
m = 200
self.samples = dict()
self.samples[self.goalId] = self.goal
else:
m = 100
cBest = self.g_scores[self.goalId]
self.samples.update(self.informedSample(
m, cBest, cMin, xCenter, C))
# make the old vertices the new vertices
self.old_vertices += self.tree.vertices.keys()
# add the vertices to the vertex queue
for nid in self.tree.vertices.keys():
if nid not in self.vertex_queue:
self.vertex_queue.append(nid)
return cBest
def plan(self, animation=True):
etheta, cMin, xCenter, C, cBest = self.setup_planning()
iterations = 0
foundGoal = False
# run until done
while (iterations < self.maxIter):
cBest = self.setup_sample(iterations,
foundGoal, cMin, xCenter, C, cBest)
# expand the best vertices until an edge is better than the vertex
# this is done because the vertex cost represents the lower bound
# on the edge cost
while(self.bestVertexQueueValue() <= self.bestEdgeQueueValue()):
self.expandVertex(self.bestInVertexQueue())
# add the best edge to the tree
bestEdge = self.bestInEdgeQueue()
self.edge_queue.remove(bestEdge)
# Check if this can improve the current solution
estimatedCostOfVertex = self.g_scores[bestEdge[0]] + self.computeDistanceCost(
bestEdge[0], bestEdge[1]) + self.computeHeuristicCost(bestEdge[1], self.goalId)
estimatedCostOfEdge = self.computeDistanceCost(self.startId, bestEdge[0]) + self.computeHeuristicCost(
bestEdge[0], bestEdge[1]) + self.computeHeuristicCost(bestEdge[1], self.goalId)
actualCostOfEdge = self.g_scores[bestEdge[0]] + \
self.computeDistanceCost(bestEdge[0], bestEdge[1])
f1 = estimatedCostOfVertex < self.g_scores[self.goalId]
f2 = estimatedCostOfEdge < self.g_scores[self.goalId]
f3 = actualCostOfEdge < self.g_scores[self.goalId]
if f1 and f2 and f3:
# connect this edge
firstCoord = self.tree.nodeIdToRealWorldCoord(
bestEdge[0])
secondCoord = self.tree.nodeIdToRealWorldCoord(
bestEdge[1])
path = self.connect(firstCoord, secondCoord)
lastEdge = self.tree.realWorldToNodeId(secondCoord)
if path is None or len(path) == 0:
continue
nextCoord = path[len(path) - 1, :]
nextCoordPathId = self.tree.realWorldToNodeId(
nextCoord)
bestEdge = (bestEdge[0], nextCoordPathId)
if(bestEdge[1] in self.tree.vertices.keys()):
continue
else:
try:
del self.samples[bestEdge[1]]
except(KeyError):
pass
eid = self.tree.addVertex(nextCoord)
self.vertex_queue.append(eid)
if eid == self.goalId or bestEdge[0] == self.goalId or bestEdge[1] == self.goalId:
print("Goal found")
foundGoal = True
self.tree.addEdge(bestEdge[0], bestEdge[1])
g_score = self.computeDistanceCost(
bestEdge[0], bestEdge[1])
self.g_scores[bestEdge[1]] = g_score + \
self.g_scores[bestEdge[0]]
self.f_scores[bestEdge[1]] = g_score + \
self.computeHeuristicCost(bestEdge[1], self.goalId)
self.updateGraph()
# visualize new edge
if animation:
self.drawGraph(xCenter=xCenter, cBest=cBest,
cMin=cMin, etheta=etheta, samples=self.samples.values(),
start=firstCoord, end=secondCoord, tree=self.tree.edges)
self.remove_queue(lastEdge, bestEdge)
else:
print("Nothing good")
self.edge_queue = []
self.vertex_queue = []
iterations += 1
print("Finding the path")
return self.find_final_path()
def find_final_path(self):
plan = []
plan.append(self.goal)
currId = self.goalId
while (currId != self.startId):
plan.append(self.tree.nodeIdToRealWorldCoord(currId))
try:
currId = self.nodes[currId]
except(KeyError):
print("Path key error")
return []
plan.append(self.start)
plan = plan[::-1] # reverse the plan
return plan
def remove_queue(self, lastEdge, bestEdge):
for edge in self.edge_queue:
if(edge[1] == bestEdge[1]):
if self.g_scores[edge[1]] + self.computeDistanceCost(edge[1], bestEdge[1]) >= self.g_scores[self.goalId]:
if(lastEdge, bestEdge[1]) in self.edge_queue:
self.edge_queue.remove(
(lastEdge, bestEdge[1]))
def connect(self, start, end):
# A function which attempts to extend from a start coordinates
# to goal coordinates
steps = int(self.computeDistanceCost(self.tree.realWorldToNodeId(
start), self.tree.realWorldToNodeId(end)) * 10)
x = np.linspace(start[0], end[0], num=steps)
y = np.linspace(start[1], end[1], num=steps)
for i in range(len(x)):
if(self._collisionCheck(x[i], y[i])):
if(i == 0):
return None
# if collision, send path until collision
return np.vstack((x[0:i], y[0:i])).transpose()
return np.vstack((x, y)).transpose()
def _collisionCheck(self, x, y):
for (ox, oy, size) in self.obstacleList:
dx = ox - x
dy = oy - y
d = dx * dx + dy * dy
if d <= size ** 2:
return True # collision
return False
# def prune(self, c):
def computeHeuristicCost(self, start_id, goal_id):
# Using Manhattan distance as heuristic
start = np.array(self.tree.nodeIdToRealWorldCoord(start_id))
goal = np.array(self.tree.nodeIdToRealWorldCoord(goal_id))
return np.linalg.norm(start - goal, 2)
def computeDistanceCost(self, vid, xid):
# L2 norm distance
start = np.array(self.tree.nodeIdToRealWorldCoord(vid))
stop = np.array(self.tree.nodeIdToRealWorldCoord(xid))
return np.linalg.norm(stop - start, 2)
# Sample free space confined in the radius of ball R
def informedSample(self, m, cMax, cMin, xCenter, C):
samples = dict()
print("g_Score goal id: ", self.g_scores[self.goalId])
for i in range(m + 1):
if cMax < float('inf'):
r = [cMax / 2.0,
math.sqrt(cMax**2 - cMin**2) / 2.0,
math.sqrt(cMax**2 - cMin**2) / 2.0]
L = np.diag(r)
xBall = self.sampleUnitBall()
rnd = np.dot(np.dot(C, L), xBall) + xCenter
rnd = [rnd[(0, 0)], rnd[(1, 0)]]
random_id = self.tree.realWorldToNodeId(rnd)
samples[random_id] = rnd
else:
rnd = self.sampleFreeSpace()
random_id = self.tree.realWorldToNodeId(rnd)
samples[random_id] = rnd
return samples
# Sample point in a unit ball
def sampleUnitBall(self):
a = random.random()
b = random.random()
if b < a:
a, b = b, a
sample = (b * math.cos(2 * math.pi * a / b),
b * math.sin(2 * math.pi * a / b))
return np.array([[sample[0]], [sample[1]], [0]])
def sampleFreeSpace(self):
rnd = [random.uniform(self.minrand, self.maxrand),
random.uniform(self.minrand, self.maxrand)]
return rnd
def bestVertexQueueValue(self):
if(len(self.vertex_queue) == 0):
return float('inf')
values = [self.g_scores[v] +
self.computeHeuristicCost(v, self.goalId) for v in self.vertex_queue]
values.sort()
return values[0]
def bestEdgeQueueValue(self):
if(len(self.edge_queue) == 0):
return float('inf')
# return the best value in the queue by score g_tau[v] + c(v,x) + h(x)
values = [self.g_scores[e[0]] + self.computeDistanceCost(e[0], e[1]) +
self.computeHeuristicCost(e[1], self.goalId) for e in self.edge_queue]
values.sort(reverse=True)
return values[0]
def bestInVertexQueue(self):
# return the best value in the vertex queue
v_plus_vals = [(v, self.g_scores[v] + self.computeHeuristicCost(v, self.goalId))
for v in self.vertex_queue]
v_plus_vals = sorted(v_plus_vals, key=lambda x: x[1])
# print(v_plus_vals)
return v_plus_vals[0][0]
def bestInEdgeQueue(self):
e_and_values = [(e[0], e[1], self.g_scores[e[0]] + self.computeDistanceCost(
e[0], e[1]) + self.computeHeuristicCost(e[1], self.goalId)) for e in self.edge_queue]
e_and_values = sorted(e_and_values, key=lambda x: x[2])
return (e_and_values[0][0], e_and_values[0][1])
def expandVertex(self, vid):
self.vertex_queue.remove(vid)
# get the coordinates for given vid
currCoord = np.array(self.tree.nodeIdToRealWorldCoord(vid))
# get the nearest value in vertex for every one in samples where difference is
# less than the radius
neigbors = []
for sid, scoord in self.samples.items():
scoord = np.array(scoord)
if(np.linalg.norm(scoord - currCoord, 2) <= self.r and sid != vid):
neigbors.append((sid, scoord))
# add an edge to the edge queue is the path might improve the solution
for neighbor in neigbors:
sid = neighbor[0]
estimated_f_score = self.computeDistanceCost(
self.startId, vid) + self.computeHeuristicCost(sid, self.goalId) + self.computeDistanceCost(vid, sid)
if estimated_f_score < self.g_scores[self.goalId]:
self.edge_queue.append((vid, sid))
# add the vertex to the edge queue
self.add_vertex_to_edge_queue(vid, currCoord)
def add_vertex_to_edge_queue(self, vid, currCoord):
if vid not in self.old_vertices:
neigbors = []
for v, edges in self.tree.vertices.items():
if v != vid and (v, vid) not in self.edge_queue and (vid, v) not in self.edge_queue:
vcoord = self.tree.nodeIdToRealWorldCoord(v)
if(np.linalg.norm(vcoord - currCoord, 2) <= self.r):
neigbors.append((vid, vcoord))
for neighbor in neigbors:
sid = neighbor[0]
estimated_f_score = self.computeDistanceCost(self.startId, vid) + \
self.computeDistanceCost(
vid, sid) + self.computeHeuristicCost(sid, self.goalId)
if estimated_f_score < self.g_scores[self.goalId] and (self.g_scores[vid] + self.computeDistanceCost(vid, sid)) < self.g_scores[sid]:
self.edge_queue.append((vid, sid))
def updateGraph(self):
closedSet = []
openSet = []
currId = self.startId
openSet.append(currId)
while len(openSet) != 0:
# get the element with lowest f_score
currId = min(openSet, key=lambda x: self.f_scores[x])
# remove element from open set
openSet.remove(currId)
# Check if we're at the goal
if(currId == self.goalId):
self.nodes[self.goalId]
break
if(currId not in closedSet):
closedSet.append(currId)
# find a non visited successor to the current node
successors = self.tree.vertices[currId]
for succesor in successors:
if(succesor in closedSet):
continue
else:
# claculate tentative g score
g_score = self.g_scores[currId] + \
self.computeDistanceCost(currId, succesor)
if succesor not in openSet:
# add the successor to open set
openSet.append(succesor)
elif g_score >= self.g_scores[succesor]:
continue
# update g and f scores
self.g_scores[succesor] = g_score
self.f_scores[succesor] = g_score + \
self.computeHeuristicCost(succesor, self.goalId)
# store the parent and child
self.nodes[succesor] = currId
def drawGraph(self, xCenter=None, cBest=None, cMin=None, etheta=None,
samples=None, start=None, end=None, tree=None):
print("Plotting Graph")
plt.clf()
for rnd in samples:
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
if cBest != float('inf'):
self.plot_ellipse(xCenter, cBest, cMin, etheta)
if start is not None and end is not None:
plt.plot([start[0], start[1]], [end[0], end[1]], "-g")
for (ox, oy, size) in self.obstacleList:
plt.plot(ox, oy, "ok", ms=30 * size)
plt.plot(self.start[0], self.start[1], "xr")
plt.plot(self.goal[0], self.goal[1], "xr")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
def plot_ellipse(self, xCenter, cBest, cMin, etheta):
a = math.sqrt(cBest**2 - cMin**2) / 2.0
b = cBest / 2.0
angle = math.pi / 2.0 - etheta
cx = xCenter[0]
cy = xCenter[1]
t = np.arange(0, 2 * math.pi + 0.1, 0.1)
x = [a * math.cos(it) for it in t]
y = [b * math.sin(it) for it in t]
R = np.matrix([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
fx = R * np.matrix([x, y])
px = np.array(fx[0, :] + cx).flatten()
py = np.array(fx[1, :] + cy).flatten()
plt.plot(cx, cy, "xc")
plt.plot(px, py, "--c")
def main():
print("Starting Batch Informed Trees Star planning")
obstacleList = [
(5, 5, 0.5),
(9, 6, 1),
(7, 5, 1),
(1, 5, 1),
(3, 6, 1),
(7, 9, 1)
]
bitStar = BITStar(start=[-1, 0], goal=[3, 8], obstacleList=obstacleList,
randArea=[-2, 15])
path = bitStar.plan(animation=show_animation)
print("Done")
if show_animation:
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.05)
plt.show()
if __name__ == '__main__':
main()
| <filename>PathPlanning/BatchInformedRRTStar/batch_informed_rrtstar.py
"""
Batch Informed Trees based path planning:
Uses a heuristic to efficiently search increasingly dense
RGGs while reusing previous information. Provides faster
convergence that RRT*, Informed RRT* and other sampling based
methods.
Uses lazy connecting by combining sampling based methods and A*
like incremental graph search algorithms.
author: <NAME>(@karanchawla)
<NAME>(@Atsushi_twi)
Reference: https://arxiv.org/abs/1405.5848
"""
import random
import numpy as np
import math
import matplotlib.pyplot as plt
show_animation = True
class RTree(object):
# Class to represent the explicit tree created
# while sampling through the state space
def __init__(self, start=[0, 0], lowerLimit=[0, 0], upperLimit=[10, 10], resolution=1):
self.vertices = dict()
self.edges = []
self.start = start
self.lowerLimit = lowerLimit
self.upperLimit = upperLimit
self.dimension = len(lowerLimit)
self.num_cells = [0] * self.dimension
self.resolution = resolution
# compute the number of grid cells based on the limits and
# resolution given
for idx in range(self.dimension):
self.num_cells[idx] = np.ceil(
(upperLimit[idx] - lowerLimit[idx]) / resolution)
vertex_id = self.realWorldToNodeId(start)
self.vertices[vertex_id] = []
def getRootId(self):
# return the id of the root of the tree
return 0
def addVertex(self, vertex):
# add a vertex to the tree
vertex_id = self.realWorldToNodeId(vertex)
self.vertices[vertex_id] = []
return vertex_id
def addEdge(self, v, x):
# create an edge between v and x vertices
if (v, x) not in self.edges:
self.edges.append((v, x))
# since the tree is undirected
self.vertices[v].append(x)
self.vertices[x].append(v)
def realCoordsToGridCoord(self, real_coord):
# convert real world coordinates to grid space
# depends on the resolution of the grid
# the output is the same as real world coords if the resolution
# is set to 1
coord = [0] * self.dimension
for i in range(len(coord)):
start = self.lowerLimit[i] # start of the grid space
coord[i] = np.around((real_coord[i] - start) / self.resolution)
return coord
def gridCoordinateToNodeId(self, coord):
# This function maps a grid coordinate to a unique
# node id
nodeId = 0
for i in range(len(coord) - 1, -1, -1):
product = 1
for j in range(0, i):
product = product * self.num_cells[j]
nodeId = nodeId + coord[i] * product
return nodeId
def realWorldToNodeId(self, real_coord):
# first convert the given coordinates to grid space and then
# convert the grid space coordinates to a unique node id
return self.gridCoordinateToNodeId(self.realCoordsToGridCoord(real_coord))
def gridCoordToRealWorldCoord(self, coord):
# This function smaps a grid coordinate in discrete space
# to a configuration in the full configuration space
config = [0] * self.dimension
for i in range(0, len(coord)):
# start of the real world / configuration space
start = self.lowerLimit[i]
# step from the coordinate in the grid
grid_step = self.resolution * coord[i]
config[i] = start + grid_step
return config
def nodeIdToGridCoord(self, node_id):
# This function maps a node id to the associated
# grid coordinate
coord = [0] * len(self.lowerLimit)
for i in range(len(coord) - 1, -1, -1):
# Get the product of the grid space maximums
prod = 1
for j in range(0, i):
prod = prod * self.num_cells[j]
coord[i] = np.floor(node_id / prod)
node_id = node_id - (coord[i] * prod)
return coord
def nodeIdToRealWorldCoord(self, nid):
# This function maps a node in discrete space to a configuraiton
# in the full configuration space
return self.gridCoordToRealWorldCoord(self.nodeIdToGridCoord(nid))
# Uses Batch Informed Trees to find a path from start to goal
class BITStar(object):
def __init__(self, start, goal,
obstacleList, randArea, eta=2.0,
maxIter=80):
self.start = start
self.goal = goal
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.maxIter = maxIter
self.obstacleList = obstacleList
self.vertex_queue = []
self.edge_queue = []
self.samples = dict()
self.g_scores = dict()
self.f_scores = dict()
self.nodes = dict()
self.r = float('inf')
self.eta = eta # tunable parameter
self.unit_ball_measure = 1
self.old_vertices = []
# initialize tree
lowerLimit = [randArea[0], randArea[0]]
upperLimit = [randArea[1], randArea[1]]
self.tree = RTree(start=start, lowerLimit=lowerLimit,
upperLimit=upperLimit, resolution=0.01)
def setup_planning(self):
self.startId = self.tree.realWorldToNodeId(self.start)
self.goalId = self.tree.realWorldToNodeId(self.goal)
# add goal to the samples
self.samples[self.goalId] = self.goal
self.g_scores[self.goalId] = float('inf')
self.f_scores[self.goalId] = 0
# add the start id to the tree
self.tree.addVertex(self.start)
self.g_scores[self.startId] = 0
self.f_scores[self.startId] = self.computeHeuristicCost(
self.startId, self.goalId)
# max length we expect to find in our 'informed' sample space, starts as infinite
cBest = self.g_scores[self.goalId]
# Computing the sampling space
cMin = math.sqrt(pow(self.start[0] - self.goal[1], 2) +
pow(self.start[0] - self.goal[1], 2)) / 1.5
xCenter = np.matrix([[(self.start[0] + self.goal[0]) / 2.0],
[(self.goal[1] - self.start[1]) / 2.0], [0]])
a1 = np.matrix([[(self.goal[0] - self.start[0]) / cMin],
[(self.goal[1] - self.start[1]) / cMin], [0]])
etheta = math.atan2(a1[1], a1[0])
# first column of idenity matrix transposed
id1_t = np.matrix([1.0, 0.0, 0.0])
M = np.dot(a1, id1_t)
U, S, Vh = np.linalg.svd(M, 1, 1)
C = np.dot(np.dot(U, np.diag(
[1.0, 1.0, np.linalg.det(U) * np.linalg.det(np.transpose(Vh))])), Vh)
self.samples.update(self.informedSample(
200, cBest, cMin, xCenter, C))
return etheta, cMin, xCenter, C, cBest
def setup_sample(self, iterations, foundGoal, cMin, xCenter, C, cBest):
if len(self.vertex_queue) == 0 and len(self.edge_queue) == 0:
print("Batch: ", iterations)
# Using informed rrt star way of computing the samples
self.r = 2.0
if iterations != 0:
if foundGoal:
# a better way to do this would be to make number of samples
# a function of cMin
m = 200
self.samples = dict()
self.samples[self.goalId] = self.goal
else:
m = 100
cBest = self.g_scores[self.goalId]
self.samples.update(self.informedSample(
m, cBest, cMin, xCenter, C))
# make the old vertices the new vertices
self.old_vertices += self.tree.vertices.keys()
# add the vertices to the vertex queue
for nid in self.tree.vertices.keys():
if nid not in self.vertex_queue:
self.vertex_queue.append(nid)
return cBest
def plan(self, animation=True):
etheta, cMin, xCenter, C, cBest = self.setup_planning()
iterations = 0
foundGoal = False
# run until done
while (iterations < self.maxIter):
cBest = self.setup_sample(iterations,
foundGoal, cMin, xCenter, C, cBest)
# expand the best vertices until an edge is better than the vertex
# this is done because the vertex cost represents the lower bound
# on the edge cost
while(self.bestVertexQueueValue() <= self.bestEdgeQueueValue()):
self.expandVertex(self.bestInVertexQueue())
# add the best edge to the tree
bestEdge = self.bestInEdgeQueue()
self.edge_queue.remove(bestEdge)
# Check if this can improve the current solution
estimatedCostOfVertex = self.g_scores[bestEdge[0]] + self.computeDistanceCost(
bestEdge[0], bestEdge[1]) + self.computeHeuristicCost(bestEdge[1], self.goalId)
estimatedCostOfEdge = self.computeDistanceCost(self.startId, bestEdge[0]) + self.computeHeuristicCost(
bestEdge[0], bestEdge[1]) + self.computeHeuristicCost(bestEdge[1], self.goalId)
actualCostOfEdge = self.g_scores[bestEdge[0]] + \
self.computeDistanceCost(bestEdge[0], bestEdge[1])
f1 = estimatedCostOfVertex < self.g_scores[self.goalId]
f2 = estimatedCostOfEdge < self.g_scores[self.goalId]
f3 = actualCostOfEdge < self.g_scores[self.goalId]
if f1 and f2 and f3:
# connect this edge
firstCoord = self.tree.nodeIdToRealWorldCoord(
bestEdge[0])
secondCoord = self.tree.nodeIdToRealWorldCoord(
bestEdge[1])
path = self.connect(firstCoord, secondCoord)
lastEdge = self.tree.realWorldToNodeId(secondCoord)
if path is None or len(path) == 0:
continue
nextCoord = path[len(path) - 1, :]
nextCoordPathId = self.tree.realWorldToNodeId(
nextCoord)
bestEdge = (bestEdge[0], nextCoordPathId)
if(bestEdge[1] in self.tree.vertices.keys()):
continue
else:
try:
del self.samples[bestEdge[1]]
except(KeyError):
pass
eid = self.tree.addVertex(nextCoord)
self.vertex_queue.append(eid)
if eid == self.goalId or bestEdge[0] == self.goalId or bestEdge[1] == self.goalId:
print("Goal found")
foundGoal = True
self.tree.addEdge(bestEdge[0], bestEdge[1])
g_score = self.computeDistanceCost(
bestEdge[0], bestEdge[1])
self.g_scores[bestEdge[1]] = g_score + \
self.g_scores[bestEdge[0]]
self.f_scores[bestEdge[1]] = g_score + \
self.computeHeuristicCost(bestEdge[1], self.goalId)
self.updateGraph()
# visualize new edge
if animation:
self.drawGraph(xCenter=xCenter, cBest=cBest,
cMin=cMin, etheta=etheta, samples=self.samples.values(),
start=firstCoord, end=secondCoord, tree=self.tree.edges)
self.remove_queue(lastEdge, bestEdge)
else:
print("Nothing good")
self.edge_queue = []
self.vertex_queue = []
iterations += 1
print("Finding the path")
return self.find_final_path()
def find_final_path(self):
plan = []
plan.append(self.goal)
currId = self.goalId
while (currId != self.startId):
plan.append(self.tree.nodeIdToRealWorldCoord(currId))
try:
currId = self.nodes[currId]
except(KeyError):
print("Path key error")
return []
plan.append(self.start)
plan = plan[::-1] # reverse the plan
return plan
def remove_queue(self, lastEdge, bestEdge):
for edge in self.edge_queue:
if(edge[1] == bestEdge[1]):
if self.g_scores[edge[1]] + self.computeDistanceCost(edge[1], bestEdge[1]) >= self.g_scores[self.goalId]:
if(lastEdge, bestEdge[1]) in self.edge_queue:
self.edge_queue.remove(
(lastEdge, bestEdge[1]))
def connect(self, start, end):
# A function which attempts to extend from a start coordinates
# to goal coordinates
steps = int(self.computeDistanceCost(self.tree.realWorldToNodeId(
start), self.tree.realWorldToNodeId(end)) * 10)
x = np.linspace(start[0], end[0], num=steps)
y = np.linspace(start[1], end[1], num=steps)
for i in range(len(x)):
if(self._collisionCheck(x[i], y[i])):
if(i == 0):
return None
# if collision, send path until collision
return np.vstack((x[0:i], y[0:i])).transpose()
return np.vstack((x, y)).transpose()
def _collisionCheck(self, x, y):
for (ox, oy, size) in self.obstacleList:
dx = ox - x
dy = oy - y
d = dx * dx + dy * dy
if d <= size ** 2:
return True # collision
return False
# def prune(self, c):
def computeHeuristicCost(self, start_id, goal_id):
# Using Manhattan distance as heuristic
start = np.array(self.tree.nodeIdToRealWorldCoord(start_id))
goal = np.array(self.tree.nodeIdToRealWorldCoord(goal_id))
return np.linalg.norm(start - goal, 2)
def computeDistanceCost(self, vid, xid):
# L2 norm distance
start = np.array(self.tree.nodeIdToRealWorldCoord(vid))
stop = np.array(self.tree.nodeIdToRealWorldCoord(xid))
return np.linalg.norm(stop - start, 2)
# Sample free space confined in the radius of ball R
def informedSample(self, m, cMax, cMin, xCenter, C):
samples = dict()
print("g_Score goal id: ", self.g_scores[self.goalId])
for i in range(m + 1):
if cMax < float('inf'):
r = [cMax / 2.0,
math.sqrt(cMax**2 - cMin**2) / 2.0,
math.sqrt(cMax**2 - cMin**2) / 2.0]
L = np.diag(r)
xBall = self.sampleUnitBall()
rnd = np.dot(np.dot(C, L), xBall) + xCenter
rnd = [rnd[(0, 0)], rnd[(1, 0)]]
random_id = self.tree.realWorldToNodeId(rnd)
samples[random_id] = rnd
else:
rnd = self.sampleFreeSpace()
random_id = self.tree.realWorldToNodeId(rnd)
samples[random_id] = rnd
return samples
# Sample point in a unit ball
def sampleUnitBall(self):
a = random.random()
b = random.random()
if b < a:
a, b = b, a
sample = (b * math.cos(2 * math.pi * a / b),
b * math.sin(2 * math.pi * a / b))
return np.array([[sample[0]], [sample[1]], [0]])
def sampleFreeSpace(self):
rnd = [random.uniform(self.minrand, self.maxrand),
random.uniform(self.minrand, self.maxrand)]
return rnd
def bestVertexQueueValue(self):
if(len(self.vertex_queue) == 0):
return float('inf')
values = [self.g_scores[v] +
self.computeHeuristicCost(v, self.goalId) for v in self.vertex_queue]
values.sort()
return values[0]
def bestEdgeQueueValue(self):
if(len(self.edge_queue) == 0):
return float('inf')
# return the best value in the queue by score g_tau[v] + c(v,x) + h(x)
values = [self.g_scores[e[0]] + self.computeDistanceCost(e[0], e[1]) +
self.computeHeuristicCost(e[1], self.goalId) for e in self.edge_queue]
values.sort(reverse=True)
return values[0]
def bestInVertexQueue(self):
# return the best value in the vertex queue
v_plus_vals = [(v, self.g_scores[v] + self.computeHeuristicCost(v, self.goalId))
for v in self.vertex_queue]
v_plus_vals = sorted(v_plus_vals, key=lambda x: x[1])
# print(v_plus_vals)
return v_plus_vals[0][0]
def bestInEdgeQueue(self):
e_and_values = [(e[0], e[1], self.g_scores[e[0]] + self.computeDistanceCost(
e[0], e[1]) + self.computeHeuristicCost(e[1], self.goalId)) for e in self.edge_queue]
e_and_values = sorted(e_and_values, key=lambda x: x[2])
return (e_and_values[0][0], e_and_values[0][1])
def expandVertex(self, vid):
self.vertex_queue.remove(vid)
# get the coordinates for given vid
currCoord = np.array(self.tree.nodeIdToRealWorldCoord(vid))
# get the nearest value in vertex for every one in samples where difference is
# less than the radius
neigbors = []
for sid, scoord in self.samples.items():
scoord = np.array(scoord)
if(np.linalg.norm(scoord - currCoord, 2) <= self.r and sid != vid):
neigbors.append((sid, scoord))
# add an edge to the edge queue is the path might improve the solution
for neighbor in neigbors:
sid = neighbor[0]
estimated_f_score = self.computeDistanceCost(
self.startId, vid) + self.computeHeuristicCost(sid, self.goalId) + self.computeDistanceCost(vid, sid)
if estimated_f_score < self.g_scores[self.goalId]:
self.edge_queue.append((vid, sid))
# add the vertex to the edge queue
self.add_vertex_to_edge_queue(vid, currCoord)
def add_vertex_to_edge_queue(self, vid, currCoord):
if vid not in self.old_vertices:
neigbors = []
for v, edges in self.tree.vertices.items():
if v != vid and (v, vid) not in self.edge_queue and (vid, v) not in self.edge_queue:
vcoord = self.tree.nodeIdToRealWorldCoord(v)
if(np.linalg.norm(vcoord - currCoord, 2) <= self.r):
neigbors.append((vid, vcoord))
for neighbor in neigbors:
sid = neighbor[0]
estimated_f_score = self.computeDistanceCost(self.startId, vid) + \
self.computeDistanceCost(
vid, sid) + self.computeHeuristicCost(sid, self.goalId)
if estimated_f_score < self.g_scores[self.goalId] and (self.g_scores[vid] + self.computeDistanceCost(vid, sid)) < self.g_scores[sid]:
self.edge_queue.append((vid, sid))
def updateGraph(self):
closedSet = []
openSet = []
currId = self.startId
openSet.append(currId)
while len(openSet) != 0:
# get the element with lowest f_score
currId = min(openSet, key=lambda x: self.f_scores[x])
# remove element from open set
openSet.remove(currId)
# Check if we're at the goal
if(currId == self.goalId):
self.nodes[self.goalId]
break
if(currId not in closedSet):
closedSet.append(currId)
# find a non visited successor to the current node
successors = self.tree.vertices[currId]
for succesor in successors:
if(succesor in closedSet):
continue
else:
# claculate tentative g score
g_score = self.g_scores[currId] + \
self.computeDistanceCost(currId, succesor)
if succesor not in openSet:
# add the successor to open set
openSet.append(succesor)
elif g_score >= self.g_scores[succesor]:
continue
# update g and f scores
self.g_scores[succesor] = g_score
self.f_scores[succesor] = g_score + \
self.computeHeuristicCost(succesor, self.goalId)
# store the parent and child
self.nodes[succesor] = currId
def drawGraph(self, xCenter=None, cBest=None, cMin=None, etheta=None,
samples=None, start=None, end=None, tree=None):
print("Plotting Graph")
plt.clf()
for rnd in samples:
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
if cBest != float('inf'):
self.plot_ellipse(xCenter, cBest, cMin, etheta)
if start is not None and end is not None:
plt.plot([start[0], start[1]], [end[0], end[1]], "-g")
for (ox, oy, size) in self.obstacleList:
plt.plot(ox, oy, "ok", ms=30 * size)
plt.plot(self.start[0], self.start[1], "xr")
plt.plot(self.goal[0], self.goal[1], "xr")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
def plot_ellipse(self, xCenter, cBest, cMin, etheta):
a = math.sqrt(cBest**2 - cMin**2) / 2.0
b = cBest / 2.0
angle = math.pi / 2.0 - etheta
cx = xCenter[0]
cy = xCenter[1]
t = np.arange(0, 2 * math.pi + 0.1, 0.1)
x = [a * math.cos(it) for it in t]
y = [b * math.sin(it) for it in t]
R = np.matrix([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
fx = R * np.matrix([x, y])
px = np.array(fx[0, :] + cx).flatten()
py = np.array(fx[1, :] + cy).flatten()
plt.plot(cx, cy, "xc")
plt.plot(px, py, "--c")
def main():
print("Starting Batch Informed Trees Star planning")
obstacleList = [
(5, 5, 0.5),
(9, 6, 1),
(7, 5, 1),
(1, 5, 1),
(3, 6, 1),
(7, 9, 1)
]
bitStar = BITStar(start=[-1, 0], goal=[3, 8], obstacleList=obstacleList,
randArea=[-2, 15])
path = bitStar.plan(animation=show_animation)
print("Done")
if show_animation:
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.05)
plt.show()
if __name__ == '__main__':
main()
| en | 0.796656 | Batch Informed Trees based path planning: Uses a heuristic to efficiently search increasingly dense RGGs while reusing previous information. Provides faster convergence that RRT*, Informed RRT* and other sampling based methods. Uses lazy connecting by combining sampling based methods and A* like incremental graph search algorithms. author: <NAME>(@karanchawla) <NAME>(@Atsushi_twi) Reference: https://arxiv.org/abs/1405.5848 # Class to represent the explicit tree created # while sampling through the state space # compute the number of grid cells based on the limits and # resolution given # return the id of the root of the tree # add a vertex to the tree # create an edge between v and x vertices # since the tree is undirected # convert real world coordinates to grid space # depends on the resolution of the grid # the output is the same as real world coords if the resolution # is set to 1 # start of the grid space # This function maps a grid coordinate to a unique # node id # first convert the given coordinates to grid space and then # convert the grid space coordinates to a unique node id # This function smaps a grid coordinate in discrete space # to a configuration in the full configuration space # start of the real world / configuration space # step from the coordinate in the grid # This function maps a node id to the associated # grid coordinate # Get the product of the grid space maximums # This function maps a node in discrete space to a configuraiton # in the full configuration space # Uses Batch Informed Trees to find a path from start to goal # tunable parameter # initialize tree # add goal to the samples # add the start id to the tree # max length we expect to find in our 'informed' sample space, starts as infinite # Computing the sampling space # first column of idenity matrix transposed # Using informed rrt star way of computing the samples # a better way to do this would be to make number of samples # a function of cMin # make the old vertices the new vertices # add the vertices to the vertex queue # run until done # expand the best vertices until an edge is better than the vertex # this is done because the vertex cost represents the lower bound # on the edge cost # add the best edge to the tree # Check if this can improve the current solution # connect this edge # visualize new edge # reverse the plan # A function which attempts to extend from a start coordinates # to goal coordinates # if collision, send path until collision # collision # def prune(self, c): # Using Manhattan distance as heuristic # L2 norm distance # Sample free space confined in the radius of ball R # Sample point in a unit ball # return the best value in the queue by score g_tau[v] + c(v,x) + h(x) # return the best value in the vertex queue # print(v_plus_vals) # get the coordinates for given vid # get the nearest value in vertex for every one in samples where difference is # less than the radius # add an edge to the edge queue is the path might improve the solution # add the vertex to the edge queue # get the element with lowest f_score # remove element from open set # Check if we're at the goal # find a non visited successor to the current node # claculate tentative g score # add the successor to open set # update g and f scores # store the parent and child | 2.832183 | 3 |
bioslds/sources.py | ttesileanu/bio-time-series | 0 | 6630195 | """ Define convenient objects to use as sources for Arma processes. """
import numpy as np
import copy
from typing import Sequence, Union, Callable
from scipy import optimize
class Constant(object):
""" A source that always returns the same value.
Attributes
----------
value
Value returned by the source.
"""
def __init__(self, value: float):
self.value = value
def __call__(self, size: int) -> np.ndarray:
""" Generate constant values.
Parameter
---------
n
Number of values to generate.
"""
return np.repeat(self.value, size)
def __repr__(self) -> str:
return f"Constant({self.value})"
class Stream(object):
""" A source that streams data from an array.
Attributes
----------
data_store
Array from which values are returned.
ptr_
Current location in array.
"""
def __init__(self, data: Sequence):
self.data_store = data
self.ptr_ = 0
def __call__(self, size: int) -> np.ndarray:
""" Return values from the data store.
Raises `IndexError` if not enough data is available.
Parameter
---------
n
Number of values to return.
"""
if self.ptr_ + size > len(self.data_store):
raise IndexError("Ran out of data.")
data = self.data_store[self.ptr_ : self.ptr_ + size]
self.ptr_ += size
return data
def __repr__(self) -> str:
return f"Stream(data_store={self.data_store}, ptr_={self.ptr_})"
class GaussianNoise(object):
""" A source that generates random Gaussian noise.
Attributes
----------
rng
Random number generator.
loc
Location (mean) of normal distribution.
scale
Scale (standard deviation) of normal distribution.
"""
def __init__(
self,
rng: Union[int, np.random.Generator, np.random.RandomState] = 0,
loc: float = 0,
scale: float = 1,
):
""" Initialize the Gaussian noise source.
Parameters
----------
rng
Random number generator or seed. If seed, a random number generator
is created using `np.random.default_rng`.
loc
Location (mean) of distribution.
scale
Scale (standard deviation) of distribution.
"""
if isinstance(rng, int):
rng = np.random.default_rng(rng)
self.rng = rng
self.loc = loc
self.scale = scale
def __call__(self, size: int) -> np.ndarray:
""" Return Gaussian random values.
Parameter
---------
n
Number of values to return.
"""
return self.rng.normal(self.loc, self.scale, size=size)
def __str__(self) -> str:
return f"GaussianNoise(loc={self.loc}, scale={self.scale})"
def __repr__(self) -> str:
r = f"GaussianNoise(loc={self.loc}, scale={self.scale}, " + f"rng={self.rng})"
return r
def fix_source_scale(
transformer, output_std: float = 1, n_samples: int = 1000, use_copy: bool = True,
) -> float:
""" Adjust the scale for a data source to fix the output variance of a
transformer.
The transformer's data source must have a `scale` parameter.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
`transformer.transform(U=source)`; and it needs an attribute called
`default_source`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
use_copy
If true, a deep copy of the data source is made for the optimization, so
that the source's random generator is unaffected by this procedure.
Returns the final value for the scale.
"""
output_var = output_std ** 2
source = transformer.default_source
if use_copy:
source_copy = copy.deepcopy(source)
else:
source_copy = source
def objective(scale: float):
source_copy.scale = np.abs(scale)
samples = transformer.transform(n_samples, X=source_copy)
return np.var(samples) / output_var - 1
soln = optimize.root_scalar(
objective, x0=np.sqrt(output_var / 2), x1=np.sqrt(2 * output_var), maxiter=100,
)
source.scale = np.abs(soln.root)
return source.scale
def fix_transformer_scale(
transformer,
output_std: float = 1,
n_samples: int = 1000,
source_constructor: Callable = GaussianNoise,
) -> float:
""" Adjust the source scaling for a transformer in order to fix its output variance.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
`transformer.transform(U=source)`; it needs an attribute called
`default_source`; and it needs an attribute called `source_scaling`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
source_constructor
Callable to use to create a source for adjusting the scaling factor.
The created object needs to have a `scale` attribute.
Returns the final value for the scale.
"""
# don't mess with the transformer's initial default_source
old_source = transformer.default_source
# make a new source, and use fix_source_scale
source = source_constructor()
transformer.default_source = source
# XXX this isn't very robust: the meaning of `scale` to a particular source may well
# be very different from simply scaling the source values after they're generated
scale = fix_source_scale(
transformer, output_std=output_std, n_samples=n_samples, use_copy=False
)
# revert to original source
transformer.default_source = old_source
# set scaling factor
transformer.source_scaling = scale
return scale
| """ Define convenient objects to use as sources for Arma processes. """
import numpy as np
import copy
from typing import Sequence, Union, Callable
from scipy import optimize
class Constant(object):
""" A source that always returns the same value.
Attributes
----------
value
Value returned by the source.
"""
def __init__(self, value: float):
self.value = value
def __call__(self, size: int) -> np.ndarray:
""" Generate constant values.
Parameter
---------
n
Number of values to generate.
"""
return np.repeat(self.value, size)
def __repr__(self) -> str:
return f"Constant({self.value})"
class Stream(object):
""" A source that streams data from an array.
Attributes
----------
data_store
Array from which values are returned.
ptr_
Current location in array.
"""
def __init__(self, data: Sequence):
self.data_store = data
self.ptr_ = 0
def __call__(self, size: int) -> np.ndarray:
""" Return values from the data store.
Raises `IndexError` if not enough data is available.
Parameter
---------
n
Number of values to return.
"""
if self.ptr_ + size > len(self.data_store):
raise IndexError("Ran out of data.")
data = self.data_store[self.ptr_ : self.ptr_ + size]
self.ptr_ += size
return data
def __repr__(self) -> str:
return f"Stream(data_store={self.data_store}, ptr_={self.ptr_})"
class GaussianNoise(object):
""" A source that generates random Gaussian noise.
Attributes
----------
rng
Random number generator.
loc
Location (mean) of normal distribution.
scale
Scale (standard deviation) of normal distribution.
"""
def __init__(
self,
rng: Union[int, np.random.Generator, np.random.RandomState] = 0,
loc: float = 0,
scale: float = 1,
):
""" Initialize the Gaussian noise source.
Parameters
----------
rng
Random number generator or seed. If seed, a random number generator
is created using `np.random.default_rng`.
loc
Location (mean) of distribution.
scale
Scale (standard deviation) of distribution.
"""
if isinstance(rng, int):
rng = np.random.default_rng(rng)
self.rng = rng
self.loc = loc
self.scale = scale
def __call__(self, size: int) -> np.ndarray:
""" Return Gaussian random values.
Parameter
---------
n
Number of values to return.
"""
return self.rng.normal(self.loc, self.scale, size=size)
def __str__(self) -> str:
return f"GaussianNoise(loc={self.loc}, scale={self.scale})"
def __repr__(self) -> str:
r = f"GaussianNoise(loc={self.loc}, scale={self.scale}, " + f"rng={self.rng})"
return r
def fix_source_scale(
transformer, output_std: float = 1, n_samples: int = 1000, use_copy: bool = True,
) -> float:
""" Adjust the scale for a data source to fix the output variance of a
transformer.
The transformer's data source must have a `scale` parameter.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
`transformer.transform(U=source)`; and it needs an attribute called
`default_source`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
use_copy
If true, a deep copy of the data source is made for the optimization, so
that the source's random generator is unaffected by this procedure.
Returns the final value for the scale.
"""
output_var = output_std ** 2
source = transformer.default_source
if use_copy:
source_copy = copy.deepcopy(source)
else:
source_copy = source
def objective(scale: float):
source_copy.scale = np.abs(scale)
samples = transformer.transform(n_samples, X=source_copy)
return np.var(samples) / output_var - 1
soln = optimize.root_scalar(
objective, x0=np.sqrt(output_var / 2), x1=np.sqrt(2 * output_var), maxiter=100,
)
source.scale = np.abs(soln.root)
return source.scale
def fix_transformer_scale(
transformer,
output_std: float = 1,
n_samples: int = 1000,
source_constructor: Callable = GaussianNoise,
) -> float:
""" Adjust the source scaling for a transformer in order to fix its output variance.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
`transformer.transform(U=source)`; it needs an attribute called
`default_source`; and it needs an attribute called `source_scaling`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
source_constructor
Callable to use to create a source for adjusting the scaling factor.
The created object needs to have a `scale` attribute.
Returns the final value for the scale.
"""
# don't mess with the transformer's initial default_source
old_source = transformer.default_source
# make a new source, and use fix_source_scale
source = source_constructor()
transformer.default_source = source
# XXX this isn't very robust: the meaning of `scale` to a particular source may well
# be very different from simply scaling the source values after they're generated
scale = fix_source_scale(
transformer, output_std=output_std, n_samples=n_samples, use_copy=False
)
# revert to original source
transformer.default_source = old_source
# set scaling factor
transformer.source_scaling = scale
return scale
| en | 0.79163 | Define convenient objects to use as sources for Arma processes. A source that always returns the same value. Attributes ---------- value Value returned by the source. Generate constant values. Parameter --------- n Number of values to generate. A source that streams data from an array. Attributes ---------- data_store Array from which values are returned. ptr_ Current location in array. Return values from the data store. Raises `IndexError` if not enough data is available. Parameter --------- n Number of values to return. A source that generates random Gaussian noise. Attributes ---------- rng Random number generator. loc Location (mean) of normal distribution. scale Scale (standard deviation) of normal distribution. Initialize the Gaussian noise source. Parameters ---------- rng Random number generator or seed. If seed, a random number generator is created using `np.random.default_rng`. loc Location (mean) of distribution. scale Scale (standard deviation) of distribution. Return Gaussian random values. Parameter --------- n Number of values to return. Adjust the scale for a data source to fix the output variance of a transformer. The transformer's data source must have a `scale` parameter. Parameters ---------- transformer Transformer whose output variance is optimized. This should behave like `Arma`: it needs to have a `transform` method that can be called like `transformer.transform(U=source)`; and it needs an attribute called `default_source`. output_std Value to which to fix the transformer's output standard deviation. n_samples Number of samples to generate for each optimization iteration. use_copy If true, a deep copy of the data source is made for the optimization, so that the source's random generator is unaffected by this procedure. Returns the final value for the scale. Adjust the source scaling for a transformer in order to fix its output variance. Parameters ---------- transformer Transformer whose output variance is optimized. This should behave like `Arma`: it needs to have a `transform` method that can be called like `transformer.transform(U=source)`; it needs an attribute called `default_source`; and it needs an attribute called `source_scaling`. output_std Value to which to fix the transformer's output standard deviation. n_samples Number of samples to generate for each optimization iteration. source_constructor Callable to use to create a source for adjusting the scaling factor. The created object needs to have a `scale` attribute. Returns the final value for the scale. # don't mess with the transformer's initial default_source # make a new source, and use fix_source_scale # XXX this isn't very robust: the meaning of `scale` to a particular source may well # be very different from simply scaling the source values after they're generated # revert to original source # set scaling factor | 3.21132 | 3 |
ejercicio_1.py | Taller-Abierto-de-Humanidades-Digitales/programacion | 0 | 6630196 | '''
Ejercicio: Con este listado de Estados y capitales, crear un programa que sea capaz
de separar cada entidad y entregar un mensaje para cada Estado que diga:
La capital del Estado de Aguascalientes es Aguascalientes
La capital del Estado de Baja California es Mexicali
La capital del Estado de Baja California Sur es La Paz
La capital del Estado de Campeche es San Francisco de Campeche
...
La capital del Estado de Zacatecas es Zacatecas
'''
estados_y_capitales = """Aguascalientes: Aguascalientes, Baja California: Mexicali, Baja California Sur: La Paz, Campeche: San Francisco de Campeche, Chihuahua: Chihuahua, Chiapas: Tuxtla Gutiérrez, Ciudad de México: Ciudad de México, Coahuila: Saltillo, Colima: Colima, Durango: Victoria de Durango, Guanajuato: Guanajuato, Guerrero: Chilpancingo de los Bravo, Hidalgo: Pachuca de Soto, Jalisco: Guadalajara, México: Toluca de Lerdo, Michoacán: Morelia, Morelos: Cuernavaca, Nayarit: Tepic, Nuevo León: Monterrey, Oaxaca: Oaxaca de Juárez, Puebla: Puebla de Zaragoza, Querétaro: Santiago de Querétaro, Quintana Roo: Chetumal, San Luis Potosí: San Luis Potosí, Sinaloa: Culiacán Rosales, Sonora: Hermosillo, Tabasco: Villahermosa, Tamaulipas: Ciudad Victoria, Tlaxcala: Tlaxcala de Xicohténcatl, Veracruz: Xalapa-Enríquez, Yucatán: Mérida, Zacatecas: Zacatecas
"""
| '''
Ejercicio: Con este listado de Estados y capitales, crear un programa que sea capaz
de separar cada entidad y entregar un mensaje para cada Estado que diga:
La capital del Estado de Aguascalientes es Aguascalientes
La capital del Estado de Baja California es Mexicali
La capital del Estado de Baja California Sur es La Paz
La capital del Estado de Campeche es San Francisco de Campeche
...
La capital del Estado de Zacatecas es Zacatecas
'''
estados_y_capitales = """Aguascalientes: Aguascalientes, Baja California: Mexicali, Baja California Sur: La Paz, Campeche: San Francisco de Campeche, Chihuahua: Chihuahua, Chiapas: Tuxtla Gutiérrez, Ciudad de México: Ciudad de México, Coahuila: Saltillo, Colima: Colima, Durango: Victoria de Durango, Guanajuato: Guanajuato, Guerrero: Chilpancingo de los Bravo, Hidalgo: Pachuca de Soto, Jalisco: Guadalajara, México: Toluca de Lerdo, Michoacán: Morelia, Morelos: Cuernavaca, Nayarit: Tepic, Nuevo León: Monterrey, Oaxaca: Oaxaca de Juárez, Puebla: Puebla de Zaragoza, Querétaro: Santiago de Querétaro, Quintana Roo: Chetumal, San Luis Potosí: San Luis Potosí, Sinaloa: Culiacán Rosales, Sonora: Hermosillo, Tabasco: Villahermosa, Tamaulipas: Ciudad Victoria, Tlaxcala: Tlaxcala de Xicohténcatl, Veracruz: Xalapa-Enríquez, Yucatán: Mérida, Zacatecas: Zacatecas
"""
| es | 0.623431 | Ejercicio: Con este listado de Estados y capitales, crear un programa que sea capaz de separar cada entidad y entregar un mensaje para cada Estado que diga: La capital del Estado de Aguascalientes es Aguascalientes La capital del Estado de Baja California es Mexicali La capital del Estado de Baja California Sur es La Paz La capital del Estado de Campeche es San Francisco de Campeche ... La capital del Estado de Zacatecas es Zacatecas Aguascalientes: Aguascalientes, Baja California: Mexicali, Baja California Sur: La Paz, Campeche: San Francisco de Campeche, Chihuahua: Chihuahua, Chiapas: Tuxtla Gutiérrez, Ciudad de México: Ciudad de México, Coahuila: Saltillo, Colima: Colima, Durango: Victoria de Durango, Guanajuato: Guanajuato, Guerrero: Chilpancingo de los Bravo, Hidalgo: Pachuca de Soto, Jalisco: Guadalajara, México: Toluca de Lerdo, Michoacán: Morelia, Morelos: Cuernavaca, Nayarit: Tepic, Nuevo León: Monterrey, Oaxaca: Oaxaca de Juárez, Puebla: Puebla de Zaragoza, Querétaro: Santiago de Querétaro, Quintana Roo: Chetumal, San Luis Potosí: San Luis Potosí, Sinaloa: Culiacán Rosales, Sonora: Hermosillo, Tabasco: Villahermosa, Tamaulipas: Ciudad Victoria, Tlaxcala: Tlaxcala de Xicohténcatl, Veracruz: Xalapa-Enríquez, Yucatán: Mérida, Zacatecas: Zacatecas | 2.179801 | 2 |
larch/wxlib/xrfdisplay_fitpeaks.py | fmneto/xraylarch | 0 | 6630197 | #!/usr/bin/env python
"""
fitting GUI for XRF display
"""
import time
import copy
from functools import partial
from collections import OrderedDict
from threading import Thread
import json
import numpy as np
import wx
import wx.lib.agw.pycollapsiblepane as CP
import wx.lib.scrolledpanel as scrolled
import wx.dataview as dv
DVSTYLE = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES
from peakutils import peak
from lmfit import Parameter, Minimizer
from lmfit.printfuncs import gformat
from wxutils import (SimpleText, FloatCtrl, FloatSpin, Choice, Font, pack,
Button, Check, HLine, GridPanel, RowPanel, CEN, LEFT,
RIGHT, FileSave, GUIColors, FRAMESTYLE, BitmapButton,
SetTip, GridPanel, Popup, FloatSpinWithPin, get_icon,
fix_filename)
from . import FONTSIZE
from xraydb import (material_mu, xray_edge, materials, add_material,
atomic_number, atomic_symbol, xray_line)
from .notebooks import flatnotebook
from .parameter import ParameterPanel
from .periodictable import PeriodicTablePanel
from larch import Group
from ..xrf import xrf_background, MCA, FanoFactors
from ..utils.jsonutils import encode4js, decode4js
from .xrfdisplay_utils import (XRFGROUP, mcaname, XRFRESULTS_GROUP,
MAKE_XRFRESULTS_GROUP)
def read_filterdata(flist, _larch):
""" read filters data"""
materials = _larch.symtable.get_symbol('_xray._materials')
out = OrderedDict()
out['None'] = ('', 0)
for name in flist:
if name in materials:
out[name] = materials[name]
return out
def VarChoice(p, default=0, size=(75, -1)):
return Choice(p, choices=['Fix', 'Vary'],
size=size, default=default)
NFILTERS = 4
MIN_CORREL = 0.10
tooltips = {'ptable': 'Select Elements to include in model',
'step': 'size of step extending to low energy side of peak, fraction of peak height',
'gamma': 'gamma (lorentzian-like weight) of Voigt function',
'tail': 'intensity of tail function at low energy side of peak',
'beta': 'width of tail function at low energy side of peak',
'sigmax': 'scale sigma from Energy/Noise by this amount',
}
CompositionUnits = ('ng/mm^2', 'wt %', 'ppm')
Detector_Materials = ['Si', 'Ge']
EFano_Text = 'Peak Widths: sigma = sqrt(E_Fano * Energy + Noise**2) '
Geom_Text = 'Angles in degrees: 90=normal to surface, 0=grazing surface'
Energy_Text = 'All energies in keV'
xrfmod_setup = """## Set up XRF Model
_xrfmodel = xrf_model(xray_energy={en_xray:.2f}, count_time={count_time:.5f},
energy_min={en_min:.2f}, energy_max={en_max:.2f})
_xrfmodel.set_detector(thickness={det_thk:.5f}, material='{det_mat:s}',
cal_offset={cal_offset:.5f}, cal_slope={cal_slope:.5f},
vary_cal_offset={cal_vary!r}, vary_cal_slope={cal_vary!r},
peak_step={peak_step:.5f}, vary_peak_step={peak_step_vary:s},
peak_tail={peak_tail:.5f}, vary_peak_tail={peak_tail_vary:s},
peak_beta={peak_beta:.5f}, vary_peak_beta={peak_beta_vary:s},
peak_gamma={peak_gamma:.5f}, vary_peak_gamma={peak_gamma_vary:s},
noise={det_noise:.5f}, vary_noise={det_noise_vary:s})"""
xrfmod_scattpeak = """_xrfmodel.add_scatter_peak(name='{peakname:s}', center={_cen:.2f},
amplitude=1e5, step={_step:.5f}, tail={_tail:.5f}, beta={_beta:.5f},
sigmax={_sigma:.5f}, vary_center={vcen:s}, vary_step={vstep:s},
vary_tail={vtail:s}, vary_beta={vbeta:s}, vary_sigmax={vsigma:s})"""
xrfmod_fitscript = """
_xrffitresult = _xrfmodel.fit_spectrum({group:s}, energy_min={emin:.2f}, energy_max={emax:.2f})
_xrfresults.insert(0, _xrffitresult)
"""
xrfmod_filter = "_xrfmodel.add_filter('{name:s}', {thick:.5f}, vary_thickness={vary:s})"
xrfmod_matrix = "_xrfmodel.set_matrix('{name:s}', {thick:.5f}, density={density:.5f})"
xrfmod_pileup = "_xrfmodel.add_pileup(scale={scale:.3f}, vary={vary:s})"
xrfmod_escape = "_xrfmodel.add_escape(scale={scale:.3f}, vary={vary:s})"
xrfmod_savejs = "_xrfresults[{nfit:d}].save('{filename:s}')"
xrfmod_elems = """
for atsym in {elemlist:s}:
_xrfmodel.add_element(atsym)
#endfor
del atsym"""
Filter_Lengths = ['microns', 'mm', 'cm']
Filter_Materials = ['None', 'air', 'nitrogen', 'helium', 'kapton',
'beryllium', 'aluminum', 'mylar', 'pmma']
class FitSpectraFrame(wx.Frame):
"""Frame for Spectral Analysis"""
def __init__(self, parent, size=(700, 825)):
self.parent = parent
self._larch = parent.larch
symtable = self._larch.symtable
# fetch current spectra from parent
if not symtable.has_group(XRFRESULTS_GROUP):
self._larch.eval(MAKE_XRFRESULTS_GROUP)
self.xrfresults = symtable.get_symbol(XRFRESULTS_GROUP)
xrfgroup = symtable.get_group(XRFGROUP)
mcagroup = getattr(xrfgroup, '_mca')
self.mca = getattr(xrfgroup, mcagroup)
self.mcagroup = '%s.%s' % (XRFGROUP, mcagroup)
efactor = 1.0 if max(self.mca.energy) < 250. else 1000.0
if self.mca.incident_energy is None:
self.mca.incident_energy = 20.0
if self.mca.incident_energy > 250:
self.mca.incident_energy /= 1000.0
self.nfit = 0
self.colors = GUIColors()
wx.Frame.__init__(self, parent, -1, 'Fit XRF Spectra',
size=size, style=wx.DEFAULT_FRAME_STYLE)
self.wids = {}
self.owids = {}
pan = GridPanel(self)
mca_label = getattr(self.mca, 'label', None)
if mca_label is None:
mca_label = getattr(self.mca, 'filename', 'mca')
self.wids['mca_name'] = SimpleText(pan, mca_label, size=(300, -1), style=LEFT)
self.wids['btn_calc'] = Button(pan, 'Calculate Model', size=(150, -1),
action=self.onShowModel)
self.wids['btn_fit'] = Button(pan, 'Fit Model', size=(150, -1),
action=self.onFitModel)
pan.AddText(" XRF Spectrum: ", colour='#880000')
pan.Add(self.wids['mca_name'], dcol=3)
pan.Add(self.wids['btn_calc'], newrow=True)
pan.Add(self.wids['btn_fit'])
self.panels = {}
self.panels['Beam & Detector'] = self.beamdet_page
self.panels['Filters & Matrix'] = self.materials_page
self.panels['Elements & Peaks'] = self.elempeaks_page
self.panels['Fit Results'] = self.fitresult_page
self.panels['Composition'] = self.composition_page
self.nb = flatnotebook(pan, self.panels,
on_change=self.onNBChanged)
pan.Add((5, 5), newrow=True)
pan.Add(self.nb, dcol=5, drow=10, newrow=True)
pan.pack()
self.Show()
self.Raise()
def onNBChanged(self, event=None):
pagelabel = self.nb._pages.GetPageText(event.GetSelection()).strip()
if pagelabel.startswith('Composition'):
self.UpdateCompositionPage()
def elempeaks_page(self, **kws):
"elements and peaks parameters"
mca = self.parent.mca
wids = self.wids
p = GridPanel(self)
self.selected_elems = []
self.ptable = PeriodicTablePanel(p, multi_select=True, fontsize=12,
tooltip_msg=tooltips['ptable'],
onselect=self.onElemSelect)
dstep, dtail, dbeta, dgamma = 0.05, 0.10, 0.5, 0.05
wids['peak_step'] = FloatSpin(p, value=dstep, digits=3, min_val=0,
max_val=1.0, increment=0.01,
tooltip=tooltips['step'])
wids['peak_gamma'] = FloatSpin(p, value=dgamma, digits=3, min_val=0,
max_val=10.0, increment=0.01,
tooltip=tooltips['gamma'])
wids['peak_tail'] = FloatSpin(p, value=dtail, digits=3, min_val=0,
max_val=1.0, increment=0.05,
tooltip=tooltips['tail'])
wids['peak_beta'] = FloatSpin(p, value=dbeta, digits=3, min_val=0,
max_val=10.0, increment=0.01,
tooltip=tooltips['beta'])
wids['peak_step_vary'] = VarChoice(p, default=0)
wids['peak_tail_vary'] = VarChoice(p, default=0)
wids['peak_gamma_vary'] = VarChoice(p, default=0)
wids['peak_beta_vary'] = VarChoice(p, default=0)
btn_from_peaks = Button(p, 'Guess Peaks', size=(150, -1),
action=self.onElems_GuessPeaks)
# tooltip='Guess elements from peak locations')
btn_from_rois = Button(p, 'Use ROIS as Peaks', size=(150, -1),
action=self.onElems_FromROIS)
btn_clear_elems = Button(p, 'Clear All Peaks', size=(150, -1),
action=self.onElems_Clear)
wx.CallAfter(self.onElems_GuessPeaks)
p.AddText('Elements to model:', colour='#880000', dcol=2)
p.Add((2, 2), newrow=True)
p.Add(self.ptable, dcol=5, drow=5)
irow = p.irow
p.Add(btn_from_peaks, icol=6, dcol=2, irow=irow)
p.Add(btn_from_rois, icol=6, dcol=2, irow=irow+1)
p.Add(btn_clear_elems, icol=6, dcol=2, irow=irow+2)
p.irow += 5
p.Add((2, 2), newrow=True)
p.AddText(' Step: ')
p.Add(wids['peak_step'])
p.Add(wids['peak_step_vary'])
p.AddText(' Gamma : ')
p.Add(wids['peak_gamma'])
p.Add(wids['peak_gamma_vary'])
p.Add((2, 2), newrow=True)
p.AddText(' Beta: ')
p.Add(wids['peak_beta'])
p.Add(wids['peak_beta_vary'])
p.AddText(' Tail: ')
p.Add(wids['peak_tail'])
p.Add(wids['peak_tail_vary'])
p.Add((2, 2), newrow=True)
p.Add(HLine(p, size=(650, 3)), dcol=8)
p.Add((2, 2), newrow=True)
# name, escale, step, sigmax, beta, tail
scatter_peaks = (('Elastic', 1.00, 0.05, 1.0, 0.5, 0.10),
('Compton1', 0.97, 0.05, 1.5, 2.0, 0.25),
('Compton2', 0.94, 0.05, 2.0, 2.5, 0.25))
opts = dict(size=(100, -1), min_val=0, digits=4, increment=0.010)
for name, escale, dstep, dsigma, dbeta, dtail in scatter_peaks:
en = escale * self.mca.incident_energy
t = name.lower()
vary_en = 1 if t.startswith('compton') else 0
wids['%s_use'%t] = Check(p, label='Include', default=True)
wids['%s_cen_vary'%t] = VarChoice(p, default=vary_en)
wids['%s_step_vary'%t] = VarChoice(p, default=0)
wids['%s_beta_vary'%t] = VarChoice(p, default=0)
wids['%s_tail_vary'%t] = VarChoice(p, default=0)
wids['%s_sigma_vary'%t] = VarChoice(p, default=0)
wids['%s_cen'%t] = FloatSpin(p, value=en, digits=3, min_val=0,
increment=0.01)
wids['%s_step'%t] = FloatSpin(p, value=dstep, digits=3, min_val=0,
max_val=1.0, increment=0.01,
tooltip=tooltips['step'])
wids['%s_tail'%t] = FloatSpin(p, value=dtail, digits=3, min_val=0,
max_val=1.0, increment=0.05,
tooltip=tooltips['tail'])
wids['%s_beta'%t] = FloatSpin(p, value=dbeta, digits=3, min_val=0,
max_val=10.0, increment=0.10,
tooltip=tooltips['beta'])
wids['%s_sigma'%t] = FloatSpin(p, value=dsigma, digits=3, min_val=0,
max_val=10.0, increment=0.05,
tooltip=tooltips['sigmax'])
p.Add((2, 2), newrow=True)
p.AddText(" %s Peak:" % name, colour='#880000')
p.Add(wids['%s_use' % t], dcol=2)
p.AddText(' Energy (keV): ')
p.Add(wids['%s_cen'%t])
p.Add(wids['%s_cen_vary'%t])
p.Add((2, 2), newrow=True)
p.AddText(' Step: ')
p.Add(wids['%s_step'%t])
p.Add(wids['%s_step_vary'%t])
p.AddText(' Sigma Scale : ')
p.Add(wids['%s_sigma'%t])
p.Add(wids['%s_sigma_vary'%t])
p.Add((2, 2), newrow=True)
p.AddText(' Beta : ')
p.Add(wids['%s_beta'%t])
p.Add(wids['%s_beta_vary'%t])
p.AddText(' Tail: ')
p.Add(wids['%s_tail'%t])
p.Add(wids['%s_tail_vary'%t])
p.Add((2, 2), newrow=True)
p.Add(HLine(p, size=(650, 3)), dcol=7)
p.pack()
return p
def beamdet_page(self, **kws):
"beam / detector settings"
mca = self.mca
en_min = 2.0
en_max = self.mca.incident_energy
cal_offset = getattr(mca, 'offset', 0)
cal_slope = getattr(mca, 'slope', 0.010)
det_noise = getattr(mca, 'det_noise', 0.035)
escape_amp = getattr(mca, 'escape_amp', 1.0)
pileup_amp = getattr(mca, 'pileup_amp', 0.1)
wids = self.wids
# main = wx.Panel(self)
pdet = GridPanel(self, itemstyle=LEFT)
def addLine(pan):
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
wids['escape_use'] = Check(pdet, label='Include Escape in Fit',
default=True, action=self.onUsePileupEscape)
wids['escape_amp'] = FloatSpin(pdet, value=escape_amp,
min_val=0, max_val=100, digits=2,
increment=0.02, size=(100, -1))
wids['pileup_use'] = Check(pdet, label='Include Pileup in Fit',
default=True, action=self.onUsePileupEscape)
wids['pileup_amp'] = FloatSpin(pdet, value=pileup_amp,
min_val=0, max_val=100, digits=2,
increment=0.02, size=(100, -1))
wids['escape_amp_vary'] = VarChoice(pdet, default=True)
wids['pileup_amp_vary'] = VarChoice(pdet, default=True)
wids['cal_slope'] = FloatSpin(pdet, value=cal_slope,
min_val=0, max_val=100,
digits=4, increment=0.01, size=(100, -1))
wids['cal_offset'] = FloatSpin(pdet, value=cal_offset,
min_val=-500, max_val=500,
digits=4, increment=0.01, size=(100, -1))
wids['cal_vary'] = Check(pdet, label='Vary Calibration in Fit', default=True)
wids['det_mat'] = Choice(pdet, choices=Detector_Materials,
size=(70, -1), default=0,
action=self.onDetMaterial)
wids['det_thk'] = FloatSpin(pdet, value=0.400, size=(100, -1),
increment=0.010, min_val=0, max_val=10,
digits=4)
wids['det_noise_vary'] = VarChoice(pdet, default=1)
opts = dict(size=(100, -1), min_val=0, max_val=500, digits=3,
increment=0.10)
wids['en_xray'] = FloatSpin(pdet, value=self.mca.incident_energy,
action=self.onSetXrayEnergy, **opts)
wids['en_min'] = FloatSpin(pdet, value=en_min, **opts)
wids['en_max'] = FloatSpin(pdet, value=en_max, **opts)
wids['flux_in'] = FloatCtrl(pdet, value=5.e10, gformat=True,
minval=0, size=(100, -1))
opts.update({'increment': 0.005})
wids['det_noise'] = FloatSpin(pdet, value=det_noise, **opts)
wids['det_efano'] = SimpleText(pdet, size=(200, -1),
label='E_Fano= %.4e' % FanoFactors['Si'])
opts.update(digits=1, max_val=90, min_val=0, increment=1)
wids['angle_in'] = FloatSpin(pdet, value=45, **opts)
wids['angle_out'] = FloatSpin(pdet, value=45, **opts)
opts.update(digits=1, max_val=5e9, min_val=0, increment=1)
wids['det_dist'] = FloatSpin(pdet, value=50, **opts)
wids['det_area'] = FloatSpin(pdet, value=50, **opts)
for notyet in ('angle_in', 'angle_out', 'det_dist', 'det_area',
'flux_in'):
wids[notyet].Disable()
pdet.AddText(' Beam Energy, Fit Range :', colour='#880000', dcol=2)
pdet.AddText(' X-ray Energy (keV): ', newrow=True)
pdet.Add(wids['en_xray'])
pdet.AddText('Incident Flux (Hz): ', newrow=False)
pdet.Add(wids['flux_in'])
pdet.AddText(' Fit Energy Min (keV): ', newrow=True)
pdet.Add(wids['en_min'])
pdet.AddText('Fit Energy Max (keV): ')
pdet.Add(wids['en_max'])
addLine(pdet)
pdet.AddText(' Energy Calibration :', colour='#880000', dcol=1, newrow=True)
pdet.Add(wids['cal_vary'], dcol=2)
pdet.AddText(' Offset (keV): ', newrow=True)
pdet.Add(wids['cal_offset'])
pdet.AddText('Slope (keV/bin): ')
pdet.Add(wids['cal_slope'])
addLine(pdet)
pdet.AddText(' Detector Material:', colour='#880000', dcol=1, newrow=True)
pdet.AddText(EFano_Text, dcol=3)
pdet.AddText(' Material: ', newrow=True)
pdet.Add(wids['det_mat'])
pdet.Add(wids['det_efano'], dcol=2)
pdet.AddText(' Thickness (mm): ', newrow=True)
pdet.Add(wids['det_thk'])
pdet.AddText(' Noise (keV): ', newrow=True)
pdet.Add(wids['det_noise'])
pdet.Add(wids['det_noise_vary'], dcol=2)
addLine(pdet)
pdet.AddText(' Escape && Pileup:', colour='#880000', dcol=2, newrow=True)
pdet.AddText(' Escape Scale:', newrow=True)
pdet.Add(wids['escape_amp'])
pdet.Add(wids['escape_amp_vary'])
pdet.Add(wids['escape_use'], dcol=3)
pdet.AddText(' Pileup Scale:', newrow=True)
pdet.Add(wids['pileup_amp'])
pdet.Add(wids['pileup_amp_vary'])
pdet.Add(wids['pileup_use'], dcol=3)
addLine(pdet)
pdet.AddText(' Geometry:', colour='#880000', dcol=1, newrow=True)
pdet.AddText(Geom_Text, dcol=3)
pdet.AddText(' Incident Angle (deg):', newrow=True)
pdet.Add(wids['angle_in'])
pdet.AddText(' Exit Angle (deg):', newrow=False)
pdet.Add(wids['angle_out'])
pdet.AddText(' Detector Distance (mm): ', newrow=True)
pdet.Add(wids['det_dist'])
pdet.AddText(' Detector Area (mm^2): ', newrow=False)
pdet.Add(wids['det_area'])
addLine(pdet)
pdet.pack()
return pdet
def materials_page(self, **kws):
"filters and matrix settings"
wids = self.wids
pan = GridPanel(self, itemstyle=LEFT)
pan.AddText(' Filters :', colour='#880000', dcol=2) # , newrow=True)
pan.AddManyText((' Filter #', 'Material', 'Thickness (mm)',
'Vary Thickness'), style=LEFT, newrow=True)
opts = dict(size=(125, -1), min_val=0, digits=5, increment=0.005)
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
wids['%s_mat'%t] = Choice(pan, choices=Filter_Materials, default=0,
size=(150, -1),
action=partial(self.onFilterMaterial, index=i+1))
wids['%s_thk'%t] = FloatSpin(pan, value=0.0, **opts)
wids['%s_var'%t] = VarChoice(pan, default=0)
if i == 0: # first selection
wids['%s_mat'%t].SetStringSelection('beryllium')
wids['%s_thk'%t].SetValue(0.0250)
elif i == 1: # second selection
wids['%s_mat'%t].SetStringSelection('air')
wids['%s_thk'%t].SetValue(50.00)
elif i == 2: # third selection
wids['%s_mat'%t].SetStringSelection('kapton')
wids['%s_thk'%t].SetValue(0.00)
            elif i == 3: # fourth selection
wids['%s_mat'%t].SetStringSelection('aluminum')
wids['%s_thk'%t].SetValue(0.00)
pan.AddText(' %i' % (i+1), newrow=True)
pan.Add(wids['%s_mat' % t])
pan.Add(wids['%s_thk' % t])
pan.Add(wids['%s_var' % t])
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
pan.AddText(' Matrix:', colour='#880000', newrow=True)
pan.AddText(' NOTE: thin film limit only', dcol=3)
wids['matrix_mat'] = wx.TextCtrl(pan, value='', size=(275, -1))
wids['matrix_thk'] = FloatSpin(pan, value=0.0, **opts)
wids['matrix_den'] = FloatSpin(pan, value=1.0, **opts)
wids['matrix_btn'] = Button(pan, 'Use Material', size=(175, -1),
action=self.onUseCurrentMaterialAsFilter)
wids['matrix_btn'].Disable()
pan.AddText(' Material/Formula:', dcol=1, newrow=True)
pan.Add(wids['matrix_mat'], dcol=2)
pan.Add(wids['matrix_btn'], dcol=3)
pan.AddText(' Thickness (mm):', newrow=True)
pan.Add(wids['matrix_thk'])
        pan.AddText(' Density (g/cm^3):', newrow=False)
pan.Add(wids['matrix_den'])
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
# Materials
pan.AddText(' Known Materials:', colour='#880000', dcol=4, newrow=True)
mview = self.owids['materials'] = dv.DataViewListCtrl(pan, style=DVSTYLE)
mview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectMaterial)
self.selected_material = ''
mview.AppendTextColumn('Name', width=150)
mview.AppendTextColumn('Formula', width=325)
mview.AppendTextColumn('density', width=90)
mview.AppendToggleColumn('Filter?', width=75)
for col in range(4):
this = mview.Columns[col]
align = wx.ALIGN_LEFT
this.Sortable = True
this.Alignment = this.Renderer.Alignment = align
mview.SetMinSize((675, 170))
mview.DeleteAllItems()
self.materials_data = {}
for name, data in materials._read_materials_db().items():
formula, density = data
self.materials_data[name] = (formula, density)
mview.AppendItem((name, formula, "%9.6f"%density,
name in Filter_Materials))
pan.Add(mview, dcol=5, newrow=True)
pan.AddText(' Add Material:', colour='#880000', newrow=True)
pan.Add(Button(pan, 'Add', size=(175, -1),
action=self.onAddMaterial))
pan.Add((10, 10))
bx = Button(pan, 'Update Filter List', size=(175, -1),
action=self.onUpdateFilterList)
pan.Add(bx)
self.owids['newmat_name'] = wx.TextCtrl(pan, value='', size=(175, -1))
self.owids['newmat_dens'] = FloatSpin(pan, value=1.0, **opts)
self.owids['newmat_form'] = wx.TextCtrl(pan, value='', size=(400, -1))
for notyet in ('matrix_mat', 'matrix_thk', 'matrix_den',
'matrix_btn'):
wids[notyet].Disable()
pan.AddText(' Name:', newrow=True)
pan.Add(self.owids['newmat_name'])
        pan.AddText(' Density (g/cm^3):', newrow=False)
pan.Add(self.owids['newmat_dens'])
pan.AddText(' Formula:', newrow=True)
pan.Add(self.owids['newmat_form'], dcol=3)
pan.pack()
return pan
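    # Fit Results page: a scrolled panel listing fit statistics for each stored
    # fit, the refined parameter values and uncertainties, and parameter
    # correlations above a user-selected threshold.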
def fitresult_page(self, **kws):
sizer = wx.GridBagSizer(10, 5)
panel = scrolled.ScrolledPanel(self)
# title row
wids = self.owids
title = SimpleText(panel, 'Fit Results', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['data_title'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['fitlabel_lab'] = SimpleText(panel, ' Fit Label: ')
wids['fitlabel_txt'] = wx.TextCtrl(panel, -1, ' ', size=(150, -1))
wids['fitlabel_btn'] = Button(panel, 'Set Label', size=(150, -1),
action=self.onChangeFitLabel)
opts = dict(default=False, size=(175, -1), action=self.onPlot)
wids['plot_comps'] = Check(panel, label='Show Components?', **opts)
self.plot_choice = Button(panel, 'Plot',
size=(150, -1), action=self.onPlot)
self.save_result = Button(panel, 'Save Model',
size=(150, -1), action=self.onSaveFitResult)
SetTip(self.save_result, 'save model and result to be loaded later')
self.export_fit = Button(panel, 'Export Fit',
size=(150, -1), action=self.onExportFitResult)
SetTip(self.export_fit, 'save arrays and results to text file')
irow = 0
sizer.Add(title, (irow, 0), (1, 1), LEFT)
sizer.Add(wids['data_title'], (irow, 1), (1, 3), LEFT)
irow += 1
sizer.Add(self.save_result, (irow, 0), (1, 1), LEFT)
sizer.Add(self.export_fit, (irow, 1), (1, 1), LEFT)
sizer.Add(self.plot_choice, (irow, 2), (1, 1), LEFT)
sizer.Add(wids['plot_comps'], (irow, 3), (1, 1), LEFT)
irow += 1
sizer.Add(wids['fitlabel_lab'], (irow, 0), (1, 1), LEFT)
sizer.Add(wids['fitlabel_txt'], (irow, 1), (1, 1), LEFT)
sizer.Add(wids['fitlabel_btn'], (irow, 2), (1, 2), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Fit Statistics]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
sizer.Add(title, (irow, 0), (1, 4), LEFT)
sview = wids['stats'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
sview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectFit)
sview.AppendTextColumn(' Fit Label', width=90)
sview.AppendTextColumn(' N_vary', width=65)
sview.AppendTextColumn(' N_eval', width=65)
sview.AppendTextColumn(' \u03c7\u00B2', width=125)
sview.AppendTextColumn(' \u03c7\u00B2_reduced', width=125)
sview.AppendTextColumn(' Akaike Info', width=125)
for col in range(sview.ColumnCount):
this = sview.Columns[col]
isort, align = True, wx.ALIGN_RIGHT
if col == 0:
align = wx.ALIGN_CENTER
this.Sortable = isort
this.Alignment = this.Renderer.Alignment = align
sview.SetMinSize((675, 150))
irow += 1
sizer.Add(sview, (irow, 0), (1, 5), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Variables]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
sizer.Add(title, (irow, 0), (1, 1), LEFT)
pview = wids['params'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
wids['paramsdata'] = []
pview.AppendTextColumn('Parameter', width=150)
pview.AppendTextColumn('Refined Value', width=100)
pview.AppendTextColumn('Standard Error', width=100)
pview.AppendTextColumn('% Uncertainty', width=100)
pview.AppendTextColumn('Initial Value', width=150)
        for col in range(pview.ColumnCount):
this = pview.Columns[col]
align = wx.ALIGN_LEFT
if col > 0:
align = wx.ALIGN_RIGHT
this.Sortable = False
this.Alignment = this.Renderer.Alignment = align
pview.SetMinSize((675, 200))
pview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectParameter)
irow += 1
sizer.Add(pview, (irow, 0), (1, 5), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Correlations]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['all_correl'] = Button(panel, 'Show All',
size=(100, -1), action=self.onAllCorrel)
wids['min_correl'] = FloatSpin(panel, value=MIN_CORREL,
min_val=0, size=(100, -1),
digits=3, increment=0.1)
ctitle = SimpleText(panel, 'minimum correlation: ')
sizer.Add(title, (irow, 0), (1, 1), LEFT)
sizer.Add(ctitle, (irow, 1), (1, 1), LEFT)
sizer.Add(wids['min_correl'], (irow, 2), (1, 1), LEFT)
sizer.Add(wids['all_correl'], (irow, 3), (1, 1), LEFT)
cview = wids['correl'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
cview.AppendTextColumn('Parameter 1', width=150)
cview.AppendTextColumn('Parameter 2', width=150)
cview.AppendTextColumn('Correlation', width=150)
for col in (0, 1, 2):
this = cview.Columns[col]
this.Sortable = False
align = wx.ALIGN_LEFT
if col == 2:
align = wx.ALIGN_RIGHT
this.Alignment = this.Renderer.Alignment = align
cview.SetMinSize((675, 125))
irow += 1
sizer.Add(cview, (irow, 0), (1, 5), LEFT)
pack(panel, sizer)
panel.SetMinSize((675, 725))
panel.SetupScrolling()
return panel
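    # Composition page: converts fitted element amplitudes to concentrations
    # using a single scaling factor, set either directly or by pinning one
    # element to a known abundance.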
def composition_page(self, **kws):
sizer = wx.GridBagSizer(10, 5)
panel = scrolled.ScrolledPanel(self)
wids = self.owids
title = SimpleText(panel, 'Composition Results', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['data_title2'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
cview = wids['composition'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
cview.AppendTextColumn(' Z ', width=50)
cview.AppendTextColumn(' Element ', width=100)
cview.AppendTextColumn(' Amplitude', width=150)
cview.AppendTextColumn(' Concentration', width=150)
cview.AppendTextColumn(' Uncertainty', width=150)
for col in range(5):
this = cview.Columns[col]
align = wx.ALIGN_RIGHT
if col == 1:
align = wx.ALIGN_LEFT
this.Sortable = True
this.Alignment = this.Renderer.Alignment = align
cview.SetMinSize((675, 500))
wids['comp_fitlabel'] = Choice(panel, choices=[''], size=(175, -1),
action=self.onCompSelectFit)
self.compscale_lock = 0.0
wids['comp_elemchoice'] = Choice(panel, choices=[''], size=(100, -1))
# action=self.onCompSetElemAbundance)
wids['comp_elemscale'] = FloatSpin(panel, value=1.0, digits=5, min_val=0,
increment=0.01,
action=self.onCompSetElemAbundance)
wids['comp_units'] = Choice(panel, choices=CompositionUnits, size=(100, -1))
wids['comp_scale'] = FloatCtrl(panel, value=0, size=(200, -1), precision=5,
minval=0, action=self.onCompSetScale)
wids['comp_save'] = Button(panel, 'Save This Concentration Data',
size=(200, -1), action=self.onCompSave)
irow = 0
sizer.Add(title, (irow, 0), (1, 2), LEFT)
sizer.Add(wids['data_title2'], (irow, 2), (1, 5), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Fit Label:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_fitlabel'], (irow, 1), (1, 5), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Scale Element:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_elemchoice'], (irow, 1), (1, 1), LEFT)
sizer.Add(SimpleText(panel, ' to:'), (irow, 2), (1, 1), LEFT)
sizer.Add(wids['comp_elemscale'], (irow, 3), (1, 1), LEFT)
sizer.Add(wids['comp_units'], (irow, 4), (1, 1), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Scaling Factor:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_scale'], (irow, 1), (1, 3), LEFT)
irow += 1
sizer.Add(wids['composition'], (irow, 0), (3, 6), LEFT)
irow += 3
sizer.Add(wids['comp_save'], (irow, 0), (1, 3), LEFT)
pack(panel, sizer)
panel.SetMinSize((675, 750))
panel.SetupScrolling()
return panel
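    # Recompute the concentration table when the overall scaling factor is
    # edited; the 0.25 s time lock appears to guard against feedback between
    # this handler and onCompSetElemAbundance, which update each other's widgets.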
def onCompSetScale(self, event=None, value=None):
if len(self.xrfresults) < 1 or (time.time() - self.compscale_lock) < 0.25:
return
self.compscale_lock = time.time()
owids = self.owids
result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())
cur_elem = owids['comp_elemchoice'].GetStringSelection()
conc_vals = {}
for elem in result.comps.keys():
parname = 'amp_%s' % elem.lower()
if parname in result.params:
par = result.params[parname]
conc_vals[elem] = [par.value, par.stderr]
try:
scale = self.owids['comp_scale'].GetValue()
except:
return
owids['comp_elemscale'].SetValue(conc_vals[cur_elem][0]*scale)
owids['composition'].DeleteAllItems()
result.concentration_results = conc_vals
result.concentration_scale = scale
for elem, dat in conc_vals.items():
zat = "%d" % atomic_number(elem)
val, serr = dat
rval = "%15.4f" % val
sval = "%15.4f" % (val*scale)
uval = "%15.4f" % (serr*scale)
try:
uval = uval + ' ({:.2%})'.format(abs(serr/val))
except ZeroDivisionError:
pass
owids['composition'].AppendItem((zat, elem, rval, sval, uval))
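    # Set the scaling factor by pinning the selected element to a known
    # abundance: scale = abundance / fitted amplitude for that element.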
def onCompSetElemAbundance(self, event=None, value=None):
if len(self.xrfresults) < 1 or (time.time() - self.compscale_lock) < 0.25:
return
self.compscale_lock = time.time()
owids = self.owids
result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())
cur_elem = owids['comp_elemchoice'].GetStringSelection()
conc_vals = {}
for elem in result.comps.keys():
parname = 'amp_%s' % elem.lower()
if parname in result.params:
par = result.params[parname]
conc_vals[elem] = [par.value, par.stderr]
result.concentration_results = conc_vals
elem_value = owids['comp_elemscale'].GetValue()
scale = elem_value/conc_vals[cur_elem][0]
result.concentration_scale = scale
owids['comp_scale'].SetValue(scale)
owids['composition'].DeleteAllItems()
for elem, dat in conc_vals.items():
zat = "%d" % atomic_number(elem)
val, serr = dat
rval = "%15.4f" % val
sval = "%15.4f" % (val*scale)
uval = "%15.4f" % (serr*scale)
try:
uval = uval + ' ({:.2%})'.format(abs(serr/val))
except ZeroDivisionError:
pass
owids['composition'].AppendItem((zat, elem, rval, sval, uval))
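    # Write the current concentration table, along with the fit report, to a
    # text/CSV file chosen by the user.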
def onCompSave(self, event=None):
result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())
scale = result.concentration_scale
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '_xrf.csv'
wcards = "CSV (*.csv)|*.csv|All files (*.*)|*.*"
sfile = FileSave(self, 'Save Concentration Results',
default_file=deffile,
wildcard=wcards)
if sfile is not None:
buff = ["# results for MCA labeled: %s" % self.mca.label,
"# fit label: %s" % result.label,
"# concentration units: %s" % self.owids['comp_units'].GetStringSelection(),
"# count time: %s" % result.count_time,
"# scale: %s" % result.concentration_scale,
"# Fit Report:" ]
for l in result.fit_report.split('\n'):
buff.append("# %s" % l)
buff.append("###########")
buff.append("#Element Concentration Uncertainty RawAmplitude")
for elem, dat in result.concentration_results.items():
eout = (elem + ' '*4)[:4]
val, serr = dat
rval = "%16.5f" % val
                # concentration = amplitude * scale, matching the table display
                sval = "%16.5f" % (val*scale)
                uval = "%16.5f" % (serr*scale)
buff.append(" ".join([elem, sval, uval, rval]))
buff.append('')
with open(sfile, 'w') as fh:
fh.write('\n'.join(buff))
def onCompSelectFit(self, event=None):
result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())
cur_elem = self.owids['comp_elemchoice'].GetStringSelection()
self.owids['comp_elemchoice'].Clear()
elems = [el['symbol'] for el in result.elements]
self.owids['comp_elemchoice'].SetChoices(elems)
if len(cur_elem) > 0:
self.owids['comp_elemchoice'].SetStringSelection(cur_elem)
else:
self.owids['comp_elemchoice'].SetSelection(0)
self.onCompSetElemAbundance()
def UpdateCompositionPage(self, event=None):
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
if len(self.xrfresults) > 0:
result = self.get_fitresult()
fitlab = self.owids['comp_fitlabel']
fitlab.Clear()
fitlab.SetChoices([a.label for a in self.xrfresults])
fitlab.SetStringSelection(result.label)
self.onCompSelectFit()
def onElems_Clear(self, event=None):
self.ptable.on_clear_all()
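    # Guess elements from the data: locate candidate peaks with peakutils,
    # match them to tabulated K-alpha/K-beta energies within ~25 eV, and keep
    # only elements whose K or L edges lie between the fit minimum and the
    # incident energy.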
def onElems_GuessPeaks(self, event=None):
mca = self.mca
_indices = peak.indexes(mca.counts*1.0, min_dist=5, thres=0.025)
peak_energies = mca.energy[_indices]
elrange = range(10, 92)
atsyms = [atomic_symbol(i) for i in elrange]
kalphas = [0.001*xray_line(i, 'Ka').energy for i in elrange]
kbetas = [0.001*xray_line(i, 'Kb').energy for i in elrange]
self.ptable.on_clear_all()
elems = []
for iz, en in enumerate(peak_energies):
for i, ex in enumerate(kalphas):
if abs(en - ex) < 0.025:
elems.append(atsyms[i])
peak_energies[iz] = -ex
for iz, en in enumerate(peak_energies):
if en > 0:
for i, ex in enumerate(kbetas):
if abs(en - ex) < 0.025:
if atsyms[i] not in elems:
elems.append(atsyms[i])
peak_energies[iz] = -ex
en = self.wids['en_xray'].GetValue()
emin = self.wids['en_min'].GetValue()
for elem in elems:
kedge = 0.001*xray_edge(elem, 'K').energy
l3edge = 0.001*xray_edge(elem, 'L3').energy
            l2edge = 0.001*xray_edge(elem, 'L2').energy
if ((kedge < en and kedge > emin) or
(l3edge < en and l3edge > emin) or
(l2edge < en and l2edge > emin)):
if elem not in self.ptable.selected:
self.ptable.onclick(label=elem)
def onElems_FromROIS(self, event=None):
for roi in self.mca.rois:
words = roi.name.split()
elem = words[0].title()
if (elem in self.ptable.syms and
elem not in self.ptable.selected):
self.ptable.onclick(label=elem)
self.onSetXrayEnergy()
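    # When the incident energy changes, move the fit maximum and the
    # elastic/Compton peak centers with it, then re-select elements from the
    # MCA ROIs whose absorption edges fall within the accessible range.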
def onSetXrayEnergy(self, event=None):
en = self.wids['en_xray'].GetValue()
self.wids['en_max'].SetValue(en)
self.wids['elastic_cen'].SetValue(en)
self.wids['compton1_cen'].SetValue(en*0.975)
self.wids['compton2_cen'].SetValue(en*0.950)
emin = self.wids['en_min'].GetValue() * 1.25
self.ptable.on_clear_all()
for roi in self.mca.rois:
words = roi.name.split()
elem = words[0].title()
kedge = l3edge = l2edge = 0.0
try:
kedge = 0.001*xray_edge(elem, 'K').energy
l3edge = 0.001*xray_edge(elem, 'L3').energy
                l2edge = 0.001*xray_edge(elem, 'L2').energy
except:
pass
if ((kedge < en and kedge > emin) or
(l3edge < en and l3edge > emin) or
(l2edge < en and l2edge > emin)):
if elem not in self.ptable.selected:
self.ptable.onclick(label=elem)
def onDetMaterial(self, event=None):
dmat = self.wids['det_mat'].GetStringSelection()
if dmat not in FanoFactors:
dmat = 'Si'
self.wids['det_efano'].SetLabel('E_Fano= %.4e' % FanoFactors[dmat])
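    # When a filter material is chosen and its thickness is essentially unset,
    # seed a plausible default: low-density (gas-like) entries get ~10 mm,
    # solids get 25 microns.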
def onFilterMaterial(self, evt=None, index=1):
name = evt.GetString()
den = self.materials_data.get(name, (None, 1.0))[1]
t = 'filter%d' % (index)
thick = self.wids['%s_thk'%t]
if den < 0.1 and thick.GetValue() < 0.1:
thick.SetValue(10.0)
thick.SetIncrement(0.5)
elif den > 0.1 and thick.GetValue() < 1.e-5:
thick.SetValue(0.0250)
thick.SetIncrement(0.005)
def onUseCurrentMaterialAsFilter(self, evt=None):
name = self.selected_material
density = self.materials_data.get(name, (None, 1.0))[1]
self.wids['matrix_den'].SetValue(density)
self.wids['matrix_mat'].SetValue(name)
def onSelectMaterial(self, evt=None):
if self.owids['materials'] is None:
return
item = self.owids['materials'].GetSelectedRow()
name = None
if item > -1:
name = list(self.materials_data.keys())[item]
self.selected_material = name
self.wids['matrix_btn'].Enable(name is not None)
if name is not None:
self.wids['matrix_btn'].SetLabel('Use %s' % name)
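    # Rebuild the four filter-material drop-downs from the rows toggled as
    # 'Filter?' in the Known Materials table, keeping current selections
    # where still available.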
def onUpdateFilterList(self, evt=None):
flist = ['None']
for i in range(len(self.materials_data)):
if self.owids['materials'].GetToggleValue(i, 3): # is filter
flist.append(self.owids['materials'].GetTextValue(i, 0))
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
choice = self.wids['%s_mat'%t]
cur = choice.GetStringSelection()
choice.Clear()
choice.SetChoices(flist)
if cur in flist:
choice.SetStringSelection(cur)
else:
choice.SetSelection(0)
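    # Add (or, after confirmation, redefine) a user-entered material in the
    # xraydb materials database and in the local Known Materials table.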
def onAddMaterial(self, evt=None):
name = self.owids['newmat_name'].GetValue()
formula = self.owids['newmat_form'].GetValue()
density = self.owids['newmat_dens'].GetValue()
add = len(name) > 0 and len(formula)>0
if add and name in self.materials_data:
add = (Popup(self,
"Overwrite definition of '%s'?" % name,
'Re-define material?',
style=wx.OK|wx.CANCEL)==wx.ID_OK)
if add:
irow = list(self.materials_data.keys()).index(name)
self.owids['materials'].DeleteItem(irow)
if add:
add_material(name, formula, density)
self.materials_data[name] = (formula, density)
self.selected_material = name
self.owids['materials'].AppendItem((name, formula,
"%9.6f"%density,
False))
def onElemSelect(self, event=None, elem=None):
self.ptable.tsym.SetLabel('')
self.ptable.title.SetLabel('%d elements selected' %
len(self.ptable.selected))
def onUsePileupEscape(self, event=None):
puse = self.wids['pileup_use'].IsChecked()
self.wids['pileup_amp'].Enable(puse)
self.wids['pileup_amp_vary'].Enable(puse)
puse = self.wids['escape_use'].IsChecked()
self.wids['escape_amp'].Enable(puse)
self.wids['escape_amp_vary'].Enable(puse)
def onUsePeak(self, event=None, name=None, value=None):
if value is None and event is not None:
value = event.IsChecked()
if name is None:
return
for a in ('cen', 'step', 'tail', 'sigma', 'beta'):
self.wids['%s_%s'%(name, a)].Enable(value)
varwid = self.wids.get('%s_%s_vary'%(name, a), None)
if varwid is not None:
varwid.Enable(value)
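    # build_model() assembles a larch script from the template strings defined
    # above (detector setup, scatter peaks, filters, matrix, pileup/escape,
    # element list), evaluates it to create _xrfmodel, and optionally rescales
    # each component amplitude so the initial model roughly matches the data.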
def build_model(self, match_amplitudes=True):
"""build xrf_model from form settings"""
vars = {'Vary':'True', 'Fix': 'False', 'True':True, 'False': False}
opts = {}
for key, wid in self.wids.items():
val = None
if hasattr(wid, 'GetValue'):
val = wid.GetValue()
elif hasattr(wid, 'IsChecked'):
val = wid.IsChecked()
elif isinstance(wid, Choice):
val = wid.GetStringSelection()
elif hasattr(wid, 'GetStringSelection'):
val = wid.GetStringSelection()
elif hasattr(wid, 'GetLabel'):
val = wid.GetLabel()
if isinstance(val, str) and val.title() in vars:
val = vars[val.title()]
opts[key] = val
opts['count_time'] = getattr(self.mca, 'real_time', 1.0)
if opts['count_time'] is None:
opts['count_time'] = 1.0
script = [xrfmod_setup.format(**opts)]
for peakname in ('Elastic', 'Compton1', 'Compton2'):
t = peakname.lower()
if opts['%s_use'% t]:
d = {'peakname': t}
d['_cen'] = opts['%s_cen'%t]
d['vcen'] = opts['%s_cen_vary'%t]
d['_step'] = opts['%s_step'%t]
d['vstep'] = opts['%s_step_vary'%t]
d['_tail'] = opts['%s_tail'%t]
d['vtail'] = opts['%s_tail_vary'%t]
d['_beta'] = opts['%s_beta'%t]
d['vbeta'] = opts['%s_beta_vary'%t]
d['_sigma'] = opts['%s_sigma'%t]
d['vsigma'] = opts['%s_sigma_vary'%t]
script.append(xrfmod_scattpeak.format(**d))
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
f_mat = opts['%s_mat'%t]
if f_mat not in (None, 'None') and int(1e6*opts['%s_thk'%t]) > 1:
script.append(xrfmod_filter.format(name=f_mat,
thick=opts['%s_thk'%t],
vary=opts['%s_var'%t]))
m_mat = opts['matrix_mat'].strip()
if len(m_mat) > 0 and int(1e6*opts['matrix_thk']) > 1:
script.append(xrfmod_matrix.format(name=m_mat,
thick=opts['matrix_thk'],
density=opts['matrix_den']))
if opts['pileup_use'] in ('True', True):
script.append(xrfmod_pileup.format(scale=opts['pileup_amp'],
vary=opts['pileup_amp_vary']))
if opts['escape_use'] in ('True', True):
script.append(xrfmod_escape.format(scale=opts['escape_amp'],
vary=opts['escape_amp_vary']))
# sort elements selected on Periodic Table by Z
elemz = []
for elem in self.ptable.selected:
elemz.append( 1 + self.ptable.syms.index(elem))
elemz.sort()
syms = ["'%s'" % self.ptable.syms[iz-1] for iz in sorted(elemz)]
syms = '[%s]' % (', '.join(syms))
script.append(xrfmod_elems.format(elemlist=syms))
script.append("{group:s}.xrf_init = _xrfmodel.calc_spectrum({group:s}.energy)")
script = '\n'.join(script)
self.model_script = script.format(group=self.mcagroup)
self._larch.eval(self.model_script)
cmds = []
self.xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
floor = 1.e-12*max(self.mca.counts)
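        # Initial-amplitude heuristic: for each component, find where it peaks
        # and scale its amplitude so the model roughly matches the measured
        # counts there; background, pileup and escape components keep scale=1.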
if match_amplitudes:
total = 0.0 * self.mca.counts
for name, parr in self.xrfmod.comps.items():
nam = name.lower()
try:
imax = np.where(parr > 0.99*parr.max())[0][0]
except: # probably means all counts are zero
imax = int(len(parr)/2.0)
scale = self.mca.counts[imax] / (parr[imax]+1.00)
ampname = 'amp_%s' % nam
if nam in ('elastic', 'compton1', 'compton2', 'compton',
'background', 'pileup', 'escape'):
ampname = '%s_amp' % nam
if nam in ('background', 'pileup', 'escape'):
scale = 1.0
paramval = self.xrfmod.params[ampname].value
s = "_xrfmodel.params['%s'].value = %.5f" % (ampname, paramval*scale)
cmds.append(s)
parr *= scale
parr[np.where(parr<floor)] = floor
total += parr
self.xrfmod.current_model = total
script = '\n'.join(cmds)
self._larch.eval(script)
self.model_script = "%s\n%s" % (self.model_script, script)
s = "{group:s}.xrf_init = _xrfmodel.calc_spectrum({group:s}.energy)"
self._larch.eval(s.format(group=self.mcagroup))
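    # Plot the measured spectrum with either the initial model or the best fit
    # overlaid, optionally adding each model component as its own trace.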
def plot_model(self, model_spectrum=None, init=False, with_comps=True,
label=None):
conf = self.parent.conf
plotkws = {'linewidth': 2.5, 'delay_draw': True, 'grid': False,
'ylog_scale': self.parent.ylog_scale, 'show_legend': False,
'fullbox': False}
ppanel = self.parent.panel
ppanel.conf.reset_trace_properties()
self.parent.plot(self.mca.energy, self.mca.counts, mca=self.mca,
xlabel='E (keV)', xmin=0, with_rois=False, **plotkws)
if model_spectrum is None:
model_spectrum = self.xrfmod.current_model if init else self.xrfmod.best_fit
if label is None:
label = 'predicted model' if init else 'best fit'
self.parent.oplot(self.mca.energy, model_spectrum,
label=label, color=conf.fit_color, **plotkws)
if with_comps:
for label, arr in self.xrfmod.comps.items():
ppanel.oplot(self.mca.energy, arr, label=label, **plotkws)
yscale = {False:'linear', True:'log'}[self.parent.ylog_scale]
ppanel.set_logscale(yscale=yscale)
ppanel.set_viewlimits()
ppanel.conf.set_legend_location('upper right', True)
ppanel.conf.draw_legend(show=True, delay_draw=False)
def onShowModel(self, event=None):
self.build_model()
self.plot_model(init=True, with_comps=True)
def onFitIteration(self, iter=0, pars=None):
pass
# print("Fit iteration %d" % iter)
# self.wids['fit_message'].SetLabel("Fit iteration %d" % iter)
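    # Run the fit: rebuild the model from the current form settings, evaluate
    # the larch fit script over the selected energy window, store the result
    # (newest first) and switch to the Fit Results page.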
def onFitModel(self, event=None):
self.build_model()
xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
xrfmod.iter_callback = self.onFitIteration
fit_script = xrfmod_fitscript.format(group=self.mcagroup,
emin=self.wids['en_min'].GetValue(),
emax=self.wids['en_max'].GetValue())
self._larch.eval(fit_script)
dgroup = self._larch.symtable.get_group(self.mcagroup)
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
xrfresult = self.xrfresults[0]
xrfresult.script = "%s\n%s" % (self.model_script, fit_script)
xrfresult.label = "fit %d" % (len(self.xrfresults))
self.plot_model(init=True, with_comps=True)
for i in range(len(self.nb.pagelist)):
if self.nb.GetPageText(i).strip().startswith('Fit R'):
self.nb.SetSelection(i)
time.sleep(0.002)
self.show_results()
def onClose(self, event=None):
self.Destroy()
def onSaveFitResult(self, event=None):
result = self.get_fitresult()
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '.xrfmodel'
ModelWcards = "XRF Models(*.xrfmodel)|*.xrfmodel|All files (*.*)|*.*"
sfile = FileSave(self, 'Save XRF Model', default_file=deffile,
wildcard=ModelWcards)
if sfile is not None:
self._larch.eval(xrfmod_savejs.format(group=self.mcagroup,
nfit=self.nfit,
filename=sfile))
def onExportFitResult(self, event=None):
result = self.get_fitresult()
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '_xrf.txt'
wcards = 'All files (*.*)|*.*'
        outfile = FileSave(self, 'Export Fit Result', default_file=deffile,
                           wildcard=wcards)
if outfile is not None:
buff = ['# XRF Fit %s: %s' % (self.mca.label, result.label),
'## Fit Script:']
for a in result.script.split('\n'):
buff.append('# %s' % a)
buff.append('## Fit Report:')
for a in result.fit_report.split('\n'):
buff.append('# %s' % a)
buff.append('#')
buff.append('########################################')
labels = ['energy', 'counts', 'best_fit',
'best_energy', 'fit_window',
'fit_weight', 'attenuation']
labels.extend(list(result.comps.keys()))
buff.append('# %s' % (' '.join(labels)))
npts = len(self.mca.energy)
for i in range(npts):
dline = [gformat(self.mca.energy[i]),
gformat(self.mca.counts[i]),
gformat(result.best_fit[i]),
gformat(result.best_en[i]),
gformat(result.fit_window[i]),
gformat(result.fit_weight[i]),
gformat(result.atten[i])]
for c in result.comps.values():
dline.append(gformat(c[i]))
buff.append(' '.join(dline))
buff.append('\n')
with open(outfile, 'w') as fh:
fh.write('\n'.join(buff))
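    # Return the nfit-th stored fit result (0 = most recent), clamping the
    # index to the range of available results.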
def get_fitresult(self, nfit=None):
if nfit is None:
nfit = self.nfit
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
self.nfit = max(0, nfit)
self.nfit = min(self.nfit, len(self.xrfresults)-1)
return self.xrfresults[self.nfit]
def onChangeFitLabel(self, event=None):
label = self.owids['fitlabel_txt'].GetValue()
result = self.get_fitresult()
result.label = label
self.show_results()
def onPlot(self, event=None):
result = self.get_fitresult()
xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
with_comps = self.owids['plot_comps'].IsChecked()
spect = xrfmod.calc_spectrum(self.mca.energy,
params=result.params)
self.plot_model(model_spectrum=spect, with_comps=with_comps,
label=result.label)
def onSelectFit(self, evt=None):
if self.owids['stats'] is None:
return
item = self.owids['stats'].GetSelectedRow()
if item > -1:
self.show_fitresult(nfit=item)
def onSelectParameter(self, evt=None):
if self.owids['params'] is None:
return
if not self.owids['params'].HasSelection():
return
item = self.owids['params'].GetSelectedRow()
pname = self.owids['paramsdata'][item]
cormin= self.owids['min_correl'].GetValue()
self.owids['correl'].DeleteAllItems()
result = self.get_fitresult()
this = result.params[pname]
if this.correl is not None:
sort_correl = sorted(this.correl.items(), key=lambda it: abs(it[1]))
for name, corval in reversed(sort_correl):
if abs(corval) > cormin:
self.owids['correl'].AppendItem((pname, name, "% .4f" % corval))
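    # Show every parameter pair whose correlation exceeds the minimum
    # threshold, sorted by decreasing absolute correlation.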
def onAllCorrel(self, evt=None):
result = self.get_fitresult()
params = result.params
parnames = list(params.keys())
cormin= self.owids['min_correl'].GetValue()
correls = {}
for i, name in enumerate(parnames):
par = params[name]
if not par.vary:
continue
if hasattr(par, 'correl') and par.correl is not None:
for name2 in parnames[i+1:]:
if (name != name2 and name2 in par.correl and
abs(par.correl[name2]) > cormin):
correls["%s$$%s" % (name, name2)] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
self.owids['correl'].DeleteAllItems()
for namepair, corval in sort_correl:
name1, name2 = namepair.split('$$')
self.owids['correl'].AppendItem((name1, name2, "% .4f" % corval))
def show_results(self):
cur = self.get_fitresult()
self.owids['stats'].DeleteAllItems()
for i, res in enumerate(self.xrfresults):
args = [res.label]
for attr in ('nvarys', 'nfev', 'chisqr', 'redchi', 'aic'):
val = getattr(res, attr)
if isinstance(val, int):
val = '%d' % val
else:
val = gformat(val, 11)
args.append(val)
self.owids['stats'].AppendItem(tuple(args))
self.owids['data_title'].SetLabel("%s: %.3f sec" % (self.mca.label, cur.count_time))
self.owids['data_title2'].SetLabel("%s: %.3f sec" % (self.mca.label, cur.count_time))
self.owids['fitlabel_txt'].SetValue(cur.label)
self.show_fitresult(nfit=self.nfit)
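    # Fill the parameter table for one fit: value, standard error, percent
    # uncertainty, and either the initial value, a constraint expression, or
    # a '(fixed)' marker.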
def show_fitresult(self, nfit=0, mca=None):
if mca is not None:
self.mca = mca
result = self.get_fitresult(nfit=nfit)
self.owids['data_title'].SetLabel("%s: %.3f sec" % (self.mca.label, result.count_time))
self.owids['data_title2'].SetLabel("%s: %.3f sec" % (self.mca.label, result.count_time))
self.result = result
self.owids['fitlabel_txt'].SetValue(result.label)
self.owids['params'].DeleteAllItems()
self.owids['paramsdata'] = []
for param in reversed(result.params.values()):
pname = param.name
try:
val = gformat(param.value, 10)
except (TypeError, ValueError):
val = ' ??? '
serr, perr = ' N/A ', ' N/A '
if param.stderr is not None:
serr = gformat(param.stderr, 10)
try:
perr = ' {:.2%}'.format(abs(param.stderr/param.value))
except ZeroDivisionError:
perr = '?'
extra = ' '
if param.expr is not None:
extra = ' = %s ' % param.expr
elif not param.vary:
extra = ' (fixed)'
elif param.init_value is not None:
extra = gformat(param.init_value, 10)
self.owids['params'].AppendItem((pname, val, serr, perr, extra))
self.owids['paramsdata'].append(pname)
self.Refresh()
| #!/usr/bin/env python
"""
fitting GUI for XRF display
"""
import time
import copy
from functools import partial
from collections import OrderedDict
from threading import Thread
import json
import numpy as np
import wx
import wx.lib.agw.pycollapsiblepane as CP
import wx.lib.scrolledpanel as scrolled
import wx.dataview as dv
DVSTYLE = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES
from peakutils import peak
from lmfit import Parameter, Minimizer
from lmfit.printfuncs import gformat
from wxutils import (SimpleText, FloatCtrl, FloatSpin, Choice, Font, pack,
Button, Check, HLine, GridPanel, RowPanel, CEN, LEFT,
RIGHT, FileSave, GUIColors, FRAMESTYLE, BitmapButton,
SetTip, GridPanel, Popup, FloatSpinWithPin, get_icon,
fix_filename)
from . import FONTSIZE
from xraydb import (material_mu, xray_edge, materials, add_material,
atomic_number, atomic_symbol, xray_line)
from .notebooks import flatnotebook
from .parameter import ParameterPanel
from .periodictable import PeriodicTablePanel
from larch import Group
from ..xrf import xrf_background, MCA, FanoFactors
from ..utils.jsonutils import encode4js, decode4js
from .xrfdisplay_utils import (XRFGROUP, mcaname, XRFRESULTS_GROUP,
MAKE_XRFRESULTS_GROUP)
def read_filterdata(flist, _larch):
""" read filters data"""
materials = _larch.symtable.get_symbol('_xray._materials')
out = OrderedDict()
out['None'] = ('', 0)
for name in flist:
if name in materials:
out[name] = materials[name]
return out
def VarChoice(p, default=0, size=(75, -1)):
return Choice(p, choices=['Fix', 'Vary'],
size=size, default=default)
NFILTERS = 4
MIN_CORREL = 0.10
tooltips = {'ptable': 'Select Elements to include in model',
'step': 'size of step extending to low energy side of peak, fraction of peak height',
'gamma': 'gamma (lorentzian-like weight) of Voigt function',
'tail': 'intensity of tail function at low energy side of peak',
'beta': 'width of tail function at low energy side of peak',
'sigmax': 'scale sigma from Energy/Noise by this amount',
}
CompositionUnits = ('ng/mm^2', 'wt %', 'ppm')
Detector_Materials = ['Si', 'Ge']
EFano_Text = 'Peak Widths: sigma = sqrt(E_Fano * Energy + Noise**2) '
Geom_Text = 'Angles in degrees: 90=normal to surface, 0=grazing surface'
Energy_Text = 'All energies in keV'
xrfmod_setup = """## Set up XRF Model
_xrfmodel = xrf_model(xray_energy={en_xray:.2f}, count_time={count_time:.5f},
energy_min={en_min:.2f}, energy_max={en_max:.2f})
_xrfmodel.set_detector(thickness={det_thk:.5f}, material='{det_mat:s}',
cal_offset={cal_offset:.5f}, cal_slope={cal_slope:.5f},
vary_cal_offset={cal_vary!r}, vary_cal_slope={cal_vary!r},
peak_step={peak_step:.5f}, vary_peak_step={peak_step_vary:s},
peak_tail={peak_tail:.5f}, vary_peak_tail={peak_tail_vary:s},
peak_beta={peak_beta:.5f}, vary_peak_beta={peak_beta_vary:s},
peak_gamma={peak_gamma:.5f}, vary_peak_gamma={peak_gamma_vary:s},
noise={det_noise:.5f}, vary_noise={det_noise_vary:s})"""
xrfmod_scattpeak = """_xrfmodel.add_scatter_peak(name='{peakname:s}', center={_cen:.2f},
amplitude=1e5, step={_step:.5f}, tail={_tail:.5f}, beta={_beta:.5f},
sigmax={_sigma:.5f}, vary_center={vcen:s}, vary_step={vstep:s},
vary_tail={vtail:s}, vary_beta={vbeta:s}, vary_sigmax={vsigma:s})"""
xrfmod_fitscript = """
_xrffitresult = _xrfmodel.fit_spectrum({group:s}, energy_min={emin:.2f}, energy_max={emax:.2f})
_xrfresults.insert(0, _xrffitresult)
"""
xrfmod_filter = "_xrfmodel.add_filter('{name:s}', {thick:.5f}, vary_thickness={vary:s})"
xrfmod_matrix = "_xrfmodel.set_matrix('{name:s}', {thick:.5f}, density={density:.5f})"
xrfmod_pileup = "_xrfmodel.add_pileup(scale={scale:.3f}, vary={vary:s})"
xrfmod_escape = "_xrfmodel.add_escape(scale={scale:.3f}, vary={vary:s})"
xrfmod_savejs = "_xrfresults[{nfit:d}].save('{filename:s}')"
xrfmod_elems = """
for atsym in {elemlist:s}:
_xrfmodel.add_element(atsym)
#endfor
del atsym"""
Filter_Lengths = ['microns', 'mm', 'cm']
Filter_Materials = ['None', 'air', 'nitrogen', 'helium', 'kapton',
'beryllium', 'aluminum', 'mylar', 'pmma']
class FitSpectraFrame(wx.Frame):
"""Frame for Spectral Analysis"""
def __init__(self, parent, size=(700, 825)):
self.parent = parent
self._larch = parent.larch
symtable = self._larch.symtable
# fetch current spectra from parent
if not symtable.has_group(XRFRESULTS_GROUP):
self._larch.eval(MAKE_XRFRESULTS_GROUP)
self.xrfresults = symtable.get_symbol(XRFRESULTS_GROUP)
xrfgroup = symtable.get_group(XRFGROUP)
mcagroup = getattr(xrfgroup, '_mca')
self.mca = getattr(xrfgroup, mcagroup)
self.mcagroup = '%s.%s' % (XRFGROUP, mcagroup)
efactor = 1.0 if max(self.mca.energy) < 250. else 1000.0
if self.mca.incident_energy is None:
self.mca.incident_energy = 20.0
if self.mca.incident_energy > 250:
self.mca.incident_energy /= 1000.0
self.nfit = 0
self.colors = GUIColors()
wx.Frame.__init__(self, parent, -1, 'Fit XRF Spectra',
size=size, style=wx.DEFAULT_FRAME_STYLE)
self.wids = {}
self.owids = {}
pan = GridPanel(self)
mca_label = getattr(self.mca, 'label', None)
if mca_label is None:
mca_label = getattr(self.mca, 'filename', 'mca')
self.wids['mca_name'] = SimpleText(pan, mca_label, size=(300, -1), style=LEFT)
self.wids['btn_calc'] = Button(pan, 'Calculate Model', size=(150, -1),
action=self.onShowModel)
self.wids['btn_fit'] = Button(pan, 'Fit Model', size=(150, -1),
action=self.onFitModel)
pan.AddText(" XRF Spectrum: ", colour='#880000')
pan.Add(self.wids['mca_name'], dcol=3)
pan.Add(self.wids['btn_calc'], newrow=True)
pan.Add(self.wids['btn_fit'])
self.panels = {}
self.panels['Beam & Detector'] = self.beamdet_page
self.panels['Filters & Matrix'] = self.materials_page
self.panels['Elements & Peaks'] = self.elempeaks_page
self.panels['Fit Results'] = self.fitresult_page
self.panels['Composition'] = self.composition_page
self.nb = flatnotebook(pan, self.panels,
on_change=self.onNBChanged)
pan.Add((5, 5), newrow=True)
pan.Add(self.nb, dcol=5, drow=10, newrow=True)
pan.pack()
self.Show()
self.Raise()
def onNBChanged(self, event=None):
pagelabel = self.nb._pages.GetPageText(event.GetSelection()).strip()
if pagelabel.startswith('Composition'):
self.UpdateCompositionPage()
def elempeaks_page(self, **kws):
"elements and peaks parameters"
mca = self.parent.mca
wids = self.wids
p = GridPanel(self)
self.selected_elems = []
self.ptable = PeriodicTablePanel(p, multi_select=True, fontsize=12,
tooltip_msg=tooltips['ptable'],
onselect=self.onElemSelect)
dstep, dtail, dbeta, dgamma = 0.05, 0.10, 0.5, 0.05
wids['peak_step'] = FloatSpin(p, value=dstep, digits=3, min_val=0,
max_val=1.0, increment=0.01,
tooltip=tooltips['step'])
wids['peak_gamma'] = FloatSpin(p, value=dgamma, digits=3, min_val=0,
max_val=10.0, increment=0.01,
tooltip=tooltips['gamma'])
wids['peak_tail'] = FloatSpin(p, value=dtail, digits=3, min_val=0,
max_val=1.0, increment=0.05,
tooltip=tooltips['tail'])
wids['peak_beta'] = FloatSpin(p, value=dbeta, digits=3, min_val=0,
max_val=10.0, increment=0.01,
tooltip=tooltips['beta'])
wids['peak_step_vary'] = VarChoice(p, default=0)
wids['peak_tail_vary'] = VarChoice(p, default=0)
wids['peak_gamma_vary'] = VarChoice(p, default=0)
wids['peak_beta_vary'] = VarChoice(p, default=0)
btn_from_peaks = Button(p, 'Guess Peaks', size=(150, -1),
action=self.onElems_GuessPeaks)
# tooltip='Guess elements from peak locations')
btn_from_rois = Button(p, 'Use ROIS as Peaks', size=(150, -1),
action=self.onElems_FromROIS)
btn_clear_elems = Button(p, 'Clear All Peaks', size=(150, -1),
action=self.onElems_Clear)
wx.CallAfter(self.onElems_GuessPeaks)
p.AddText('Elements to model:', colour='#880000', dcol=2)
p.Add((2, 2), newrow=True)
p.Add(self.ptable, dcol=5, drow=5)
irow = p.irow
p.Add(btn_from_peaks, icol=6, dcol=2, irow=irow)
p.Add(btn_from_rois, icol=6, dcol=2, irow=irow+1)
p.Add(btn_clear_elems, icol=6, dcol=2, irow=irow+2)
p.irow += 5
p.Add((2, 2), newrow=True)
p.AddText(' Step: ')
p.Add(wids['peak_step'])
p.Add(wids['peak_step_vary'])
p.AddText(' Gamma : ')
p.Add(wids['peak_gamma'])
p.Add(wids['peak_gamma_vary'])
p.Add((2, 2), newrow=True)
p.AddText(' Beta: ')
p.Add(wids['peak_beta'])
p.Add(wids['peak_beta_vary'])
p.AddText(' Tail: ')
p.Add(wids['peak_tail'])
p.Add(wids['peak_tail_vary'])
p.Add((2, 2), newrow=True)
p.Add(HLine(p, size=(650, 3)), dcol=8)
p.Add((2, 2), newrow=True)
# name, escale, step, sigmax, beta, tail
scatter_peaks = (('Elastic', 1.00, 0.05, 1.0, 0.5, 0.10),
('Compton1', 0.97, 0.05, 1.5, 2.0, 0.25),
('Compton2', 0.94, 0.05, 2.0, 2.5, 0.25))
opts = dict(size=(100, -1), min_val=0, digits=4, increment=0.010)
for name, escale, dstep, dsigma, dbeta, dtail in scatter_peaks:
en = escale * self.mca.incident_energy
t = name.lower()
vary_en = 1 if t.startswith('compton') else 0
wids['%s_use'%t] = Check(p, label='Include', default=True)
wids['%s_cen_vary'%t] = VarChoice(p, default=vary_en)
wids['%s_step_vary'%t] = VarChoice(p, default=0)
wids['%s_beta_vary'%t] = VarChoice(p, default=0)
wids['%s_tail_vary'%t] = VarChoice(p, default=0)
wids['%s_sigma_vary'%t] = VarChoice(p, default=0)
wids['%s_cen'%t] = FloatSpin(p, value=en, digits=3, min_val=0,
increment=0.01)
wids['%s_step'%t] = FloatSpin(p, value=dstep, digits=3, min_val=0,
max_val=1.0, increment=0.01,
tooltip=tooltips['step'])
wids['%s_tail'%t] = FloatSpin(p, value=dtail, digits=3, min_val=0,
max_val=1.0, increment=0.05,
tooltip=tooltips['tail'])
wids['%s_beta'%t] = FloatSpin(p, value=dbeta, digits=3, min_val=0,
max_val=10.0, increment=0.10,
tooltip=tooltips['beta'])
wids['%s_sigma'%t] = FloatSpin(p, value=dsigma, digits=3, min_val=0,
max_val=10.0, increment=0.05,
tooltip=tooltips['sigmax'])
p.Add((2, 2), newrow=True)
p.AddText(" %s Peak:" % name, colour='#880000')
p.Add(wids['%s_use' % t], dcol=2)
p.AddText(' Energy (keV): ')
p.Add(wids['%s_cen'%t])
p.Add(wids['%s_cen_vary'%t])
p.Add((2, 2), newrow=True)
p.AddText(' Step: ')
p.Add(wids['%s_step'%t])
p.Add(wids['%s_step_vary'%t])
p.AddText(' Sigma Scale : ')
p.Add(wids['%s_sigma'%t])
p.Add(wids['%s_sigma_vary'%t])
p.Add((2, 2), newrow=True)
p.AddText(' Beta : ')
p.Add(wids['%s_beta'%t])
p.Add(wids['%s_beta_vary'%t])
p.AddText(' Tail: ')
p.Add(wids['%s_tail'%t])
p.Add(wids['%s_tail_vary'%t])
p.Add((2, 2), newrow=True)
p.Add(HLine(p, size=(650, 3)), dcol=7)
p.pack()
return p
def beamdet_page(self, **kws):
"beam / detector settings"
mca = self.mca
en_min = 2.0
en_max = self.mca.incident_energy
cal_offset = getattr(mca, 'offset', 0)
cal_slope = getattr(mca, 'slope', 0.010)
det_noise = getattr(mca, 'det_noise', 0.035)
escape_amp = getattr(mca, 'escape_amp', 1.0)
pileup_amp = getattr(mca, 'pileup_amp', 0.1)
wids = self.wids
# main = wx.Panel(self)
pdet = GridPanel(self, itemstyle=LEFT)
def addLine(pan):
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
wids['escape_use'] = Check(pdet, label='Include Escape in Fit',
default=True, action=self.onUsePileupEscape)
wids['escape_amp'] = FloatSpin(pdet, value=escape_amp,
min_val=0, max_val=100, digits=2,
increment=0.02, size=(100, -1))
wids['pileup_use'] = Check(pdet, label='Include Pileup in Fit',
default=True, action=self.onUsePileupEscape)
wids['pileup_amp'] = FloatSpin(pdet, value=pileup_amp,
min_val=0, max_val=100, digits=2,
increment=0.02, size=(100, -1))
wids['escape_amp_vary'] = VarChoice(pdet, default=True)
wids['pileup_amp_vary'] = VarChoice(pdet, default=True)
wids['cal_slope'] = FloatSpin(pdet, value=cal_slope,
min_val=0, max_val=100,
digits=4, increment=0.01, size=(100, -1))
wids['cal_offset'] = FloatSpin(pdet, value=cal_offset,
min_val=-500, max_val=500,
digits=4, increment=0.01, size=(100, -1))
wids['cal_vary'] = Check(pdet, label='Vary Calibration in Fit', default=True)
wids['det_mat'] = Choice(pdet, choices=Detector_Materials,
size=(70, -1), default=0,
action=self.onDetMaterial)
wids['det_thk'] = FloatSpin(pdet, value=0.400, size=(100, -1),
increment=0.010, min_val=0, max_val=10,
digits=4)
wids['det_noise_vary'] = VarChoice(pdet, default=1)
opts = dict(size=(100, -1), min_val=0, max_val=500, digits=3,
increment=0.10)
wids['en_xray'] = FloatSpin(pdet, value=self.mca.incident_energy,
action=self.onSetXrayEnergy, **opts)
wids['en_min'] = FloatSpin(pdet, value=en_min, **opts)
wids['en_max'] = FloatSpin(pdet, value=en_max, **opts)
wids['flux_in'] = FloatCtrl(pdet, value=5.e10, gformat=True,
minval=0, size=(100, -1))
opts.update({'increment': 0.005})
wids['det_noise'] = FloatSpin(pdet, value=det_noise, **opts)
wids['det_efano'] = SimpleText(pdet, size=(200, -1),
label='E_Fano= %.4e' % FanoFactors['Si'])
opts.update(digits=1, max_val=90, min_val=0, increment=1)
wids['angle_in'] = FloatSpin(pdet, value=45, **opts)
wids['angle_out'] = FloatSpin(pdet, value=45, **opts)
opts.update(digits=1, max_val=5e9, min_val=0, increment=1)
wids['det_dist'] = FloatSpin(pdet, value=50, **opts)
wids['det_area'] = FloatSpin(pdet, value=50, **opts)
for notyet in ('angle_in', 'angle_out', 'det_dist', 'det_area',
'flux_in'):
wids[notyet].Disable()
pdet.AddText(' Beam Energy, Fit Range :', colour='#880000', dcol=2)
pdet.AddText(' X-ray Energy (keV): ', newrow=True)
pdet.Add(wids['en_xray'])
pdet.AddText('Incident Flux (Hz): ', newrow=False)
pdet.Add(wids['flux_in'])
pdet.AddText(' Fit Energy Min (keV): ', newrow=True)
pdet.Add(wids['en_min'])
pdet.AddText('Fit Energy Max (keV): ')
pdet.Add(wids['en_max'])
addLine(pdet)
pdet.AddText(' Energy Calibration :', colour='#880000', dcol=1, newrow=True)
pdet.Add(wids['cal_vary'], dcol=2)
pdet.AddText(' Offset (keV): ', newrow=True)
pdet.Add(wids['cal_offset'])
pdet.AddText('Slope (keV/bin): ')
pdet.Add(wids['cal_slope'])
addLine(pdet)
pdet.AddText(' Detector Material:', colour='#880000', dcol=1, newrow=True)
pdet.AddText(EFano_Text, dcol=3)
pdet.AddText(' Material: ', newrow=True)
pdet.Add(wids['det_mat'])
pdet.Add(wids['det_efano'], dcol=2)
pdet.AddText(' Thickness (mm): ', newrow=True)
pdet.Add(wids['det_thk'])
pdet.AddText(' Noise (keV): ', newrow=True)
pdet.Add(wids['det_noise'])
pdet.Add(wids['det_noise_vary'], dcol=2)
addLine(pdet)
pdet.AddText(' Escape && Pileup:', colour='#880000', dcol=2, newrow=True)
pdet.AddText(' Escape Scale:', newrow=True)
pdet.Add(wids['escape_amp'])
pdet.Add(wids['escape_amp_vary'])
pdet.Add(wids['escape_use'], dcol=3)
pdet.AddText(' Pileup Scale:', newrow=True)
pdet.Add(wids['pileup_amp'])
pdet.Add(wids['pileup_amp_vary'])
pdet.Add(wids['pileup_use'], dcol=3)
addLine(pdet)
pdet.AddText(' Geometry:', colour='#880000', dcol=1, newrow=True)
pdet.AddText(Geom_Text, dcol=3)
pdet.AddText(' Incident Angle (deg):', newrow=True)
pdet.Add(wids['angle_in'])
pdet.AddText(' Exit Angle (deg):', newrow=False)
pdet.Add(wids['angle_out'])
pdet.AddText(' Detector Distance (mm): ', newrow=True)
pdet.Add(wids['det_dist'])
pdet.AddText(' Detector Area (mm^2): ', newrow=False)
pdet.Add(wids['det_area'])
addLine(pdet)
pdet.pack()
return pdet
def materials_page(self, **kws):
"filters and matrix settings"
wids = self.wids
pan = GridPanel(self, itemstyle=LEFT)
pan.AddText(' Filters :', colour='#880000', dcol=2) # , newrow=True)
pan.AddManyText((' Filter #', 'Material', 'Thickness (mm)',
'Vary Thickness'), style=LEFT, newrow=True)
opts = dict(size=(125, -1), min_val=0, digits=5, increment=0.005)
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
wids['%s_mat'%t] = Choice(pan, choices=Filter_Materials, default=0,
size=(150, -1),
action=partial(self.onFilterMaterial, index=i+1))
wids['%s_thk'%t] = FloatSpin(pan, value=0.0, **opts)
wids['%s_var'%t] = VarChoice(pan, default=0)
if i == 0: # first selection
wids['%s_mat'%t].SetStringSelection('beryllium')
wids['%s_thk'%t].SetValue(0.0250)
elif i == 1: # second selection
wids['%s_mat'%t].SetStringSelection('air')
wids['%s_thk'%t].SetValue(50.00)
elif i == 2: # third selection
wids['%s_mat'%t].SetStringSelection('kapton')
wids['%s_thk'%t].SetValue(0.00)
            elif i == 3: # fourth selection
wids['%s_mat'%t].SetStringSelection('aluminum')
wids['%s_thk'%t].SetValue(0.00)
pan.AddText(' %i' % (i+1), newrow=True)
pan.Add(wids['%s_mat' % t])
pan.Add(wids['%s_thk' % t])
pan.Add(wids['%s_var' % t])
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
pan.AddText(' Matrix:', colour='#880000', newrow=True)
pan.AddText(' NOTE: thin film limit only', dcol=3)
wids['matrix_mat'] = wx.TextCtrl(pan, value='', size=(275, -1))
wids['matrix_thk'] = FloatSpin(pan, value=0.0, **opts)
wids['matrix_den'] = FloatSpin(pan, value=1.0, **opts)
wids['matrix_btn'] = Button(pan, 'Use Material', size=(175, -1),
action=self.onUseCurrentMaterialAsFilter)
wids['matrix_btn'].Disable()
pan.AddText(' Material/Formula:', dcol=1, newrow=True)
pan.Add(wids['matrix_mat'], dcol=2)
pan.Add(wids['matrix_btn'], dcol=3)
pan.AddText(' Thickness (mm):', newrow=True)
pan.Add(wids['matrix_thk'])
        pan.AddText(' Density (g/cm^3):', newrow=False)
pan.Add(wids['matrix_den'])
pan.Add(HLine(pan, size=(650, 3)), dcol=6, newrow=True)
# Materials
pan.AddText(' Known Materials:', colour='#880000', dcol=4, newrow=True)
mview = self.owids['materials'] = dv.DataViewListCtrl(pan, style=DVSTYLE)
mview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectMaterial)
self.selected_material = ''
mview.AppendTextColumn('Name', width=150)
mview.AppendTextColumn('Formula', width=325)
mview.AppendTextColumn('density', width=90)
mview.AppendToggleColumn('Filter?', width=75)
for col in range(4):
this = mview.Columns[col]
align = wx.ALIGN_LEFT
this.Sortable = True
this.Alignment = this.Renderer.Alignment = align
mview.SetMinSize((675, 170))
mview.DeleteAllItems()
self.materials_data = {}
for name, data in materials._read_materials_db().items():
formula, density = data
self.materials_data[name] = (formula, density)
mview.AppendItem((name, formula, "%9.6f"%density,
name in Filter_Materials))
pan.Add(mview, dcol=5, newrow=True)
pan.AddText(' Add Material:', colour='#880000', newrow=True)
pan.Add(Button(pan, 'Add', size=(175, -1),
action=self.onAddMaterial))
pan.Add((10, 10))
bx = Button(pan, 'Update Filter List', size=(175, -1),
action=self.onUpdateFilterList)
pan.Add(bx)
self.owids['newmat_name'] = wx.TextCtrl(pan, value='', size=(175, -1))
self.owids['newmat_dens'] = FloatSpin(pan, value=1.0, **opts)
self.owids['newmat_form'] = wx.TextCtrl(pan, value='', size=(400, -1))
for notyet in ('matrix_mat', 'matrix_thk', 'matrix_den',
'matrix_btn'):
wids[notyet].Disable()
pan.AddText(' Name:', newrow=True)
pan.Add(self.owids['newmat_name'])
        pan.AddText(' Density (g/cm^3):', newrow=False)
pan.Add(self.owids['newmat_dens'])
pan.AddText(' Formula:', newrow=True)
pan.Add(self.owids['newmat_form'], dcol=3)
pan.pack()
return pan
def fitresult_page(self, **kws):
sizer = wx.GridBagSizer(10, 5)
panel = scrolled.ScrolledPanel(self)
# title row
wids = self.owids
title = SimpleText(panel, 'Fit Results', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['data_title'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['fitlabel_lab'] = SimpleText(panel, ' Fit Label: ')
wids['fitlabel_txt'] = wx.TextCtrl(panel, -1, ' ', size=(150, -1))
wids['fitlabel_btn'] = Button(panel, 'Set Label', size=(150, -1),
action=self.onChangeFitLabel)
opts = dict(default=False, size=(175, -1), action=self.onPlot)
wids['plot_comps'] = Check(panel, label='Show Components?', **opts)
self.plot_choice = Button(panel, 'Plot',
size=(150, -1), action=self.onPlot)
self.save_result = Button(panel, 'Save Model',
size=(150, -1), action=self.onSaveFitResult)
SetTip(self.save_result, 'save model and result to be loaded later')
self.export_fit = Button(panel, 'Export Fit',
size=(150, -1), action=self.onExportFitResult)
SetTip(self.export_fit, 'save arrays and results to text file')
irow = 0
sizer.Add(title, (irow, 0), (1, 1), LEFT)
sizer.Add(wids['data_title'], (irow, 1), (1, 3), LEFT)
irow += 1
sizer.Add(self.save_result, (irow, 0), (1, 1), LEFT)
sizer.Add(self.export_fit, (irow, 1), (1, 1), LEFT)
sizer.Add(self.plot_choice, (irow, 2), (1, 1), LEFT)
sizer.Add(wids['plot_comps'], (irow, 3), (1, 1), LEFT)
irow += 1
sizer.Add(wids['fitlabel_lab'], (irow, 0), (1, 1), LEFT)
sizer.Add(wids['fitlabel_txt'], (irow, 1), (1, 1), LEFT)
sizer.Add(wids['fitlabel_btn'], (irow, 2), (1, 2), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Fit Statistics]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
sizer.Add(title, (irow, 0), (1, 4), LEFT)
sview = wids['stats'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
sview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectFit)
sview.AppendTextColumn(' Fit Label', width=90)
sview.AppendTextColumn(' N_vary', width=65)
sview.AppendTextColumn(' N_eval', width=65)
sview.AppendTextColumn(' \u03c7\u00B2', width=125)
sview.AppendTextColumn(' \u03c7\u00B2_reduced', width=125)
sview.AppendTextColumn(' Akaike Info', width=125)
for col in range(sview.ColumnCount):
this = sview.Columns[col]
isort, align = True, wx.ALIGN_RIGHT
if col == 0:
align = wx.ALIGN_CENTER
this.Sortable = isort
this.Alignment = this.Renderer.Alignment = align
sview.SetMinSize((675, 150))
irow += 1
sizer.Add(sview, (irow, 0), (1, 5), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Variables]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
sizer.Add(title, (irow, 0), (1, 1), LEFT)
pview = wids['params'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
wids['paramsdata'] = []
pview.AppendTextColumn('Parameter', width=150)
pview.AppendTextColumn('Refined Value', width=100)
pview.AppendTextColumn('Standard Error', width=100)
pview.AppendTextColumn('% Uncertainty', width=100)
pview.AppendTextColumn('Initial Value', width=150)
for col in range(4):
this = pview.Columns[col]
align = wx.ALIGN_LEFT
if col > 0:
align = wx.ALIGN_RIGHT
this.Sortable = False
this.Alignment = this.Renderer.Alignment = align
pview.SetMinSize((675, 200))
pview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectParameter)
irow += 1
sizer.Add(pview, (irow, 0), (1, 5), LEFT)
irow += 1
sizer.Add(HLine(panel, size=(650, 3)), (irow, 0), (1, 5), LEFT)
irow += 1
title = SimpleText(panel, '[[Correlations]]', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['all_correl'] = Button(panel, 'Show All',
size=(100, -1), action=self.onAllCorrel)
wids['min_correl'] = FloatSpin(panel, value=MIN_CORREL,
min_val=0, size=(100, -1),
digits=3, increment=0.1)
ctitle = SimpleText(panel, 'minimum correlation: ')
sizer.Add(title, (irow, 0), (1, 1), LEFT)
sizer.Add(ctitle, (irow, 1), (1, 1), LEFT)
sizer.Add(wids['min_correl'], (irow, 2), (1, 1), LEFT)
sizer.Add(wids['all_correl'], (irow, 3), (1, 1), LEFT)
cview = wids['correl'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
cview.AppendTextColumn('Parameter 1', width=150)
cview.AppendTextColumn('Parameter 2', width=150)
cview.AppendTextColumn('Correlation', width=150)
for col in (0, 1, 2):
this = cview.Columns[col]
this.Sortable = False
align = wx.ALIGN_LEFT
if col == 2:
align = wx.ALIGN_RIGHT
this.Alignment = this.Renderer.Alignment = align
cview.SetMinSize((675, 125))
irow += 1
sizer.Add(cview, (irow, 0), (1, 5), LEFT)
pack(panel, sizer)
panel.SetMinSize((675, 725))
panel.SetupScrolling()
return panel
def composition_page(self, **kws):
sizer = wx.GridBagSizer(10, 5)
panel = scrolled.ScrolledPanel(self)
wids = self.owids
title = SimpleText(panel, 'Composition Results', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
wids['data_title2'] = SimpleText(panel, '< > ', font=Font(FONTSIZE+1),
colour=self.colors.title, style=LEFT)
cview = wids['composition'] = dv.DataViewListCtrl(panel, style=DVSTYLE)
cview.AppendTextColumn(' Z ', width=50)
cview.AppendTextColumn(' Element ', width=100)
cview.AppendTextColumn(' Amplitude', width=150)
cview.AppendTextColumn(' Concentration', width=150)
cview.AppendTextColumn(' Uncertainty', width=150)
for col in range(5):
this = cview.Columns[col]
align = wx.ALIGN_RIGHT
if col == 1:
align = wx.ALIGN_LEFT
this.Sortable = True
this.Alignment = this.Renderer.Alignment = align
cview.SetMinSize((675, 500))
wids['comp_fitlabel'] = Choice(panel, choices=[''], size=(175, -1),
action=self.onCompSelectFit)
self.compscale_lock = 0.0
wids['comp_elemchoice'] = Choice(panel, choices=[''], size=(100, -1))
# action=self.onCompSetElemAbundance)
wids['comp_elemscale'] = FloatSpin(panel, value=1.0, digits=5, min_val=0,
increment=0.01,
action=self.onCompSetElemAbundance)
wids['comp_units'] = Choice(panel, choices=CompositionUnits, size=(100, -1))
wids['comp_scale'] = FloatCtrl(panel, value=0, size=(200, -1), precision=5,
minval=0, action=self.onCompSetScale)
wids['comp_save'] = Button(panel, 'Save This Concentration Data',
size=(200, -1), action=self.onCompSave)
irow = 0
sizer.Add(title, (irow, 0), (1, 2), LEFT)
sizer.Add(wids['data_title2'], (irow, 2), (1, 5), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Fit Label:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_fitlabel'], (irow, 1), (1, 5), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Scale Element:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_elemchoice'], (irow, 1), (1, 1), LEFT)
sizer.Add(SimpleText(panel, ' to:'), (irow, 2), (1, 1), LEFT)
sizer.Add(wids['comp_elemscale'], (irow, 3), (1, 1), LEFT)
sizer.Add(wids['comp_units'], (irow, 4), (1, 1), LEFT)
irow += 1
sizer.Add(SimpleText(panel, 'Scaling Factor:'), (irow, 0), (1, 1), LEFT)
sizer.Add(wids['comp_scale'], (irow, 1), (1, 3), LEFT)
irow += 1
sizer.Add(wids['composition'], (irow, 0), (3, 6), LEFT)
irow += 3
sizer.Add(wids['comp_save'], (irow, 0), (1, 3), LEFT)
pack(panel, sizer)
panel.SetMinSize((675, 750))
panel.SetupScrolling()
return panel
def onCompSetScale(self, event=None, value=None):
if len(self.xrfresults) < 1 or (time.time() - self.compscale_lock) < 0.25:
return
self.compscale_lock = time.time()
owids = self.owids
result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())
cur_elem = owids['comp_elemchoice'].GetStringSelection()
conc_vals = {}
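        # gather the fitted amplitude and stderr for every element before rescaling them into concentrations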
for elem in result.comps.keys():
parname = 'amp_%s' % elem.lower()
if parname in result.params:
par = result.params[parname]
conc_vals[elem] = [par.value, par.stderr]
try:
scale = self.owids['comp_scale'].GetValue()
except:
return
owids['comp_elemscale'].SetValue(conc_vals[cur_elem][0]*scale)
owids['composition'].DeleteAllItems()
result.concentration_results = conc_vals
result.concentration_scale = scale
for elem, dat in conc_vals.items():
zat = "%d" % atomic_number(elem)
val, serr = dat
rval = "%15.4f" % val
sval = "%15.4f" % (val*scale)
uval = "%15.4f" % (serr*scale)
try:
uval = uval + ' ({:.2%})'.format(abs(serr/val))
except ZeroDivisionError:
pass
owids['composition'].AppendItem((zat, elem, rval, sval, uval))
def onCompSetElemAbundance(self, event=None, value=None):
if len(self.xrfresults) < 1 or (time.time() - self.compscale_lock) < 0.25:
return
self.compscale_lock = time.time()
owids = self.owids
result = self.get_fitresult(nfit=owids['comp_fitlabel'].GetSelection())
cur_elem = owids['comp_elemchoice'].GetStringSelection()
conc_vals = {}
for elem in result.comps.keys():
parname = 'amp_%s' % elem.lower()
if parname in result.params:
par = result.params[parname]
conc_vals[elem] = [par.value, par.stderr]
result.concentration_results = conc_vals
elem_value = owids['comp_elemscale'].GetValue()
scale = elem_value/conc_vals[cur_elem][0]
result.concentration_scale = scale
owids['comp_scale'].SetValue(scale)
owids['composition'].DeleteAllItems()
for elem, dat in conc_vals.items():
zat = "%d" % atomic_number(elem)
val, serr = dat
rval = "%15.4f" % val
sval = "%15.4f" % (val*scale)
uval = "%15.4f" % (serr*scale)
try:
uval = uval + ' ({:.2%})'.format(abs(serr/val))
except ZeroDivisionError:
pass
owids['composition'].AppendItem((zat, elem, rval, sval, uval))
def onCompSave(self, event=None):
result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())
scale = result.concentration_scale
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '_xrf.csv'
wcards = "CSV (*.csv)|*.csv|All files (*.*)|*.*"
sfile = FileSave(self, 'Save Concentration Results',
default_file=deffile,
wildcard=wcards)
if sfile is not None:
buff = ["# results for MCA labeled: %s" % self.mca.label,
"# fit label: %s" % result.label,
"# concentration units: %s" % self.owids['comp_units'].GetStringSelection(),
"# count time: %s" % result.count_time,
"# scale: %s" % result.concentration_scale,
"# Fit Report:" ]
for l in result.fit_report.split('\n'):
buff.append("# %s" % l)
buff.append("###########")
buff.append("#Element Concentration Uncertainty RawAmplitude")
for elem, dat in result.concentration_results.items():
eout = (elem + ' '*4)[:4]
val, serr = dat
rval = "%16.5f" % val
sval = "%16.5f" % (val/scale)
uval = "%16.5f" % (serr/scale)
                buff.append(" ".join([eout, sval, uval, rval]))
buff.append('')
with open(sfile, 'w') as fh:
fh.write('\n'.join(buff))
def onCompSelectFit(self, event=None):
result = self.get_fitresult(nfit=self.owids['comp_fitlabel'].GetSelection())
cur_elem = self.owids['comp_elemchoice'].GetStringSelection()
self.owids['comp_elemchoice'].Clear()
elems = [el['symbol'] for el in result.elements]
self.owids['comp_elemchoice'].SetChoices(elems)
if len(cur_elem) > 0:
self.owids['comp_elemchoice'].SetStringSelection(cur_elem)
else:
self.owids['comp_elemchoice'].SetSelection(0)
self.onCompSetElemAbundance()
def UpdateCompositionPage(self, event=None):
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
if len(self.xrfresults) > 0:
result = self.get_fitresult()
fitlab = self.owids['comp_fitlabel']
fitlab.Clear()
fitlab.SetChoices([a.label for a in self.xrfresults])
fitlab.SetStringSelection(result.label)
self.onCompSelectFit()
def onElems_Clear(self, event=None):
self.ptable.on_clear_all()
def onElems_GuessPeaks(self, event=None):
mca = self.mca
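        # find candidate peaks in the measured counts, then match their energies to tabulated Ka/Kb lines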
_indices = peak.indexes(mca.counts*1.0, min_dist=5, thres=0.025)
peak_energies = mca.energy[_indices]
elrange = range(10, 92)
atsyms = [atomic_symbol(i) for i in elrange]
kalphas = [0.001*xray_line(i, 'Ka').energy for i in elrange]
kbetas = [0.001*xray_line(i, 'Kb').energy for i in elrange]
self.ptable.on_clear_all()
elems = []
for iz, en in enumerate(peak_energies):
for i, ex in enumerate(kalphas):
if abs(en - ex) < 0.025:
elems.append(atsyms[i])
peak_energies[iz] = -ex
for iz, en in enumerate(peak_energies):
if en > 0:
for i, ex in enumerate(kbetas):
if abs(en - ex) < 0.025:
if atsyms[i] not in elems:
elems.append(atsyms[i])
peak_energies[iz] = -ex
en = self.wids['en_xray'].GetValue()
emin = self.wids['en_min'].GetValue()
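        # keep an element only if one of its K or L absorption edges lies inside the fit energy window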
for elem in elems:
kedge = 0.001*xray_edge(elem, 'K').energy
l3edge = 0.001*xray_edge(elem, 'L3').energy
            l2edge = 0.001*xray_edge(elem, 'L2').energy
if ((kedge < en and kedge > emin) or
(l3edge < en and l3edge > emin) or
(l2edge < en and l2edge > emin)):
if elem not in self.ptable.selected:
self.ptable.onclick(label=elem)
def onElems_FromROIS(self, event=None):
for roi in self.mca.rois:
words = roi.name.split()
elem = words[0].title()
if (elem in self.ptable.syms and
elem not in self.ptable.selected):
self.ptable.onclick(label=elem)
self.onSetXrayEnergy()
def onSetXrayEnergy(self, event=None):
en = self.wids['en_xray'].GetValue()
self.wids['en_max'].SetValue(en)
self.wids['elastic_cen'].SetValue(en)
self.wids['compton1_cen'].SetValue(en*0.975)
self.wids['compton2_cen'].SetValue(en*0.950)
emin = self.wids['en_min'].GetValue() * 1.25
self.ptable.on_clear_all()
for roi in self.mca.rois:
words = roi.name.split()
elem = words[0].title()
kedge = l3edge = l2edge = 0.0
try:
kedge = 0.001*xray_edge(elem, 'K').energy
l3edge = 0.001*xray_edge(elem, 'L3').energy
                l2edge = 0.001*xray_edge(elem, 'L2').energy
except:
pass
if ((kedge < en and kedge > emin) or
(l3edge < en and l3edge > emin) or
(l2edge < en and l2edge > emin)):
if elem not in self.ptable.selected:
self.ptable.onclick(label=elem)
def onDetMaterial(self, event=None):
dmat = self.wids['det_mat'].GetStringSelection()
if dmat not in FanoFactors:
dmat = 'Si'
self.wids['det_efano'].SetLabel('E_Fano= %.4e' % FanoFactors[dmat])
def onFilterMaterial(self, evt=None, index=1):
name = evt.GetString()
den = self.materials_data.get(name, (None, 1.0))[1]
t = 'filter%d' % (index)
thick = self.wids['%s_thk'%t]
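        # gases (very low density) default to a thick path in mm; solid foils default to a thin value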
if den < 0.1 and thick.GetValue() < 0.1:
thick.SetValue(10.0)
thick.SetIncrement(0.5)
elif den > 0.1 and thick.GetValue() < 1.e-5:
thick.SetValue(0.0250)
thick.SetIncrement(0.005)
def onUseCurrentMaterialAsFilter(self, evt=None):
name = self.selected_material
density = self.materials_data.get(name, (None, 1.0))[1]
self.wids['matrix_den'].SetValue(density)
self.wids['matrix_mat'].SetValue(name)
def onSelectMaterial(self, evt=None):
if self.owids['materials'] is None:
return
item = self.owids['materials'].GetSelectedRow()
name = None
if item > -1:
name = list(self.materials_data.keys())[item]
self.selected_material = name
self.wids['matrix_btn'].Enable(name is not None)
if name is not None:
self.wids['matrix_btn'].SetLabel('Use %s' % name)
def onUpdateFilterList(self, evt=None):
flist = ['None']
for i in range(len(self.materials_data)):
if self.owids['materials'].GetToggleValue(i, 3): # is filter
flist.append(self.owids['materials'].GetTextValue(i, 0))
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
choice = self.wids['%s_mat'%t]
cur = choice.GetStringSelection()
choice.Clear()
choice.SetChoices(flist)
if cur in flist:
choice.SetStringSelection(cur)
else:
choice.SetSelection(0)
def onAddMaterial(self, evt=None):
name = self.owids['newmat_name'].GetValue()
formula = self.owids['newmat_form'].GetValue()
density = self.owids['newmat_dens'].GetValue()
add = len(name) > 0 and len(formula)>0
if add and name in self.materials_data:
add = (Popup(self,
"Overwrite definition of '%s'?" % name,
'Re-define material?',
style=wx.OK|wx.CANCEL)==wx.ID_OK)
if add:
irow = list(self.materials_data.keys()).index(name)
self.owids['materials'].DeleteItem(irow)
if add:
add_material(name, formula, density)
self.materials_data[name] = (formula, density)
self.selected_material = name
self.owids['materials'].AppendItem((name, formula,
"%9.6f"%density,
False))
def onElemSelect(self, event=None, elem=None):
self.ptable.tsym.SetLabel('')
self.ptable.title.SetLabel('%d elements selected' %
len(self.ptable.selected))
def onUsePileupEscape(self, event=None):
puse = self.wids['pileup_use'].IsChecked()
self.wids['pileup_amp'].Enable(puse)
self.wids['pileup_amp_vary'].Enable(puse)
puse = self.wids['escape_use'].IsChecked()
self.wids['escape_amp'].Enable(puse)
self.wids['escape_amp_vary'].Enable(puse)
def onUsePeak(self, event=None, name=None, value=None):
if value is None and event is not None:
value = event.IsChecked()
if name is None:
return
for a in ('cen', 'step', 'tail', 'sigma', 'beta'):
self.wids['%s_%s'%(name, a)].Enable(value)
varwid = self.wids.get('%s_%s_vary'%(name, a), None)
if varwid is not None:
varwid.Enable(value)
def build_model(self, match_amplitudes=True):
"""build xrf_model from form settings"""
vars = {'Vary':'True', 'Fix': 'False', 'True':True, 'False': False}
opts = {}
for key, wid in self.wids.items():
val = None
if hasattr(wid, 'GetValue'):
val = wid.GetValue()
elif hasattr(wid, 'IsChecked'):
val = wid.IsChecked()
elif isinstance(wid, Choice):
val = wid.GetStringSelection()
elif hasattr(wid, 'GetStringSelection'):
val = wid.GetStringSelection()
elif hasattr(wid, 'GetLabel'):
val = wid.GetLabel()
if isinstance(val, str) and val.title() in vars:
val = vars[val.title()]
opts[key] = val
opts['count_time'] = getattr(self.mca, 'real_time', 1.0)
if opts['count_time'] is None:
opts['count_time'] = 1.0
script = [xrfmod_setup.format(**opts)]
for peakname in ('Elastic', 'Compton1', 'Compton2'):
t = peakname.lower()
if opts['%s_use'% t]:
d = {'peakname': t}
d['_cen'] = opts['%s_cen'%t]
d['vcen'] = opts['%s_cen_vary'%t]
d['_step'] = opts['%s_step'%t]
d['vstep'] = opts['%s_step_vary'%t]
d['_tail'] = opts['%s_tail'%t]
d['vtail'] = opts['%s_tail_vary'%t]
d['_beta'] = opts['%s_beta'%t]
d['vbeta'] = opts['%s_beta_vary'%t]
d['_sigma'] = opts['%s_sigma'%t]
d['vsigma'] = opts['%s_sigma_vary'%t]
script.append(xrfmod_scattpeak.format(**d))
for i in range(NFILTERS):
t = 'filter%d' % (i+1)
f_mat = opts['%s_mat'%t]
if f_mat not in (None, 'None') and int(1e6*opts['%s_thk'%t]) > 1:
script.append(xrfmod_filter.format(name=f_mat,
thick=opts['%s_thk'%t],
vary=opts['%s_var'%t]))
m_mat = opts['matrix_mat'].strip()
if len(m_mat) > 0 and int(1e6*opts['matrix_thk']) > 1:
script.append(xrfmod_matrix.format(name=m_mat,
thick=opts['matrix_thk'],
density=opts['matrix_den']))
if opts['pileup_use'] in ('True', True):
script.append(xrfmod_pileup.format(scale=opts['pileup_amp'],
vary=opts['pileup_amp_vary']))
if opts['escape_use'] in ('True', True):
script.append(xrfmod_escape.format(scale=opts['escape_amp'],
vary=opts['escape_amp_vary']))
# sort elements selected on Periodic Table by Z
elemz = []
for elem in self.ptable.selected:
elemz.append( 1 + self.ptable.syms.index(elem))
elemz.sort()
syms = ["'%s'" % self.ptable.syms[iz-1] for iz in sorted(elemz)]
syms = '[%s]' % (', '.join(syms))
script.append(xrfmod_elems.format(elemlist=syms))
script.append("{group:s}.xrf_init = _xrfmodel.calc_spectrum({group:s}.energy)")
script = '\n'.join(script)
self.model_script = script.format(group=self.mcagroup)
self._larch.eval(self.model_script)
cmds = []
self.xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
floor = 1.e-12*max(self.mca.counts)
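        # rescale each component's initial amplitude so its largest peak roughly matches the measured counts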
if match_amplitudes:
total = 0.0 * self.mca.counts
for name, parr in self.xrfmod.comps.items():
nam = name.lower()
try:
imax = np.where(parr > 0.99*parr.max())[0][0]
except: # probably means all counts are zero
imax = int(len(parr)/2.0)
scale = self.mca.counts[imax] / (parr[imax]+1.00)
ampname = 'amp_%s' % nam
if nam in ('elastic', 'compton1', 'compton2', 'compton',
'background', 'pileup', 'escape'):
ampname = '%s_amp' % nam
if nam in ('background', 'pileup', 'escape'):
scale = 1.0
paramval = self.xrfmod.params[ampname].value
s = "_xrfmodel.params['%s'].value = %.5f" % (ampname, paramval*scale)
cmds.append(s)
parr *= scale
parr[np.where(parr<floor)] = floor
total += parr
self.xrfmod.current_model = total
script = '\n'.join(cmds)
self._larch.eval(script)
self.model_script = "%s\n%s" % (self.model_script, script)
s = "{group:s}.xrf_init = _xrfmodel.calc_spectrum({group:s}.energy)"
self._larch.eval(s.format(group=self.mcagroup))
def plot_model(self, model_spectrum=None, init=False, with_comps=True,
label=None):
conf = self.parent.conf
plotkws = {'linewidth': 2.5, 'delay_draw': True, 'grid': False,
'ylog_scale': self.parent.ylog_scale, 'show_legend': False,
'fullbox': False}
ppanel = self.parent.panel
ppanel.conf.reset_trace_properties()
self.parent.plot(self.mca.energy, self.mca.counts, mca=self.mca,
xlabel='E (keV)', xmin=0, with_rois=False, **plotkws)
if model_spectrum is None:
model_spectrum = self.xrfmod.current_model if init else self.xrfmod.best_fit
if label is None:
label = 'predicted model' if init else 'best fit'
self.parent.oplot(self.mca.energy, model_spectrum,
label=label, color=conf.fit_color, **plotkws)
if with_comps:
for label, arr in self.xrfmod.comps.items():
ppanel.oplot(self.mca.energy, arr, label=label, **plotkws)
yscale = {False:'linear', True:'log'}[self.parent.ylog_scale]
ppanel.set_logscale(yscale=yscale)
ppanel.set_viewlimits()
ppanel.conf.set_legend_location('upper right', True)
ppanel.conf.draw_legend(show=True, delay_draw=False)
def onShowModel(self, event=None):
self.build_model()
self.plot_model(init=True, with_comps=True)
def onFitIteration(self, iter=0, pars=None):
pass
# print("Fit iteration %d" % iter)
# self.wids['fit_message'].SetLabel("Fit iteration %d" % iter)
def onFitModel(self, event=None):
self.build_model()
xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
xrfmod.iter_callback = self.onFitIteration
fit_script = xrfmod_fitscript.format(group=self.mcagroup,
emin=self.wids['en_min'].GetValue(),
emax=self.wids['en_max'].GetValue())
self._larch.eval(fit_script)
dgroup = self._larch.symtable.get_group(self.mcagroup)
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
xrfresult = self.xrfresults[0]
xrfresult.script = "%s\n%s" % (self.model_script, fit_script)
xrfresult.label = "fit %d" % (len(self.xrfresults))
self.plot_model(init=True, with_comps=True)
for i in range(len(self.nb.pagelist)):
if self.nb.GetPageText(i).strip().startswith('Fit R'):
self.nb.SetSelection(i)
time.sleep(0.002)
self.show_results()
def onClose(self, event=None):
self.Destroy()
def onSaveFitResult(self, event=None):
result = self.get_fitresult()
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '.xrfmodel'
ModelWcards = "XRF Models(*.xrfmodel)|*.xrfmodel|All files (*.*)|*.*"
sfile = FileSave(self, 'Save XRF Model', default_file=deffile,
wildcard=ModelWcards)
if sfile is not None:
self._larch.eval(xrfmod_savejs.format(group=self.mcagroup,
nfit=self.nfit,
filename=sfile))
def onExportFitResult(self, event=None):
result = self.get_fitresult()
deffile = self.mca.label + '_' + result.label
deffile = fix_filename(deffile.replace('.', '_')) + '_xrf.txt'
wcards = 'All files (*.*)|*.*'
outfile = FileSave(self, 'Export Fit Result', default_file=deffile)
if outfile is not None:
buff = ['# XRF Fit %s: %s' % (self.mca.label, result.label),
'## Fit Script:']
for a in result.script.split('\n'):
buff.append('# %s' % a)
buff.append('## Fit Report:')
for a in result.fit_report.split('\n'):
buff.append('# %s' % a)
buff.append('#')
buff.append('########################################')
labels = ['energy', 'counts', 'best_fit',
'best_energy', 'fit_window',
'fit_weight', 'attenuation']
labels.extend(list(result.comps.keys()))
buff.append('# %s' % (' '.join(labels)))
npts = len(self.mca.energy)
for i in range(npts):
dline = [gformat(self.mca.energy[i]),
gformat(self.mca.counts[i]),
gformat(result.best_fit[i]),
gformat(result.best_en[i]),
gformat(result.fit_window[i]),
gformat(result.fit_weight[i]),
gformat(result.atten[i])]
for c in result.comps.values():
dline.append(gformat(c[i]))
buff.append(' '.join(dline))
buff.append('\n')
with open(outfile, 'w') as fh:
fh.write('\n'.join(buff))
def get_fitresult(self, nfit=None):
if nfit is None:
nfit = self.nfit
self.xrfresults = self._larch.symtable.get_symbol(XRFRESULTS_GROUP)
self.nfit = max(0, nfit)
self.nfit = min(self.nfit, len(self.xrfresults)-1)
return self.xrfresults[self.nfit]
def onChangeFitLabel(self, event=None):
label = self.owids['fitlabel_txt'].GetValue()
result = self.get_fitresult()
result.label = label
self.show_results()
def onPlot(self, event=None):
result = self.get_fitresult()
xrfmod = self._larch.symtable.get_symbol('_xrfmodel')
with_comps = self.owids['plot_comps'].IsChecked()
spect = xrfmod.calc_spectrum(self.mca.energy,
params=result.params)
self.plot_model(model_spectrum=spect, with_comps=with_comps,
label=result.label)
def onSelectFit(self, evt=None):
if self.owids['stats'] is None:
return
item = self.owids['stats'].GetSelectedRow()
if item > -1:
self.show_fitresult(nfit=item)
def onSelectParameter(self, evt=None):
if self.owids['params'] is None:
return
if not self.owids['params'].HasSelection():
return
item = self.owids['params'].GetSelectedRow()
pname = self.owids['paramsdata'][item]
cormin= self.owids['min_correl'].GetValue()
self.owids['correl'].DeleteAllItems()
result = self.get_fitresult()
this = result.params[pname]
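        # list correlations of the selected parameter that exceed the threshold, strongest first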
if this.correl is not None:
sort_correl = sorted(this.correl.items(), key=lambda it: abs(it[1]))
for name, corval in reversed(sort_correl):
if abs(corval) > cormin:
self.owids['correl'].AppendItem((pname, name, "% .4f" % corval))
def onAllCorrel(self, evt=None):
result = self.get_fitresult()
params = result.params
parnames = list(params.keys())
cormin= self.owids['min_correl'].GetValue()
correls = {}
for i, name in enumerate(parnames):
par = params[name]
if not par.vary:
continue
if hasattr(par, 'correl') and par.correl is not None:
for name2 in parnames[i+1:]:
if (name != name2 and name2 in par.correl and
abs(par.correl[name2]) > cormin):
correls["%s$$%s" % (name, name2)] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
self.owids['correl'].DeleteAllItems()
for namepair, corval in sort_correl:
name1, name2 = namepair.split('$$')
self.owids['correl'].AppendItem((name1, name2, "% .4f" % corval))
def show_results(self):
cur = self.get_fitresult()
self.owids['stats'].DeleteAllItems()
for i, res in enumerate(self.xrfresults):
args = [res.label]
for attr in ('nvarys', 'nfev', 'chisqr', 'redchi', 'aic'):
val = getattr(res, attr)
if isinstance(val, int):
val = '%d' % val
else:
val = gformat(val, 11)
args.append(val)
self.owids['stats'].AppendItem(tuple(args))
self.owids['data_title'].SetLabel("%s: %.3f sec" % (self.mca.label, cur.count_time))
self.owids['data_title2'].SetLabel("%s: %.3f sec" % (self.mca.label, cur.count_time))
self.owids['fitlabel_txt'].SetValue(cur.label)
self.show_fitresult(nfit=self.nfit)
def show_fitresult(self, nfit=0, mca=None):
if mca is not None:
self.mca = mca
result = self.get_fitresult(nfit=nfit)
self.owids['data_title'].SetLabel("%s: %.3f sec" % (self.mca.label, result.count_time))
self.owids['data_title2'].SetLabel("%s: %.3f sec" % (self.mca.label, result.count_time))
self.result = result
self.owids['fitlabel_txt'].SetValue(result.label)
self.owids['params'].DeleteAllItems()
self.owids['paramsdata'] = []
for param in reversed(result.params.values()):
pname = param.name
try:
val = gformat(param.value, 10)
except (TypeError, ValueError):
val = ' ??? '
serr, perr = ' N/A ', ' N/A '
if param.stderr is not None:
serr = gformat(param.stderr, 10)
try:
perr = ' {:.2%}'.format(abs(param.stderr/param.value))
except ZeroDivisionError:
perr = '?'
extra = ' '
if param.expr is not None:
extra = ' = %s ' % param.expr
elif not param.vary:
extra = ' (fixed)'
elif param.init_value is not None:
extra = gformat(param.init_value, 10)
self.owids['params'].AppendItem((pname, val, serr, perr, extra))
self.owids['paramsdata'].append(pname)
        self.Refresh()
pythonProject/033.py | MontanhaRio/python | 0 | 6630198 | #033 - Largest and smallest values
a = int(input('First value: '))
b = int(input('Second value: '))
c = int(input('Third value: '))
# non-strict comparisons so ties between the inputs are handled correctly
menor = a
if b <= a and b <= c:
    menor = b
if c <= a and c <= b:
    menor = c
maior = a
if b >= a and b >= c:
    maior = b
if c >= a and c >= b:
    maior = c
print(f'The smallest value entered was {menor}')
print(f'The largest value entered was {maior}')
akshare/pro/client.py | lisong996/akshare | 4,202 | 6630199
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2019/11/10 22:52
Desc: 数据接口源代码
"""
from functools import partial
from urllib import parse
import pandas as pd
import requests
class DataApi:
__token = ""
__http_url = "https://api.qhkch.com"
def __init__(self, token, timeout=10):
"""
初始化函数
:param token: API接口TOKEN,用于用户认证
:type token: str
:param timeout: 超时设置
:type timeout: int
"""
self.__token = token
self.__timeout = timeout
def query(self, api_name, fields="", **kwargs):
"""
:param api_name: 需要调取的接口
:type api_name: str
:param fields: 想要获取的字段
:type fields: str
:param kwargs: 指定需要输入的参数
:type kwargs: 键值对
:return: 指定的数据
:rtype: dict or pandas.DataFrame
"""
headers = {
"X-Token": self.__token,
}
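        # the endpoint path is built from the API name plus the positional parameter values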
url = parse.urljoin(self.__http_url, "/".join([api_name, *kwargs.values()]))
res = requests.get(url, headers=headers, timeout=self.__timeout)
if res.status_code != 200:
            raise Exception("Connection error: please check that your token has not expired and that the input parameters are correct")
data_json = res.json()
if fields == "":
try:
return pd.DataFrame(data_json)
            except ValueError:
result_df = pd.DataFrame.from_dict(data_json, orient="index", columns=[api_name])
return result_df
        else:  # additional handling added here
if api_name == "variety_all_positions":
big_df = pd.DataFrame()
for item in data_json[fields].keys():
temp_df = pd.DataFrame(data_json[fields][item])
temp_df["code"] = item
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True, drop=True)
return big_df
else:
return pd.DataFrame(data_json[fields])
def __getattr__(self, name):
return partial(self.query, name)
if __name__ == '__main__':
    pass
Resource/random_resource.py | GopalNG/FlaskApiRandom | 0 | 6630200
import json
from flask_restful import Resource
from Random.RandomObjects import RandomObjects
from Models.random_models import GenerateModel
from flask import send_file
from io import BytesIO
class CreateRandom(Resource):
@classmethod
def get(cls):
"""
@usage : It Wil Create A Random Data and Save the DB
:return: A Json with Message and File Url For Download
"""
try:
random_data = RandomObjects().random_objects_main()
data = {'values': json.dumps(random_data[0]), 'file_id': random_data[2], 'data': json.dumps(random_data[1])}
if data:
user = GenerateModel(**data)
user.save_to_db()
                url = 'http://127.0.0.1/api/download/{}'.format(data['file_id'])
return {"message": "User created successfully.", "file_path": url}, 201
return {"message": "A user with that username already exists"}, 400
except Exception as error:
            return {'message': str(error)[:10]}, 500
class GetReport(Resource):
@classmethod
def get(cls, file_id: int):
"""
@usage: It Is used to Get the report of generated File by CreateRandom from DB
:param file_id: Created By CreateRandom
:return: a dict with Report of the file
"""
try:
data = GenerateModel.find_by_file_id(file_id)
if not data:
return {'message': 'file not found create first.'}, 404
by_index_data = ['float', 'numbers', 'alphanumeric', 'alphabets']
_report_value = json.loads(data.json()['values'])
return dict(zip(by_index_data, _report_value)), 200, {'content-type': 'application/json'}
except Exception as error:
            return {'message': str(error)[:10]}, 500
class Download(Resource):
@classmethod
def get(cls, file_id: int):
"""
@usage: It Is Used For Downloading the file while is inserted (generated) by CreateRandom from db
:param file_id: Created By CreateRandom
:return: A Json with Message and File Url For Download
"""
try:
data = GenerateModel.get_file_data(file_id)
if data is not None:
data = json.loads(data)
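                # write the stored strings into an in-memory buffer and return it as a downloadable text file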
buffer = BytesIO()
for i in data:
buffer.write(i.encode('utf-8'))
buffer.seek(0)
return send_file(buffer, as_attachment=True,
download_name='{}.txt'.format(file_id),
mimetype='text/csv')
return {'Message': 'No Data For The Request',
'hint': 'generate random first:{}'.format(' /api/generate/')}, 404
except Exception as error:
            return {'message': str(error)[:10]}, 500
tests/test_utils.py | H4CKY54CK/hackytools | 5 | 6630201 | from hackytools.utils import *
def test_combutations():
data = 'ABC'
assert list(combutations(data)) == [('A',), ('B',), ('C',), ('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'B', 'C')]
assert list(combutations(data, 2)) == [('A',), ('B',), ('C',), ('A', 'B'), ('A', 'C'), ('B', 'C')]
assert list(combutations(data, reverse=True)) == [('A', 'B', 'C'), ('A', 'B'), ('A', 'C'), ('B', 'C'), ('A',), ('B',), ('C',)]
assert list(combutations(data, 2, reverse=True)) == [('A', 'B'), ('A', 'C'), ('B', 'C'), ('A',), ('B',), ('C',)]
def test_ftime_ns():
data = (
(0, '0', 'ns'),
(10, '10', 'ns'),
(371, '371', 'ns'),
(5150, '5', '\u00b5s'),
(58715, '58', '\u00b5s'),
(987465, '987', '\u00b5s'),
(3167431, '3', 'ms'),
(64744741, '64', 'ms'),
(943167497, '943', 'ms'),
(6468541351, '6', 's'),
)
for n, pre, after in data:
res = ftime_ns(n)
assert res.startswith(pre), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
assert res.endswith(after), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
def test_ftime():
data = (
(68546438444, '1m', '8s'),
(979779441123, '16m', '19s'),
(7977941341123, '2h, 12m', '57s'),
(97977941341123, '1d, 3h, 12m', '57s'),
)
for n, pre, after in data:
n /= 1000000000
res = ftime(n, 'macro')
assert res.startswith(pre), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
assert res.endswith(after), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
def test_ftime_seconds():
data = (
(68546438444, '1m', '8s'),
(979779441123, '16m', '19s'),
(7977941341123, '2h, 12m', '57s'),
(97977941341123, '1d, 3h, 12m', '57s'),
)
for n, pre, after in data:
n /= 1000000000
res = ftime_seconds(n)
assert res.startswith(pre), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
assert res.endswith(after), "Expected: %s | Got: %s" % (pre + ' ' + after, res)
def test_flatten():
data = [1, [2, [3, [[[4,5,6,7,8,9]]]]]]
assert flatten(data) == list(range(1,10))
def test_smiter():
data = range(500)
for i in smiter(data):
if i.value == 0:
assert i.first is True and i.last is False
elif i.value == 499:
assert i.last is True and i.first is False
else:
assert i.first is not True and i.last is not True
data = [13]
for i in smiter(data):
assert i.last is True and i.first is True and i.value == 13
def test_splitint():
for i in range(1,307):
n = 10 ** i
nums = list(map(int,str(n)))
        assert splitint(n) == nums
tests/test_ilmat.py | zhengp0/spmat | 0 | 6630202
"""
Test ILMat
"""
import pytest
import numpy as np
from spmat import ILMat
# pylint: disable=redefined-outer-name
SHAPE = (5, 3)
@pytest.fixture
def ilmat():
lmat = np.random.randn(*SHAPE)
return ILMat(lmat)
def test_ilmat(ilmat):
my_result = ilmat.mat.dot(ilmat.invmat)
tr_result = np.identity(ilmat.dsize)
assert np.allclose(my_result, tr_result)
@pytest.mark.parametrize("array", [np.random.randn(SHAPE[0]),
np.random.randn(*SHAPE)])
def test_dot(ilmat, array):
my_result = ilmat.dot(array)
tr_result = ilmat.mat.dot(array)
assert np.allclose(my_result, tr_result)
@pytest.mark.parametrize("array", [np.random.randn(SHAPE[0]),
np.random.randn(*SHAPE)])
def test_invdot(ilmat, array):
my_result = ilmat.invdot(array)
tr_result = np.linalg.solve(ilmat.mat, array)
assert np.allclose(my_result, tr_result)
def test_logdet(ilmat):
my_result = ilmat.logdet()
tr_result = np.linalg.slogdet(ilmat.mat)[1]
    assert np.isclose(my_result, tr_result)
systemcheck/systems/ABAP/plugins/actions/action_abap_validate_redundant_password_hashes.py | team-fasel/SystemCheck | 2 | 6630203
from systemcheck.systems import ABAP
import systemcheck
import logging
from pprint import pformat
import re
class ActionAbapValidateRedundantPasswordHashes(systemcheck.plugins.ActionAbapCheck):
""" Validate the scheduling of batch jobs
"""
def __init__(self):
super().__init__()
self.logger=logging.getLogger(self.__class__.__name__)
self.alchemyObjects = [ABAP.models.ActionAbapValidateRedundantPasswordHashes,
ABAP.models.ActionAbapFolder]
def _buildSpoolParams(self)->dict:
spoolParams = dict(PDEST=self.checkObject.PDEST,
PRNEW = 'X',
PAART='X_65_132')
if self.checkObject.PRBIG:
spoolParams['PRBIG']=self.boolmapper(self.checkObject.PRBIG)
if self.checkObject.PRSAP:
spoolParams['PRSAP'] = self.boolmapper(self.checkObject.PRSAP)
if self.checkObject.PRIMM:
spoolParams['PRIMM'] = self.boolmapper(self.checkObject.PRIMM)
self.logger.debug('Spool Parameters: %s', pformat(spoolParams))
return spoolParams
def initializeResult(self):
self.actionResult.addResultColumn('RATING', 'Rating')
self.actionResult.addResultColumn('TABLE', 'Table')
self.actionResult.addResultColumn('EXPECTED', 'Expected Counts')
self.actionResult.addResultColumn('OPERATOR', 'Operator')
self.actionResult.addResultColumn('CONFIGURED', 'Number of Users')
self.actionResult.addResultColumn('LOGRECORD', 'Log Record')
def retrieveData(self, **parameters):
result = self.systemConnection.call_fm('TH_SERVER_LIST')
def execute(self):
# Setup Job for
job_param = dict(JOBNAME='SystemCheck: Red. Password Hashes')
job_stepParams=[]
job_stepParam=dict(ABAP_PROGRAM_NAME='CLEANUP_PASSWORD_HASH_VALUES',
ALLPRIPAR=self._buildSpoolParams())
if self.checkObject.SAP_USER_NAME:
job_stepParam['SAP_USER_NAME']=self.checkObject.SAP_USER_NAME
job_stepParams.append({'type':'ABAP',
'params':job_stepParam})
result = self.systemConnection.btc_schedule_job(jobptions=job_param,
stepoptions=job_stepParams)
if result.fail:
self.actionResult.rating='error'
self.actionResult.errorMessage=result.fail
return self.actionResult
# Get Spool
spoolinfo = result.data.get('SPOOL_ATTR')
if len(spoolinfo)>0:
relevant_spoolinfo=spoolinfo[0]
else:
self.actionResult.rating='error'
self.actionResult.errorMessage = 'No Spool for jobname "SystemCheck: Red. Password Hashes"'
return self.actionResult
spoolid = relevant_spoolinfo.get('SPOOLID')
result=self.systemConnection.btc_xbp_generic_bapi_caller('BAPI_XBP_GET_SPOOL_AS_DAT', SPOOL_REQUEST=spoolid)
if result.fail:
self.actionResult.rating='error'
self.actionResult.errorMessage=result.message
return self.actionResult
spool=result.data['SPOOL_LIST']
#Analyze Spool
self.logger.debug('analyzing spool file')
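        # locate where each table's 'Checking table ...' section starts in the spool output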
for lineNumber, spoolLine in enumerate(spool):
if 'Checking table USR02 ...' in spoolLine['']:
usr02Start=lineNumber
elif 'Checking table USH02 ...' in spoolLine['']:
ush02Start=lineNumber
elif 'Checking table USRPWDHISTORY ...' in spoolLine['']:
usrpwdhistoryStart=lineNumber
counter=usr02Start+1
withinLogRecord=True
numUsers=None
logRecord=None
parsedLines=[]
# Start working the log lines for table USR02 until USH02 begins
for lineNumber, item in enumerate(spool):
if lineNumber>usr02Start:
lineText=item[''].strip()
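                # a line starting with a count opens a new log record; other lines continue the previous record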
                match=re.match(r'(^\d+)', lineText)
if match or lineText.startswith('Checking table'):
# Line starts with a digit
if numUsers:
if usr02Start < lineNumber-1 < ush02Start:
parsedLines.append(['USR02', numUsers, logRecord])
elif ush02Start < lineNumber-1 < usrpwdhistoryStart:
parsedLines.append(['USH02', numUsers, logRecord])
elif lineNumber-1 > usrpwdhistoryStart:
parsedLines.append(['USRPWDHISTORY', numUsers, logRecord])
if not lineText.startswith('Checking table'):
numUsers=match.group(0)
                        matchRecord=re.match(r'\d+(.*)', lineText)
if matchRecord:
logRecord=matchRecord.groups()[0]
else:
logRecord=''
else:
if lineNumber<len(spool)-1:
logRecord+=lineText
if lineNumber==len(spool)-1:
parsedLines.append(['USRPWDHISTORY', numUsers, logRecord])
self.logger.debug('Parsed Spool results: %s', pformat(parsedLines))
for item in parsedLines:
record=dict(RATING='pass',
TABLE=item[0],
EXPECTED=0,
OPERATOR='equal',
CONFIGURED=item[1],
LOGRECORD=item[2])
record=self.rateIndividualResult(record)
self.actionResult.addResult(record)
self.rateOverallResult()
return self.actionResult
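# Illustrative note (counts and wording are hypothetical): each parsed spool entry
# has the form [table, number of affected users, log text], e.g.
#     ['USR02', '17', ' users with redundant password hashes were found']
# and is turned into a result record such as
#     dict(RATING='pass', TABLE='USR02', EXPECTED=0, OPERATOR='equal',
#          CONFIGURED='17', LOGRECORD='...')
# before rateIndividualResult() compares CONFIGURED against EXPECTED.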
| from systemcheck.systems import ABAP
import systemcheck
import logging
from pprint import pformat
import re
class ActionAbapValidateRedundantPasswordHashes(systemcheck.plugins.ActionAbapCheck):
""" Validate the scheduling of batch jobs
"""
def __init__(self):
super().__init__()
self.logger=logging.getLogger(self.__class__.__name__)
self.alchemyObjects = [ABAP.models.ActionAbapValidateRedundantPasswordHashes,
ABAP.models.ActionAbapFolder]
def _buildSpoolParams(self)->dict:
spoolParams = dict(PDEST=self.checkObject.PDEST,
PRNEW = 'X',
PAART='X_65_132')
if self.checkObject.PRBIG:
spoolParams['PRBIG']=self.boolmapper(self.checkObject.PRBIG)
if self.checkObject.PRSAP:
spoolParams['PRSAP'] = self.boolmapper(self.checkObject.PRSAP)
if self.checkObject.PRIMM:
spoolParams['PRIMM'] = self.boolmapper(self.checkObject.PRIMM)
self.logger.debug('Spool Parameters: %s', pformat(spoolParams))
return spoolParams
def initializeResult(self):
self.actionResult.addResultColumn('RATING', 'Rating')
self.actionResult.addResultColumn('TABLE', 'Table')
self.actionResult.addResultColumn('EXPECTED', 'Expected Counts')
self.actionResult.addResultColumn('OPERATOR', 'Operator')
self.actionResult.addResultColumn('CONFIGURED', 'Number of Users')
self.actionResult.addResultColumn('LOGRECORD', 'Log Record')
def retrieveData(self, **parameters):
result = self.systemConnection.call_fm('TH_SERVER_LIST')
def execute(self):
# Set up the batch job that runs the password-hash cleanup report
job_param = dict(JOBNAME='SystemCheck: Red. Password Hashes')
job_stepParams=[]
job_stepParam=dict(ABAP_PROGRAM_NAME='CLEANUP_PASSWORD_HASH_VALUES',
ALLPRIPAR=self._buildSpoolParams())
if self.checkObject.SAP_USER_NAME:
job_stepParam['SAP_USER_NAME']=self.checkObject.SAP_USER_NAME
job_stepParams.append({'type':'ABAP',
'params':job_stepParam})
result = self.systemConnection.btc_schedule_job(jobptions=job_param,
stepoptions=job_stepParams)
if result.fail:
self.actionResult.rating='error'
self.actionResult.errorMessage=result.fail
return self.actionResult
# Get Spool
spoolinfo = result.data.get('SPOOL_ATTR')
if len(spoolinfo)>0:
relevant_spoolinfo=spoolinfo[0]
else:
self.actionResult.rating='error'
self.actionResult.errorMessage = 'No Spool for jobname "SystemCheck: Red. Password Hashes"'
return self.actionResult
spoolid = relevant_spoolinfo.get('SPOOLID')
result=self.systemConnection.btc_xbp_generic_bapi_caller('BAPI_XBP_GET_SPOOL_AS_DAT', SPOOL_REQUEST=spoolid)
if result.fail:
self.actionResult.rating='error'
self.actionResult.errorMessage=result.message
return self.actionResult
spool=result.data['SPOOL_LIST']
#Analyze Spool
self.logger.debug('analyzing spool file')
for lineNumber, spoolLine in enumerate(spool):
if 'Checking table USR02 ...' in spoolLine['']:
usr02Start=lineNumber
elif 'Checking table USH02 ...' in spoolLine['']:
ush02Start=lineNumber
elif 'Checking table USRPWDHISTORY ...' in spoolLine['']:
usrpwdhistoryStart=lineNumber
counter=usr02Start+1
withinLogRecord=True
numUsers=None
logRecord=None
parsedLines=[]
# Start working the log lines for table USR02 until USH02 begins
for lineNumber, item in enumerate(spool):
if lineNumber>usr02Start:
lineText=item[''].strip()
match=re.match(r'(^\d+)', lineText)
if match or lineText.startswith('Checking table'):
# Line starts with a digit
if numUsers:
if usr02Start < lineNumber-1 < ush02Start:
parsedLines.append(['USR02', numUsers, logRecord])
elif ush02Start < lineNumber-1 < usrpwdhistoryStart:
parsedLines.append(['USH02', numUsers, logRecord])
elif lineNumber-1 > usrpwdhistoryStart:
parsedLines.append(['USRPWDHISTORY', numUsers, logRecord])
if not lineText.startswith('Checking table'):
numUsers=match.group(0)
matchRecord=re.match(r'\d+(.*)', lineText)
if matchRecord:
logRecord=matchRecord.groups()[0]
else:
logRecord=''
else:
if lineNumber<len(spool)-1:
logRecord+=lineText
if lineNumber==len(spool)-1:
parsedLines.append(['USRPWDHISTORY', numUsers, logRecord])
self.logger.debug('Parsed Spool results: %s', pformat(parsedLines))
for item in parsedLines:
record=dict(RATING='pass',
TABLE=item[0],
EXPECTED=0,
OPERATOR='equal',
CONFIGURED=item[1],
LOGRECORD=item[2])
record=self.rateIndividualResult(record)
self.actionResult.addResult(record)
self.rateOverallResult()
return self.actionResult | en | 0.745406 | Validate the scheduling of batch jobs # Setup Job for # Get Spool #Analyze Spool # Start working the log lines for table USR02 until USH02 begins # Line starts with a digit | 2.196487 | 2 |
packages/nonebot-adapter-mirai/nonebot/adapters/mirai/event/message.py | emicoto/none | 1,757 | 6630204 | from datetime import datetime
from typing import Any, Optional
from pydantic import BaseModel, Field
from nonebot.typing import overrides
from ..message import MessageChain
from .base import Event, GroupChatInfo, PrivateChatInfo
class MessageSource(BaseModel):
id: int
time: datetime
class MessageEvent(Event):
"""消息事件基类"""
message_chain: MessageChain = Field(alias='messageChain')
source: Optional[MessageSource] = None
sender: Any
@overrides(Event)
def get_message(self) -> MessageChain:
return self.message_chain
@overrides(Event)
def get_plaintext(self) -> str:
return self.message_chain.extract_plain_text()
@overrides(Event)
def get_user_id(self) -> str:
raise NotImplementedError
@overrides(Event)
def get_session_id(self) -> str:
raise NotImplementedError
class GroupMessage(MessageEvent):
"""群消息事件"""
sender: GroupChatInfo
to_me: bool = False
@overrides(MessageEvent)
def get_session_id(self) -> str:
return f'group_{self.sender.group.id}_' + self.get_user_id()
@overrides(MessageEvent)
def get_user_id(self) -> str:
return str(self.sender.id)
@overrides(MessageEvent)
def is_tome(self) -> bool:
return self.to_me
class FriendMessage(MessageEvent):
"""好友消息事件"""
sender: PrivateChatInfo
@overrides(MessageEvent)
def get_user_id(self) -> str:
return str(self.sender.id)
@overrides(MessageEvent)
def get_session_id(self) -> str:
return 'friend_' + self.get_user_id()
@overrides(MessageEvent)
def is_tome(self) -> bool:
return True
class TempMessage(MessageEvent):
"""临时会话消息事件"""
sender: GroupChatInfo
@overrides(MessageEvent)
def get_session_id(self) -> str:
return f'temp_{self.sender.group.id}_' + self.get_user_id()
@overrides(MessageEvent)
def is_tome(self) -> bool:
return True
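# Illustrative note (the ids below are hypothetical): for a sender with id 123 in
# group 456, GroupMessage.get_session_id() returns "group_456_123",
# TempMessage.get_session_id() returns "temp_456_123" and
# FriendMessage.get_session_id() returns "friend_123"; only GroupMessage bases
# is_tome() on the to_me flag, the other two always report True.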
| from datetime import datetime
from typing import Any, Optional
from pydantic import BaseModel, Field
from nonebot.typing import overrides
from ..message import MessageChain
from .base import Event, GroupChatInfo, PrivateChatInfo
class MessageSource(BaseModel):
id: int
time: datetime
class MessageEvent(Event):
"""消息事件基类"""
message_chain: MessageChain = Field(alias='messageChain')
source: Optional[MessageSource] = None
sender: Any
@overrides(Event)
def get_message(self) -> MessageChain:
return self.message_chain
@overrides(Event)
def get_plaintext(self) -> str:
return self.message_chain.extract_plain_text()
@overrides(Event)
def get_user_id(self) -> str:
raise NotImplementedError
@overrides(Event)
def get_session_id(self) -> str:
raise NotImplementedError
class GroupMessage(MessageEvent):
"""群消息事件"""
sender: GroupChatInfo
to_me: bool = False
@overrides(MessageEvent)
def get_session_id(self) -> str:
return f'group_{self.sender.group.id}_' + self.get_user_id()
@overrides(MessageEvent)
def get_user_id(self) -> str:
return str(self.sender.id)
@overrides(MessageEvent)
def is_tome(self) -> bool:
return self.to_me
class FriendMessage(MessageEvent):
"""好友消息事件"""
sender: PrivateChatInfo
@overrides(MessageEvent)
def get_user_id(self) -> str:
return str(self.sender.id)
@overrides(MessageEvent)
def get_session_id(self) -> str:
return 'friend_' + self.get_user_id()
@overrides(MessageEvent)
def is_tome(self) -> bool:
return True
class TempMessage(MessageEvent):
"""临时会话消息事件"""
sender: GroupChatInfo
@overrides(MessageEvent)
def get_session_id(self) -> str:
return f'temp_{self.sender.group.id}_' + self.get_user_id()
@overrides(MessageEvent)
def is_tome(self) -> bool:
return True
| zh | 0.987352 | 消息事件基类 群消息事件 好友消息事件 临时会话消息事件 | 2.351241 | 2 |
imdb_bidirectional_lstm.py | hc-super66/Word2Vec-Learning | 0 | 6630205 | # max len = 56
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import numpy as np
import pandas as pd
from keras import Sequential
from keras.layers import Dense, Dropout, Embedding, GRU, Bidirectional
from keras.models import Model
from keras.preprocessing import sequence
from keras.utils import np_utils, plot_model
from matplotlib import pyplot as plt
import pickle
# maxlen = 56
batch_size = 100
nb_epoch = 10
hidden_dim = 120
kernel_size = 3
nb_filter = 60
test = pd.read_csv("corpus/imdb/testData.tsv", header=0,
delimiter="\t", quoting=3)
def get_idx_from_sent(sent, word_idx_map):
"""
Transforms sentence into a list of indices. Pad with zeroes.
"""
x = []
words = sent.split()
for word in words:
if word in word_idx_map:
x.append(word_idx_map[word])
else:
x.append(1)
return x
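# Illustrative example (toy vocabulary, not the real word_idx_map): words missing
# from the mapping fall back to index 1.
_toy_map = {'great': 2, 'movie': 3}
assert get_idx_from_sent('a great movie', _toy_map) == [1, 2, 3]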
def make_idx_data(revs, word_idx_map, maxlen=60):
"""
Transforms sentences into a 2-d matrix.
"""
X_train, X_test, X_dev, y_train, y_dev = [], [], [], [], []
for rev in revs:
sent = get_idx_from_sent(rev['text'], word_idx_map)
y = rev['y']
if rev['split'] == 1:
X_train.append(sent)
y_train.append(y)
elif rev['split'] == 0:
X_dev.append(sent)
y_dev.append(y)
elif rev['split'] == -1:
X_test.append(sent)
X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)
X_dev = sequence.pad_sequences(np.array(X_dev), maxlen=maxlen)
X_test = sequence.pad_sequences(np.array(X_test), maxlen=maxlen)
# X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)
y_train = np_utils.to_categorical(np.array(y_train))
y_dev = np_utils.to_categorical(np.array(y_dev))
# y_valid = np.array(y_valid)
return [X_train, X_test, X_dev, y_train, y_dev]
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info(r"running %s" % ''.join(sys.argv))
logging.info('loading data...')
# pickle_file = os.path.join('pickle', 'vader_movie_reviews_glove.pickle3')
# pickle_file = sys.argv[1]
pickle_file = os.path.join('pickle', 'imdb_train_val_test.pickle3')
revs, W, word_idx_map, vocab, maxlen = pickle.load(open(pickle_file, 'rb'))
logging.info('data loaded!')
X_train, X_test, X_dev, y_train, y_dev = make_idx_data(revs, word_idx_map, maxlen=maxlen)
n_train_sample = X_train.shape[0]
logging.info("n_train_sample [n_train_sample]: %d" % n_train_sample)
n_test_sample = X_test.shape[0]
logging.info("n_test_sample [n_train_sample]: %d" % n_test_sample)
len_sentence = X_train.shape[1] # 200
logging.info("len_sentence [len_sentence]: %d" % len_sentence)
max_features = W.shape[0]
logging.info("num of word vector [max_features]: %d" % max_features)
num_features = W.shape[1] # 400
logging.info("dimension of word vector [num_features]: %d" % num_features)
# Keras Model
# this is the placeholder tensor for the input sequence
model = Sequential()
model.add(Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, mask_zero=True, weights=[W],
trainable=False))
# embedded = Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, weights=[W], trainable=False) (sequence)
model.add(Dropout(0.25))
# bi-directional LSTM
# hidden = Bidirectional(LSTM(hidden_dim//2, recurrent_dropout=0.25)) (embedded)
# bi-directional GRU
model.add(Bidirectional(GRU(hidden_dim // 2, recurrent_dropout=0.25)))
# A bidirectional RNN (Bi-RNN) consists of two recurrent layers that receive the
# same input but propagate information in opposite directions
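# Illustrative note (not in the original script): with hidden_dim = 120 each GRU
# direction has 60 units; the Bidirectional wrapper concatenates their outputs,
# so this layer emits 120 features per example, i.e. output shape (None, 120).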
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
print('plot model...')
plot_model(model, to_file='imdb_bidirectional_lstm.png', show_shapes=True, show_layer_names=True)  # visualize the network architecture
history = model.fit(X_train, y_train, validation_data=[X_dev, y_dev], batch_size=batch_size, epochs=nb_epoch, verbose=2)
y_pred = model.predict(X_test, batch_size=batch_size)
y_pred = np.argmax(y_pred, axis=1)
result_output = pd.DataFrame(data={"id": test["id"], "sentiment": y_pred})
# Use pandas to write the comma-separated output file
# result_output.to_csv("./result/bi-lstm.csv", index=False, quoting=3)
result_output.to_csv("./result/bi-lstm.csv", index=False, quoting=3)
score, acc = model.evaluate(X_dev, y_dev, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
| # max len = 56
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import numpy as np
import pandas as pd
from keras import Sequential
from keras.layers import Dense, Dropout, Embedding, GRU, Bidirectional
from keras.models import Model
from keras.preprocessing import sequence
from keras.utils import np_utils, plot_model
from matplotlib import pyplot as plt
import pickle
# maxlen = 56
batch_size = 100
nb_epoch = 10
hidden_dim = 120
kernel_size = 3
nb_filter = 60
test = pd.read_csv("corpus/imdb/testData.tsv", header=0,
delimiter="\t", quoting=3)
def get_idx_from_sent(sent, word_idx_map):
"""
Transforms sentence into a list of indices. Pad with zeroes.
"""
x = []
words = sent.split()
for word in words:
if word in word_idx_map:
x.append(word_idx_map[word])
else:
x.append(1)
return x
def make_idx_data(revs, word_idx_map, maxlen=60):
"""
Transforms sentences into a 2-d matrix.
"""
X_train, X_test, X_dev, y_train, y_dev = [], [], [], [], []
for rev in revs:
sent = get_idx_from_sent(rev['text'], word_idx_map)
y = rev['y']
if rev['split'] == 1:
X_train.append(sent)
y_train.append(y)
elif rev['split'] == 0:
X_dev.append(sent)
y_dev.append(y)
elif rev['split'] == -1:
X_test.append(sent)
X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)
X_dev = sequence.pad_sequences(np.array(X_dev), maxlen=maxlen)
X_test = sequence.pad_sequences(np.array(X_test), maxlen=maxlen)
# X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)
y_train = np_utils.to_categorical(np.array(y_train))
y_dev = np_utils.to_categorical(np.array(y_dev))
# y_valid = np.array(y_valid)
return [X_train, X_test, X_dev, y_train, y_dev]
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info(r"running %s" % ''.join(sys.argv))
logging.info('loading data...')
# pickle_file = os.path.join('pickle', 'vader_movie_reviews_glove.pickle3')
# pickle_file = sys.argv[1]
pickle_file = os.path.join('pickle', 'imdb_train_val_test.pickle3')
revs, W, word_idx_map, vocab, maxlen = pickle.load(open(pickle_file, 'rb'))
logging.info('data loaded!')
X_train, X_test, X_dev, y_train, y_dev = make_idx_data(revs, word_idx_map, maxlen=maxlen)
n_train_sample = X_train.shape[0]
logging.info("n_train_sample [n_train_sample]: %d" % n_train_sample)
n_test_sample = X_test.shape[0]
logging.info("n_test_sample [n_train_sample]: %d" % n_test_sample)
len_sentence = X_train.shape[1] # 200
logging.info("len_sentence [len_sentence]: %d" % len_sentence)
max_features = W.shape[0]
logging.info("num of word vector [max_features]: %d" % max_features)
num_features = W.shape[1] # 400
logging.info("dimension of word vector [num_features]: %d" % num_features)
# Keras Model
# this is the placeholder tensor for the input sequence
model = Sequential()
model.add(Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, mask_zero=True, weights=[W],
trainable=False))
# embedded = Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, weights=[W], trainable=False) (sequence)
model.add(Dropout(0.25))
# bi-directional LSTM
# hidden = Bidirectional(LSTM(hidden_dim//2, recurrent_dropout=0.25)) (embedded)
# bi-directional GRU
model.add(Bidirectional(GRU(hidden_dim // 2, recurrent_dropout=0.25)))
# A bidirectional RNN (Bi-RNN) consists of two recurrent layers that receive the
# same input but propagate information in opposite directions
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
print('plot model...')
plot_model(model, to_file='imdb_bidirectional_lstm.png', show_shapes=True, show_layer_names=True)  # visualize the network architecture
history = model.fit(X_train, y_train, validation_data=[X_dev, y_dev], batch_size=batch_size, epochs=nb_epoch, verbose=2)
y_pred = model.predict(X_test, batch_size=batch_size)
y_pred = np.argmax(y_pred, axis=1)
result_output = pd.DataFrame(data={"id": test["id"], "sentiment": y_pred})
# Use pandas to write the comma-separated output file
# result_output.to_csv("./result/bi-lstm.csv", index=False, quoting=3)
result_output.to_csv("./result/bi-lstm.csv", index=False, quoting=3)
score, acc = model.evaluate(X_dev, y_dev, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
| en | 0.590384 | # max len = 56 # maxlen = 56 Transforms sentence into a list of indices. Pad with zeroes. Transforms sentences into a 2-d matrix. # X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen) # y_valid = np.array(y_valid) # pickle_file = os.path.join('pickle', 'vader_movie_reviews_glove.pickle3') # pickle_file = sys.argv[1] # 200 # 400 # Keras Model # this is the placeholder tensor for the input sequence # embedded = Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, weights=[W], trainable=False) (sequence) # bi-directional LSTM # hidden = Bidirectional(LSTM(hidden_dim//2, recurrent_dropout=0.25)) (embedded) # bi-directional GRU # 双向循环神经网络 (Bidirectional Recurrent Neural Network, Bi-RNN) 由两层循环神经网络组成, # 它们的输入相同, 只是信息传递的方向不同 # 网络可视化 # Use pandas to write the comma-separated output file # result_output.to_csv("./result/bi-lstm.csv", index=False, quoting=3) # 绘制训练 & 验证的损失值 | 2.689999 | 3 |
tests.py | Mahalinoro/game-of-life | 3 | 6630206 | import unittest
from Anime import Anime
from AnimeScraper import AnimeScraper
from Storage import Storage
from User import User, UserStorage
from password_hash import verify_password
from Recommender import Recommender
class TestUnitCase(unittest.TestCase):
def test_anime_object(self):
"""Anime Class Testing"""
anime1 = Anime('Naruto', 'TV', 'Shounen', 600, 24, 2006, "FALSE", 4, 8.7, 1258970, 4, 6, 1582963, 12574, 'Ninja Anime')
self.assertEqual(anime1.getTitle(), 'Naruto')
self.assertEqual(anime1.getType(), 'TV')
self.assertEqual(anime1.getEpisodeNumber(), 600)
self.assertEqual(anime1.getMembers(), 1582963)
self.assertEqual(anime1.getAiringStatus(), 'no')
def test_anime_scraper(self):
"""Anime Scraper Class Testing"""
url = AnimeScraper()
data1 = url.fetchAnime('https://myanimelist.net/topanime.php?type=airing', 'airing')
data2 = url.fetchAnime('https://myanimelist.net/topanime.php?type=upcoming', 'upcoming')
self.assertEqual(data1.__len__(), 50)
self.assertEqual(data2.__len__(), 50)
def test_storage_object(self):
"""Storage Class Testing"""
s = Storage()
s.add('Naruto', 'TV', 'Shounen', 600, 24, 2006, "FALSE", 4, 8.7, 1258970, 4, 6, 1582963, 12574, 'Ninja Anime')
self.assertEqual(s.get('Naruto'), 'Naruto')
self.assertEqual(s.contains('Naruto'), True)
s.delete('Naruto')
self.assertEqual(s.contains('Naruto'), False)
def test_user_object(self):
"""User Class Testing"""
user1 = User('hello', 'helloworld', ['Naruto', 'Kimetsu no Yaiba'])
self.assertEqual(user1.getUsername(), 'hello')
self.assertEqual(user1.getWatchedList(), ['Naruto', 'Kimetsu no Yaiba'])
def test_user_storage_object(self):
"""User Storage Class Testing"""
s = UserStorage()
user1 = User('hello', 'helloworld', ['Naruto', 'Kimetsu no Yaiba'])
s.addUser(user1.getUsername(), user1.password, user1.getWatchedList())
self.assertEqual('hello' in s.user_storage.keys(), True)
self.assertEqual(verify_password(s.user_storage['hello']['password'], '<PASSWORD>'), True)
s.removeUser('hello')
self.assertEqual('hello' in s.user_storage.keys(), False)
def test_parse_data(self):
"""Recommender Class Testing"""
r = Recommender()
s = Storage()
r.parseData(s, 'DataSource.csv')
self.assertEqual(s != {}, True)
if __name__ == "__main__":
unittest.main() | import unittest
from Anime import Anime
from AnimeScraper import AnimeScraper
from Storage import Storage
from User import User, UserStorage
from password_hash import verify_password
from Recommender import Recommender
class TestUnitCase(unittest.TestCase):
def test_anime_object(self):
"""Anime Class Testing"""
anime1 = Anime('Naruto', 'TV', 'Shounen', 600, 24, 2006, "FALSE", 4, 8.7, 1258970, 4, 6, 1582963, 12574, 'Ninja Anime')
self.assertEqual(anime1.getTitle(), 'Naruto')
self.assertEqual(anime1.getType(), 'TV')
self.assertEqual(anime1.getEpisodeNumber(), 600)
self.assertEqual(anime1.getMembers(), 1582963)
self.assertEqual(anime1.getAiringStatus(), 'no')
def test_anime_scraper(self):
"""Anime Scraper Class Testing"""
url = AnimeScraper()
data1 = url.fetchAnime('https://myanimelist.net/topanime.php?type=airing', 'airing')
data2 = url.fetchAnime('https://myanimelist.net/topanime.php?type=upcoming', 'upcoming')
self.assertEqual(data1.__len__(), 50)
self.assertEqual(data2.__len__(), 50)
def test_storage_object(self):
"""Storage Class Testing"""
s = Storage()
s.add('Naruto', 'TV', 'Shounen', 600, 24, 2006, "FALSE", 4, 8.7, 1258970, 4, 6, 1582963, 12574, 'Ninja Anime')
self.assertEqual(s.get('Naruto'), 'Naruto')
self.assertEqual(s.contains('Naruto'), True)
s.delete('Naruto')
self.assertEqual(s.contains('Naruto'), False)
def test_user_object(self):
"""User Class Testing"""
user1 = User('hello', 'helloworld', ['Naruto', 'Kimetsu no Yaiba'])
self.assertEqual(user1.getUsername(), 'hello')
self.assertEqual(user1.getWatchedList(), ['Naruto', 'Kimetsu no Yaiba'])
def test_user_storage_object(self):
"""User Storage Class Testing"""
s = UserStorage()
user1 = User('hello', 'helloworld', ['Naruto', 'Kimetsu no Yaiba'])
s.addUser(user1.getUsername(), user1.password, user1.getWatchedList())
self.assertEqual('hello' in s.user_storage.keys(), True)
self.assertEqual(verify_password(s.user_storage['hello']['password'], '<PASSWORD>'), True)
s.removeUser('hello')
self.assertEqual('hello' in s.user_storage.keys(), False)
def test_parse_data(self):
"""Recommender Class Testing"""
r = Recommender()
s = Storage()
r.parseData(s, 'DataSource.csv')
self.assertEqual(s != {}, True)
if __name__ == "__main__":
unittest.main() | en | 0.752156 | Anime Class Testing Anime Scraper Class Testing Storage Class Testing User Class Testing User Storage Class Testing Recommender Class Testing | 2.898906 | 3 |
generated_input.py | MaxLing/food-101-resnet | 3 | 6630207 | '''Visualization of the filters of resnet50, via gradient ascent in input space.
This script can run on CPU in a few minutes.
'''
from __future__ import print_function
from scipy.misc import imsave
import numpy as np
import time
from keras import models
from keras import backend as K
from keras.applications import resnet50
# dimensions of the generated pictures for each filter/class
img_width = 224
img_height = 224
# the name of the layer we want to visualize - last layer
layer_name = 'predictions'
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
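# Worked example (illustrative): for x = np.array([-1.0, 0.0, 1.0]) the
# standardisation above yields roughly [0.378, 0.5, 0.622] before the RGB
# conversion, so deprocess_image returns approximately [96, 127, 158] as uint8.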
K.set_learning_phase(1)
model = models.load_model('model/resnet50_9_norm_rmspop.h5')
# model = models.load_model('resnet50_notop.h5')
print('Model loaded.')
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
kept_filters = []
for filter_index in range(101):
# scan through all 101 output "filters" (one gradient-ascent image per food class)
print('Processing filter %d' % filter_index)
# we build a loss function that maximizes the activation
# of the nth filter of the layer considered
# layer_output = layer_dict[layer_name].output
# loss = K.mean(layer_output[:, :, :, filter_index])
loss = K.mean(model.output[:, filter_index])
# we compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# normalize the gradient tensor by its L2 norm
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
# step size for gradient ascent
step = 1
# we start from a gray image with some random noise
input_img_data = np.random.random((1, img_width, img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 30 steps
for i in range(30):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
print('Current loss value:', loss_value)
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
# we will stitch the first 25 filters on a 5 x 5 grid.
n = 5
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 25 filters.
# kept_filters.sort(key=lambda x: x[1], reverse=True)
# kept_filters = kept_filters[:n * n]
# # build a black picture with enough space for
# # our 5 x 5 filters of size 224 x 224, with a 5px margin in between
# margin = 5
# width = n * img_width + (n - 1) * margin
# height = n * img_height + (n - 1) * margin
# stitched_filters = np.zeros((width, height, 3))
#
# # fill the picture with our saved filters
# for i in range(n):
# for j in range(n):
# img, loss = kept_filters[i * n + j]
# stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
# (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
#
# # save the result to disk
# imsave('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
with open('ETHZ-FOOD-101/food-101/meta/classes.txt') as f:
classes = f.read().splitlines()
for i in range(101):
img, loss = kept_filters[i]
cla = classes[i]
imsave('generative/%s.png' % (cla), img) | '''Visualization of the filters of resnet50, via gradient ascent in input space.
This script can run on CPU in a few minutes.
'''
from __future__ import print_function
from scipy.misc import imsave
import numpy as np
import time
from keras import models
from keras import backend as K
from keras.applications import resnet50
# dimensions of the generated pictures for each filter/class
img_width = 224
img_height = 224
# the name of the layer we want to visualize - last layer
layer_name = 'predictions'
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
K.set_learning_phase(1)
model = models.load_model('model/resnet50_9_norm_rmspop.h5')
# model = models.load_model('resnet50_notop.h5')
print('Model loaded.')
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
kept_filters = []
for filter_index in range(101):
# scan through all 101 output "filters" (one gradient-ascent image per food class)
print('Processing filter %d' % filter_index)
# we build a loss function that maximizes the activation
# of the nth filter of the layer considered
# layer_output = layer_dict[layer_name].output
# loss = K.mean(layer_output[:, :, :, filter_index])
loss = K.mean(model.output[:, filter_index])
# we compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# normalize the gradient tensor by its L2 norm
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
# step size for gradient ascent
step = 1
# we start from a gray image with some random noise
input_img_data = np.random.random((1, img_width, img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 30 steps
for i in range(30):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
print('Current loss value:', loss_value)
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
# we will stitch the first 25 filters on a 5 x 5 grid.
n = 5
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 25 filters.
# kept_filters.sort(key=lambda x: x[1], reverse=True)
# kept_filters = kept_filters[:n * n]
# # build a black picture with enough space for
# # our 5 x 5 filters of size 224 x 224, with a 5px margin in between
# margin = 5
# width = n * img_width + (n - 1) * margin
# height = n * img_height + (n - 1) * margin
# stitched_filters = np.zeros((width, height, 3))
#
# # fill the picture with our saved filters
# for i in range(n):
# for j in range(n):
# img, loss = kept_filters[i * n + j]
# stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
# (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
#
# # save the result to disk
# imsave('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
with open('ETHZ-FOOD-101/food-101/meta/classes.txt') as f:
classes = f.read().splitlines()
for i in range(101):
img, loss = kept_filters[i]
cla = classes[i]
imsave('generative/%s.png' % (cla), img) | en | 0.722913 | Visualization of the filters of resnet50, via gradient ascent in input space. This script can run on CPU in a few minutes. # dimensions of the generated pictures for each filter/class # the name of the layer we want to visualize - last layer # util function to convert a tensor into a valid image # normalize tensor: center on 0., ensure std is 0.1 # clip to [0, 1] # convert to RGB array # model = models.load_model('resnet50_notop.h5') # this is the placeholder for the input images # get the symbolic outputs of each "key" layer (we gave them unique names). # we only scan through the first 25 filters (actually class) # we build a loss function that maximizes the activation # of the nth filter of the layer considered # layer_output = layer_dict[layer_name].output # loss = K.mean(layer_output[:, :, :, filter_index]) # we compute the gradient of the input picture wrt this loss # normalization a tensor by its L2 norm # this function returns the loss and grads given the input picture # step size for gradient ascent # we start from a gray image with some random noise # we run gradient ascent for 30 steps # some filters get stuck to 0, we can skip them # decode the resulting input image # we will stich the first 25 filters on a 5 x 5 grid. # the filters that have the highest loss are assumed to be better-looking. # we will only keep the top 25 filters. # kept_filters.sort(key=lambda x: x[1], reverse=True) # kept_filters = kept_filters[:n * n] # # build a black picture with enough space for # # our 5 x 5 filters of size 224 x 224, with a 5px margin in between # margin = 5 # width = n * img_width + (n - 1) * margin # height = n * img_height + (n - 1) * margin # stitched_filters = np.zeros((width, height, 3)) # # # fill the picture with our saved filters # for i in range(n): # for j in range(n): # img, loss = kept_filters[i * n + j] # stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width, # (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img # # # save the result to disk # imsave('stitched_filters_%dx%d.png' % (n, n), stitched_filters) | 3.212098 | 3 |
09_Implementing_Collections/test/test_sorted_set.py | MANOJPATRA1991/Python-Beyond-the-Basics | 0 | 6630208 | import unittest
from collections.abc import (Container, Sized, Iterable, Sequence, Set)
from .src.sorted_set import SortedSet
class TestConstruction(unittest.TestCase):
def test_empty(self):
s = SortedSet([])
def test_from_sequence(self):
s = SortedSet([7, 8, 3, 1])
def test_with_duplicates(self):
s = SortedSet([8, 8, 8])
def test_from_iterable(self):
def gen6842():
yield 6
yield 8
yield 4
yield 2
g = gen6842()
s = SortedSet(g)
def test_default_empty(self):
s = SortedSet()
class TestContainerProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([6, 7, 3, 9])
def test_positive_contained(self):
self.assertTrue(6 in self.s)
def test_negative_contained(self):
self.assertFalse(2 in self.s)
def test_positive_not_contained(self):
self.assertTrue(5 not in self.s)
def test_negative_not_contained(self):
self.assertFalse(9 not in self.s)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Container))
class TestSizedProtocol(unittest.TestCase):
def test_empty(self):
s = SortedSet()
self.assertEqual(len(s), 0)
def test_one(self):
s = SortedSet([42])
self.assertEqual(len(s), 1)
def test_ten(self):
s = SortedSet(range(10))
self.assertEqual(len(s), 10)
def test_with_duplicates(self):
s = SortedSet([5, 5, 5])
self.assertEqual(len(s), 1)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Sized))
class TestIterableProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([7, 2, 1, 1, 9])
def test_iter(self):
i = iter(self.s)
self.assertEqual(next(i), 1)
self.assertEqual(next(i), 2)
self.assertEqual(next(i), 7)
self.assertEqual(next(i), 9)
self.assertRaises(StopIteration, lambda: next(i))
def test_for_loop(self):
index = 0
expected = [1, 2, 7, 9]
for item in self.s:
self.assertEqual(item, expected[index])
index += 1
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Iterable))
class TestSequenceProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([1, 4, 9, 13, 15])
def test_index_zero(self):
self.assertEqual(self.s[0], 1)
def test_index_four(self):
self.assertEqual(self.s[4], 15)
def test_index_one_beyond_the_end(self):
with self.assertRaises(IndexError):
self.s[5]
def test_index_minus_one(self):
self.assertEqual(self.s[-1], 15)
def test_index_minus_five(self):
self.assertEqual(self.s[-5], 1)
def test_index_one_before_the_beginning(self):
with self.assertRaises(IndexError):
self.s[-6]
def test_slice_from_start(self):
self.assertEqual(self.s[:3], SortedSet([1, 4, 9]))
def test_slice_to_end(self):
self.assertEqual(self.s[2:], SortedSet([9, 13, 15]))
def test_slice_empty(self):
self.assertEqual(self.s[10:], SortedSet())
def test_slice_arbitrary(self):
self.assertEqual(self.s[2:4], SortedSet([9, 13]))
def test_full_slice(self):
self.assertEqual(self.s[:], self.s)
def test_reversed(self):
s = SortedSet([1, 3, 5, 7])
r = reversed(s)
self.assertEqual(next(r), 7)
self.assertEqual(next(r), 5)
self.assertEqual(next(r), 3)
self.assertEqual(next(r), 1)
with self.assertRaises(StopIteration):
next(r)
def test_index_positive(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.index(8), 2)
def test_index_negative(self):
s = SortedSet([1, 5, 8, 9])
with self.assertRaises(ValueError):
s.index(15)
def test_count_zero(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.count(11), 0)
def test_count_one(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.count(5), 1)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Sequence))
def test_concatenate_disjoint(self):
s = SortedSet([1, 2, 3])
t = SortedSet([4, 5, 6])
self.assertEqual(s + t, SortedSet([1, 2, 3, 4, 5, 6]))
def test_concatenate_equal(self):
s = SortedSet([2, 4, 6])
self.assertEqual(s + s, s)
def test_concatenate_intersecting(self):
s = SortedSet([1, 2, 3])
t = SortedSet([3, 4, 5])
self.assertEqual(s + t, SortedSet([1, 2, 3, 4, 5]))
def test_repetition_zero_left(self):
s = SortedSet([4, 5, 6])
self.assertEqual(0 * s, SortedSet())
def test_repetition_nonzero_left(self):
s = SortedSet([4, 5, 6])
self.assertEqual(100 * s, s)
def test_repetition_zero_right(self):
s = SortedSet([4, 5, 6])
self.assertEqual(s * 0, SortedSet())
def test_repetition_nonzero_right(self):
s = SortedSet([4, 5, 6])
self.assertEqual(s * 100, s)
class TestReprProtocol(unittest.TestCase):
# Python equality comparisons which are inherited from the ultimate base class object
# are for reference equality rather than value equality or equivalence
def test_repr_empty(self):
s = SortedSet()
self.assertEqual(repr(s), 'SortedSet()')
def test_repr_some(self):
s = SortedSet([42, 40, 19])
self.assertEqual(repr(s), 'SortedSet([19, 40, 42])')
class TestEqualityProtocol(unittest.TestCase):
def test_positive_equal(self):
self.assertTrue(SortedSet([4, 5, 6]) == SortedSet([4, 5, 6]))
def test_negative_equal(self):
self.assertFalse(SortedSet([4, 5, 6]) == SortedSet([1, 2, 3]))
def test_type_mismatch(self):
self.assertFalse(SortedSet([4, 5, 6]) == [4, 5, 6])
def test_identical(self):
s = SortedSet([10, 11, 12])
self.assertTrue(s == s)
class TestInequalityProtocol(unittest.TestCase):
def test_positive_unequal(self):
self.assertTrue(SortedSet([4, 5, 6]) != SortedSet([1, 2, 3]))
def test_negative_unequal(self):
self.assertFalse(SortedSet([4, 5, 6]) != SortedSet([6, 5, 4]))
def test_type_mismatch(self):
self.assertTrue(SortedSet([1, 2, 3]) != [1, 2, 3])
def test_identical(self):
s = SortedSet([10, 11, 12])
self.assertFalse(s != s)
class TestRelationalSetProtocol(unittest.TestCase):
def test_lt_positive(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertTrue(s < t)
def test_lt_negative(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertFalse(s < t)
def test_le_lt_positive(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertTrue(s <= t)
def test_le_eq_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertTrue(s <= t)
def test_le_negative(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertFalse(s <= t)
def test_gt_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertTrue(s > t)
def test_gt_negative(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertFalse(s > t)
def test_ge_gt_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertTrue(s >= t)
def test_ge_eq_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertTrue(s >= t)
def test_ge_negative(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertFalse(s >= t)
class TestSetRelationalMethods(unittest.TestCase):
def test_issubset_proper_positive(self):
s = SortedSet({1, 2})
t = [1, 2, 3]
self.assertTrue(s.issubset(t))
def test_issubset_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2, 3]
self.assertTrue(s.issubset(t))
def test_issubset_negative(self):
s = SortedSet({1, 2, 3})
t = [1, 2]
self.assertFalse(s.issubset(t))
def test_issuperset_proper_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2]
self.assertTrue(s.issuperset(t))
def test_issuperset_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2, 3]
self.assertTrue(s.issuperset(t))
def test_issuperset_negative(self):
s = SortedSet({1, 2})
t = [1, 2, 3]
self.assertFalse(s.issuperset(t))
class TestSetOperationsMethods(unittest.TestCase):
def test_intersection(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.intersection(t), SortedSet({2, 3}))
def test_union(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.union(t), SortedSet({1, 2, 3, 4}))
def test_symmetric_difference(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.symmetric_difference(t), SortedSet({1, 4}))
def test_difference(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.difference(t), SortedSet({1}))
def test_isdisjoint_positive(self):
s = SortedSet({1, 2, 3})
t = [4, 5, 6]
self.assertTrue(s.isdisjoint(t))
def test_isdisjoint_negative(self):
s = SortedSet({1, 2, 3})
t = [3, 4, 5]
self.assertFalse(s.isdisjoint(t))
class TestSetProtocol(unittest.TestCase):
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Set))
if __name__ == '__main__':
unittest.main()
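# Illustrative sketch (hypothetical; the tests above import the real implementation
# from .src.sorted_set, which may differ): a minimal SortedSet over a sorted list
# that satisfies the container, sized, iterable, sequence and set protocols these
# tests exercise. Names and details below are assumptions for illustration only.
from bisect import bisect_left
from collections.abc import Sequence as _Sequence, Set as _Set
from itertools import chain


class SortedSetSketch(_Sequence, _Set):
    def __init__(self, items=None):
        self._items = sorted(set(items)) if items is not None else []

    def __contains__(self, item):
        index = bisect_left(self._items, item)
        return index != len(self._items) and self._items[index] == item

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        return iter(self._items)

    def __getitem__(self, index):
        result = self._items[index]
        return SortedSetSketch(result) if isinstance(index, slice) else result

    def __eq__(self, rhs):
        if not isinstance(rhs, SortedSetSketch):
            return NotImplemented
        return self._items == rhs._items

    def __add__(self, rhs):
        return SortedSetSketch(chain(self._items, rhs._items))

    def __mul__(self, count):
        return SortedSetSketch(self._items) if count > 0 else SortedSetSketch()

    __rmul__ = __mul__

    # index() and count() come from the Sequence mixin; <, <=, >, >=, &, |, ^, -
    # and isdisjoint() come from the Set mixin, so the named set methods delegate:
    def issubset(self, iterable):
        return self <= SortedSetSketch(iterable)

    def issuperset(self, iterable):
        return self >= SortedSetSketch(iterable)

    def intersection(self, iterable):
        return self & SortedSetSketch(iterable)

    def union(self, iterable):
        return self | SortedSetSketch(iterable)

    def symmetric_difference(self, iterable):
        return self ^ SortedSetSketch(iterable)

    def difference(self, iterable):
        return self - SortedSetSketch(iterable)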
| import unittest
from collections.abc import (Container, Sized, Iterable, Sequence, Set)
from .src.sorted_set import SortedSet
class TestConstruction(unittest.TestCase):
def test_empty(self):
s = SortedSet([])
def test_from_sequence(self):
s = SortedSet([7, 8, 3, 1])
def test_with_duplicates(self):
s = SortedSet([8, 8, 8])
def test_from_iterable(self):
def gen6842():
yield 6
yield 8
yield 4
yield 2
g = gen6842()
s = SortedSet(g)
def test_default_empty(self):
s = SortedSet()
class TestContainerProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([6, 7, 3, 9])
def test_positive_contained(self):
self.assertTrue(6 in self.s)
def test_negative_contained(self):
self.assertFalse(2 in self.s)
def test_positive_not_contained(self):
self.assertTrue(5 not in self.s)
def test_negative_not_contained(self):
self.assertFalse(9 not in self.s)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Container))
class TestSizedProtocol(unittest.TestCase):
def test_empty(self):
s = SortedSet()
self.assertEqual(len(s), 0)
def test_one(self):
s = SortedSet([42])
self.assertEqual(len(s), 1)
def test_ten(self):
s = SortedSet(range(10))
self.assertEqual(len(s), 10)
def test_with_duplicates(self):
s = SortedSet([5, 5, 5])
self.assertEqual(len(s), 1)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Sized))
class TestIterableProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([7, 2, 1, 1, 9])
def test_iter(self):
i = iter(self.s)
self.assertEqual(next(i), 1)
self.assertEqual(next(i), 2)
self.assertEqual(next(i), 7)
self.assertEqual(next(i), 9)
self.assertRaises(StopIteration, lambda: next(i))
def test_for_loop(self):
index = 0
expected = [1, 2, 7, 9]
for item in self.s:
self.assertEqual(item, expected[index])
index += 1
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Iterable))
class TestSequenceProtocol(unittest.TestCase):
def setUp(self):
self.s = SortedSet([1, 4, 9, 13, 15])
def test_index_zero(self):
self.assertEqual(self.s[0], 1)
def test_index_four(self):
self.assertEqual(self.s[4], 15)
def test_index_one_beyond_the_end(self):
with self.assertRaises(IndexError):
self.s[5]
def test_index_minus_one(self):
self.assertEqual(self.s[-1], 15)
def test_index_minus_five(self):
self.assertEqual(self.s[-5], 1)
def test_index_one_before_the_beginning(self):
with self.assertRaises(IndexError):
self.s[-6]
def test_slice_from_start(self):
self.assertEqual(self.s[:3], SortedSet([1, 4, 9]))
def test_slice_to_end(self):
self.assertEqual(self.s[2:], SortedSet([9, 13, 15]))
def test_slice_empty(self):
self.assertEqual(self.s[10:], SortedSet())
def test_slice_arbitrary(self):
self.assertEqual(self.s[2:4], SortedSet([9, 13]))
def test_full_slice(self):
self.assertEqual(self.s[:], self.s)
def test_reversed(self):
s = SortedSet([1, 3, 5, 7])
r = reversed(s)
self.assertEqual(next(r), 7)
self.assertEqual(next(r), 5)
self.assertEqual(next(r), 3)
self.assertEqual(next(r), 1)
with self.assertRaises(StopIteration):
next(r)
def test_index_positive(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.index(8), 2)
def test_index_negative(self):
s = SortedSet([1, 5, 8, 9])
with self.assertRaises(ValueError):
s.index(15)
def test_count_zero(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.count(11), 0)
def test_count_one(self):
s = SortedSet([1, 5, 8, 9])
self.assertEqual(s.count(5), 1)
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Sequence))
def test_concatenate_disjoint(self):
s = SortedSet([1, 2, 3])
t = SortedSet([4, 5, 6])
self.assertEqual(s + t, SortedSet([1, 2, 3, 4, 5, 6]))
def test_concatenate_equal(self):
s = SortedSet([2, 4, 6])
self.assertEqual(s + s, s)
def test_concatenate_intersecting(self):
s = SortedSet([1, 2, 3])
t = SortedSet([3, 4, 5])
self.assertEqual(s + t, SortedSet([1, 2, 3, 4, 5]))
def test_repetition_zero_left(self):
s = SortedSet([4, 5, 6])
self.assertEqual(0 * s, SortedSet())
def test_repetition_nonzero_left(self):
s = SortedSet([4, 5, 6])
self.assertEqual(100 * s, s)
def test_repetition_zero_right(self):
s = SortedSet([4, 5, 6])
self.assertEqual(s * 0, SortedSet())
def test_repetition_nonzero_right(self):
s = SortedSet([4, 5, 6])
self.assertEqual(s * 100, s)
class TestReprProtocol(unittest.TestCase):
# Python equality comparisons which are inherited from the ultimate base class object
# are for reference equality rather than value equality or equivalence
def test_repr_empty(self):
s = SortedSet()
self.assertEqual(repr(s), 'SortedSet()')
def test_repr_some(self):
s = SortedSet([42, 40, 19])
self.assertEqual(repr(s), 'SortedSet([19, 40, 42])')
class TestEqualityProtocol(unittest.TestCase):
def test_positive_equal(self):
self.assertTrue(SortedSet([4, 5, 6]) == SortedSet([4, 5, 6]))
def test_negative_equal(self):
self.assertFalse(SortedSet([4, 5, 6]) == SortedSet([1, 2, 3]))
def test_type_mismatch(self):
self.assertFalse(SortedSet([4, 5, 6]) == [4, 5, 6])
def test_identical(self):
s = SortedSet([10, 11, 12])
self.assertTrue(s == s)
class TestInequalityProtocol(unittest.TestCase):
def test_positive_unequal(self):
self.assertTrue(SortedSet([4, 5, 6]) != SortedSet([1, 2, 3]))
def test_negative_unequal(self):
self.assertFalse(SortedSet([4, 5, 6]) != SortedSet([6, 5, 4]))
def test_type_mismatch(self):
self.assertTrue(SortedSet([1, 2, 3]) != [1, 2, 3])
def test_identical(self):
s = SortedSet([10, 11, 12])
self.assertFalse(s != s)
class TestRelationalSetProtocol(unittest.TestCase):
def test_lt_positive(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertTrue(s < t)
def test_lt_negative(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertFalse(s < t)
def test_le_lt_positive(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertTrue(s <= t)
def test_le_eq_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertTrue(s <= t)
def test_le_negative(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertFalse(s <= t)
def test_gt_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertTrue(s > t)
def test_gt_negative(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertFalse(s > t)
def test_ge_gt_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2})
self.assertTrue(s >= t)
def test_ge_eq_positive(self):
s = SortedSet({1, 2, 3})
t = SortedSet({1, 2, 3})
self.assertTrue(s >= t)
def test_ge_negative(self):
s = SortedSet({1, 2})
t = SortedSet({1, 2, 3})
self.assertFalse(s >= t)
class TestSetRelationalMethods(unittest.TestCase):
def test_issubset_proper_positive(self):
s = SortedSet({1, 2})
t = [1, 2, 3]
self.assertTrue(s.issubset(t))
def test_issubset_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2, 3]
self.assertTrue(s.issubset(t))
def test_issubset_negative(self):
s = SortedSet({1, 2, 3})
t = [1, 2]
self.assertFalse(s.issubset(t))
def test_issuperset_proper_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2]
self.assertTrue(s.issuperset(t))
def test_issuperset_positive(self):
s = SortedSet({1, 2, 3})
t = [1, 2, 3]
self.assertTrue(s.issuperset(t))
def test_issuperset_negative(self):
s = SortedSet({1, 2})
t = [1, 2, 3]
self.assertFalse(s.issuperset(t))
class TestSetOperationsMethods(unittest.TestCase):
def test_intersection(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.intersection(t), SortedSet({2, 3}))
def test_union(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.union(t), SortedSet({1, 2, 3, 4}))
def test_symmetric_difference(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.symmetric_difference(t), SortedSet({1, 4}))
def test_difference(self):
s = SortedSet({1, 2, 3})
t = [2, 3, 4]
self.assertEqual(s.difference(t), SortedSet({1}))
def test_isdisjoint_positive(self):
s = SortedSet({1, 2, 3})
t = [4, 5, 6]
self.assertTrue(s.isdisjoint(t))
def test_isdisjoint_negative(self):
s = SortedSet({1, 2, 3})
t = [3, 4, 5]
self.assertFalse(s.isdisjoint(t))
class TestSetProtocol(unittest.TestCase):
def test_protocol(self):
self.assertTrue(issubclass(SortedSet, Set))
if __name__ == '__main__':
unittest.main()
| en | 0.903311 | # Python equality comparisons which are inherited from the ultimate base class object # are for reference equality rather than value equality or equivalence | 3.16195 | 3 |
exer33.py | marvely/learn_python_the_hard_way | 0 | 6630209 | # ++++++++++++++ while loop +++++++++++++++++++++ #
i = 0
numbers = []
while i < 6:
print "At the top i is %d." % i
numbers.append(i)
i += 1
print "Number's now:", numbers
print "At the bottom i is %d." % i
print "The numbers: "
for num in numbers:
print num
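# Study-drill sketch (not part of the original exercise): the same list built
# with a for-loop over range(); the hard-coded 6 mirrors the while-loop above.
numbers_from_for = []
for j in range(0, 6):
    numbers_from_for.append(j)
# numbers_from_for is now [0, 1, 2, 3, 4, 5], matching `numbers`.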
| # ++++++++++++++ while loop +++++++++++++++++++++ #
i = 0
numbers = []
while i < 6:
print "At the top i is %d." % i
numbers.append(i)
i += 1
print "Number's now:", numbers
print "At the bottom i is %d." % i
print "The numbers: "
for num in numbers:
print num
| en | 0.566343 | # ++++++++++++++ while loop +++++++++++++++++++++ # | 4.118906 | 4 |
Practice/AllDomains/Languages/Python/Strings/Mutations.py | DHS009/HackerRankSolutions | 15 | 6630210 | #/* author:@shivkrthakur */
# Enter your code here. Read input from STDIN. Print output to STDOUT
inputstr1 = raw_input().strip();
inputstr2 = raw_input().strip().split();
index = int(inputstr2[0])
outputstr = inputstr1[0:index] + inputstr2[1] + inputstr1[index+1:len(inputstr1)]
print outputstr
#print inputstr1[index+1:len(inputstr1)]
#print("".join(strlist))
| #/* author:@shivkrthakur */
# Enter your code here. Read input from STDIN. Print output to STDOUT
inputstr1 = raw_input().strip();
inputstr2 = raw_input().strip().split();
index = int(inputstr2[0])
outputstr = inputstr1[0:index] + inputstr2[1] + inputstr1[index+1:len(inputstr1)]
print outputstr
#print inputstr1[index+1:len(inputstr1)]
#print("".join(strlist)) | en | 0.37396 | #/* author:@shivkrthakur */ # Enter your code here. Read input from STDIN. Print output to STDOUT #print inputstr1[index+1:len(inputstr1)] #print("".join(strlist)) | 3.626806 | 4 |
src/snowflake/connector/test_util.py | 666Chao666/snowflake-connector-python | 0 | 6630211 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import logging
import os
from .compat import IS_LINUX
RUNNING_ON_JENKINS = os.getenv('JENKINS_HOME') is not None
REGRESSION_TEST_LOG_DIR = os.getenv('CLIENT_LOG_DIR_PATH_DOCKER', '/tmp')
ENABLE_TELEMETRY_LOG = RUNNING_ON_JENKINS and IS_LINUX
rt_plain_logger = None
if ENABLE_TELEMETRY_LOG:
rt_plain_logger = logging.getLogger('regression.test.plain.logger')
rt_plain_logger.setLevel(logging.DEBUG)
ch = logging.FileHandler(os.path.join(REGRESSION_TEST_LOG_DIR, 'snowflake_ssm_rt_telemetry.log'))
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s'))
rt_plain_logger.addHandler(ch)
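# Illustrative usage (assumption: regression tests import these names from this
# module and only log when the flag is set):
#
#     from snowflake.connector.test_util import ENABLE_TELEMETRY_LOG, rt_plain_logger
#
#     if ENABLE_TELEMETRY_LOG:
#         rt_plain_logger.debug("telemetry event: %s", "example payload")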
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import logging
import os
from .compat import IS_LINUX
RUNNING_ON_JENKINS = os.getenv('JENKINS_HOME') is not None
REGRESSION_TEST_LOG_DIR = os.getenv('CLIENT_LOG_DIR_PATH_DOCKER', '/tmp')
ENABLE_TELEMETRY_LOG = RUNNING_ON_JENKINS and IS_LINUX
rt_plain_logger = None
if ENABLE_TELEMETRY_LOG:
rt_plain_logger = logging.getLogger('regression.test.plain.logger')
rt_plain_logger.setLevel(logging.DEBUG)
ch = logging.FileHandler(os.path.join(REGRESSION_TEST_LOG_DIR, 'snowflake_ssm_rt_telemetry.log'))
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s'))
rt_plain_logger.addHandler(ch)
| en | 0.585445 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved. # | 1.784744 | 2 |
HPOBenchExperimentUtils/utils/dragonfly_utils.py | PhMueller/TrajectoryParser | 0 | 6630212 | from math import exp, log, floor
from typing import List, Dict, Tuple, Union, Callable
from pathlib import Path
from argparse import Namespace
import logging
import os, uuid, sys
import json
_log = logging.getLogger(__name__)
# -------------------------------Begin code adapted directly from the dragonfly repo------------------------------------
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import Hyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter, \
CategoricalHyperparameter, OrdinalHyperparameter, Constant
from dragonfly.exd.exd_utils import get_unique_list_of_option_args
from dragonfly.utils.option_handler import get_option_specs
# Get options
from dragonfly.opt.ga_optimiser import ga_opt_args
from dragonfly.opt.gp_bandit import get_all_euc_gp_bandit_args, \
get_all_cp_gp_bandit_args, get_all_mf_euc_gp_bandit_args, \
get_all_mf_cp_gp_bandit_args
from dragonfly.opt.random_optimiser import euclidean_random_optimiser_args, \
mf_euclidean_random_optimiser_args, \
cp_random_optimiser_args, mf_cp_random_optimiser_args
from dragonfly.opt.multiobjective_gp_bandit import get_all_euc_moo_gp_bandit_args, \
get_all_cp_moo_gp_bandit_args
from dragonfly.opt.random_multiobjective_optimiser import \
euclidean_random_multiobjective_optimiser_args, \
cp_random_multiobjective_optimiser_args
from dragonfly.utils.option_handler import load_options
_dragonfly_args = [
# get_option_specs('config', False, None, 'Path to the json or pb config file. '),
# get_option_specs('options', False, None, 'Path to the options file. '),
get_option_specs('max_or_min', False, 'max', 'Whether to maximise or minimise. '),
get_option_specs('max_capital', False, -1.0,
'Maximum capital (available budget) to be used in the experiment. '),
get_option_specs('capital_type', False, 'return_value',
'Maximum capital (available budget) to be used in the experiment. '),
get_option_specs('is_multi_objective', False, 0,
'If True, will treat it as a multiobjective optimisation problem. '),
get_option_specs('opt_method', False, 'bo',
('Optimisation method. Default is bo. This should be one of bo, ga, ea, direct, ' +
' pdoo, or rand, but not all methods apply to all problems.')),
get_option_specs('report_progress', False, 'default',
('How to report progress. Should be one of "default" (prints to stdout), ' +
'"silent" (no reporting), or a filename (writes to file).')),
]
def _get_command_line_args():
""" Returns all arguments for the command line. """
ret = _dragonfly_args + \
ga_opt_args + \
euclidean_random_optimiser_args + cp_random_optimiser_args + \
mf_euclidean_random_optimiser_args + mf_cp_random_optimiser_args + \
get_all_euc_gp_bandit_args() + get_all_cp_gp_bandit_args() + \
get_all_mf_euc_gp_bandit_args() + get_all_mf_cp_gp_bandit_args() + \
euclidean_random_multiobjective_optimiser_args + \
cp_random_multiobjective_optimiser_args + \
get_all_euc_moo_gp_bandit_args() + get_all_cp_moo_gp_bandit_args()
return get_unique_list_of_option_args(ret)
# ---------------------------------End code adapted directly from the dragonfly repo------------------------------------
from dragonfly.parse.config_parser import load_parameters
from dragonfly.exd.cp_domain_utils import load_config
def load_dragonfly_options(hpoexp_settings: Dict, config: Dict) -> Tuple[Namespace, Dict]:
""" Interpret the options provided by HPOBenchExperimentUtils to those compatible with dragonfly. """
partial_options = {
"max_or_min": "min",
"capital_type": "num_evals",
"max_capital": sys.maxsize,
# Dragonfly prioritises init_capital > init_capital_frac > num_init_evals
"init_capital": None,
"init_capital_frac": None,
}
try:
init_eval = hpoexp_settings["init_iter_per_dim"]
except KeyError:
_log.debug("Could not read the number of initial evaluations for the optimizer, switching to a realtime "
"budget.")
budget = hpoexp_settings["time_limit_in_s"]
        try:
            # dict.get() would silently return None here; index directly so a missing key is actually raised
            init_frac = hpoexp_settings["init_capital_frac"]
        except KeyError as e:
            raise RuntimeError("Could not read an initial budget for the optimizer. Either 'init_iter_per_dim' or "
                               "'init_capital_frac' must be specified in the optimizer settings of dragonfly.") from e
else:
_log.debug("Setting dragonfly to use a realtime budget and a fraction of the benchmark budget for "
"initialization.")
partial_options.update({
"capital_type": "realtime",
"max_capital": float("inf"),
"init_capital": budget * init_frac
})
else:
_log.debug("Setting dragonfly to use a number of evaluations based budget and an initialization budget based "
"on the size of the benchmark configuration space.")
partial_options["num_init_evals"] = init_eval * len(config["domain"])
_log.debug("Passing these settings to the dragonfly optimizer:\n%s" % json.dumps(partial_options, indent=4))
options = load_options(_get_command_line_args(), partial_options=partial_options, cmd_line=False)
config = load_config(load_parameters(config))
return options, config
def _handler_unknown(hyp):
raise RuntimeError("No valid handler available for hyperparameter of type %s" % type(hyp))
def _handle_uniform_float(hyper: UniformFloatHyperparameter) -> Tuple[Dict, Callable, Callable, float]:
"""
Handles the mapping of ConfigSpace.UniformFloatHyperparameter objects to dragonfly's 'float' parameters.
Caveats:
- Dragonfly does not support sampling on a log scale, therefore this mapping will instead ask dragonfly to
uniformly sample values in the range [log(lower), log(upper)], and then forward the exponentiated sampled
values to the objective function.
- It is assumed that the costs are directly proportional to the sampled value, such that the minimum value
corresponds to a cost of 0 and the maximum value corresponds to a cost of 1.
"""
domain = {
'name': hyper.name,
'type': 'float',
'min': log(hyper.lower) if hyper.log else hyper.lower,
'max': log(hyper.upper) if hyper.log else hyper.upper
}
parser = (lambda x: float(exp(x))) if hyper.log else (lambda x: float(x))
# Here, x is in the mapped space!
cost = lambda x: (x - domain['min']) / (domain['max'] - domain['min'])
return domain, parser, cost, domain['max']
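# Illustrative sketch (not part of the original module): for a log-scaled parameter such as
#     hyper = UniformFloatHyperparameter("lr", lower=1e-4, upper=1e-1, log=True)
#     domain, parser, cost, maximum = _handle_uniform_float(hyper)
# dragonfly samples in [log(1e-4), log(1e-1)]; parser(domain['min']) maps back to roughly 1e-4
# via exp(), and cost(domain['max']) evaluates to 1.0 by construction.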
def _handle_uniform_int(hyper: UniformFloatHyperparameter) -> Tuple[Dict, Callable, Callable, Union[int, float]]:
"""
Handles the mapping of ConfigSpace.UniformFloatHyperparameter objects to dragonfly's 'int' parameters.
Caveats:
- Dragonfly does not support sampling on a log scale, therefore this mapping will instead ask dragonfly to
uniformly sample integers in the range [floor(log(lower)), floor(log(upper))], and then forward the
exponentiated sampled values to the objective function.
- It is assumed that the costs are a directly proportional to the sampled value, such that the minimum value
corresponds to a cost of 0 and the maximum value corresponds to a cost of 1.
"""
if hyper.log:
lower = log(hyper.lower)
upper = log(hyper.upper)
width = upper - lower
domain = {
'name': hyper.name,
'type': 'float',
'min': 0.0,
'max': 1.0
}
# Here, x is in the dragonfly space!
parser = lambda x: round(exp(x * width + lower))
cost = lambda x: x
return domain, parser, cost, domain['max']
else:
domain = {
'name': hyper.name,
'type': 'int',
'min': hyper.lower,
'max': hyper.upper
}
# Here, x is in the dragonfly space!
parser = lambda x: int(x)
cost = lambda x: (x - hyper.lower + 1) / (hyper.upper - hyper.lower + 1)
return domain, parser, cost, domain['max']
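# Illustrative sketch (hypothetical values): for a log-scaled integer such as
#     hyper = UniformIntegerHyperparameter("batch_size", lower=1, upper=256, log=True)
#     domain, parser, cost, maximum = _handle_uniform_int(hyper)
# dragonfly samples x in [0.0, 1.0]; parser(0.0) -> 1 and parser(1.0) -> 256, with intermediate
# points mapped through round(exp(x * (log(256) - log(1)) + log(1))).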
def _handle_categorical(hyper: CategoricalHyperparameter) -> Tuple[Dict, Callable, Callable, str]:
"""
Handles the mapping of ConfigSpace.CategoricalHyperparameter objects to dragonfly's 'discrete' parameters.
Caveats:
- Dragonfly cannot handle non-uniform item weights.
- The items will be internally stored as a list and dragonfly will only be provided the indices of the items
as a categorical parameter to choose from.
- It is assumed that each individual choice incurs exactly the same cost, 1/N, where N is the number of choices.
- Dragonfly will read the indices as strings.
"""
if not isinstance(hyper.choices, (list, tuple)):
raise TypeError("Expected choices to be either list or tuple, received %s" % str(type(hyper.choices)))
if hyper.probabilities is not None:
if not hyper.probabilities[:-1] == hyper.probabilities[1:]:
raise ValueError("Dragonfly does not support categorical parameters with non-uniform weights.")
n = len(hyper.choices)
choices = tuple(hyper.choices)
domain = {
'name': hyper.name,
'type': 'discrete',
'items': '-'.join([str(i) for i in range(n)])
}
parser = lambda x: choices[int(x)]
cost = lambda x: 1. / n
return domain, parser, cost, str(n - 1)
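# Illustrative sketch: for CategoricalHyperparameter("opt", choices=("sgd", "adam", "rmsprop"))
# the domain items become the index string "0-1-2"; dragonfly proposes an index (read as a string),
# so parser("2") -> "rmsprop", and every choice is assigned the same cost of 1/3.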
def _handle_ordinal(hyper: OrdinalHyperparameter) -> Tuple[Dict, Callable, Callable, int]:
"""
Handles the mapping of ConfigSpace.OrdinalHyperparameter objects to dragonfly's 'discrete_numeric' parameters.
Caveats:
- The only difference between an Ordinal and a Categorical is the meta-information of item ordering, which is
not useful for dragonfly in any case, therefore dragonfly is only provided indices to an internally stored
ordered sequence.
- It is assumed that the costs are directly proportional to the index location of the sampled value, such that
the item with index 0 or the first item in the sequence incurs a cost of 0 and the last item incurs a
cost of 1.
"""
sequence = hyper.sequence
if not isinstance(sequence, (list, tuple)):
raise TypeError("Expected sequence to be either list or tuple, received %s" % str(type(sequence)))
n = len(sequence) - 1
domain = {
'name': hyper.name,
'type': 'int',
'min': 0,
'max': n # Dragonfly uses the closed interval [min, max]
}
parser = lambda x: sequence[x]
cost = lambda x: x / n
return domain, parser, cost, domain['max']
_handlers = {
UniformFloatHyperparameter: _handle_uniform_float,
UniformIntegerHyperparameter: _handle_uniform_int,
CategoricalHyperparameter: _handle_categorical,
OrdinalHyperparameter: _handle_ordinal
}
def _configspace_to_dragonfly(params: List[Hyperparameter]) -> Tuple[Dict, List, List, List]:
dragonfly_dict = {}
parsers = []
costs = []
maxima = []
for param in params:
d, p, c, m = _handlers.get(type(param), _handler_unknown)(param)
_log.debug("Mapped ConfigSpace Hyperparameter %s to dragonfly domain %s" % (str(param), str(d)))
dragonfly_dict[param.name] = d
parsers.append((param.name, p))
costs.append(c)
maxima.append(m)
return dragonfly_dict, parsers, costs, maxima
def configspace_to_dragonfly(domain_cs: ConfigurationSpace, name="hpobench_benchmark",
fidelity_cs: ConfigurationSpace = None) -> \
Tuple[Dict, List, Union[List, None], Union[List, None]]:
domain, domain_parsers, _, _ = _configspace_to_dragonfly(domain_cs.get_hyperparameters())
out = {'name': name, 'domain': domain}
if fidelity_cs:
# fidelity_space, fidelity_parsers = _generate_xgboost_fidelity_space(fidelity_cs)
fidelity_space, fidelity_parsers, fidelity_costs, fidelity_maxima = \
_configspace_to_dragonfly(fidelity_cs.get_hyperparameters())
out['fidel_space'] = fidelity_space
out['fidel_to_opt'] = fidelity_maxima
_log.debug("Generated fidelity space %s\nFidelity optimization target: %s" %
(out['fidel_space'], out['fidel_to_opt']))
return out, domain_parsers, fidelity_parsers, fidelity_costs
else:
return out, domain_parsers, None, None
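# Minimal usage sketch (the `benchmark` object and its methods are assumptions for illustration only):
#     config, domain_parsers, fidelity_parsers, fidelity_costs = configspace_to_dragonfly(
#         benchmark.get_configuration_space(), fidelity_cs=benchmark.get_fidelity_space())
#     # given a point `pt` proposed by dragonfly, one value per domain entry:
#     parsed = {name: parse(value) for (name, parse), value in zip(domain_parsers, pt)}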
def generate_trajectory(history: Namespace, save_file: Path, is_cp=False, history_file=None):
"""
Given the history generated by a call to minimise_function in dragonfly, generates a SMAC-like trajectory and
saves it as the given file. The parameter save_file should be the full path of the filename to which the history is
to be saved. The is_cp flag indicates that a Cartesian Product space was used, thus affecting the output format. If
a history_file is specified, the dragonfly run history will be dumped to that file.
"""
if history_file is not None:
history_file = Path(history_file)
if not history_file.is_absolute():
            history_file = history_file.expanduser().resolve()  # Path methods return a new object, so re-assign
recorded_history = []
save_history = True
else:
save_history = False
trajectories = []
incumbent = {
"cpu_time": float(0),
"wallclock_time": float(0),
"evaluations": int(0),
"cost": float('inf'),
"incumbent": None,
"origin": "xxx"
}
update = False
for qinfo in history.query_qinfos:
# Remember, dragonfly maximizes.
# In the history namespace, query_true_vals refers to the values used for maximization, and query_vals refers
# to the actual value returned from the objective function. This means that if the optimizer was told to
# minimize instead of maximize, query_true_vals will be the negated query_vals. However, the corresponding
# fields in each query_qinfo do not follow this convention and always contain the value used for maximization.
if -qinfo.val < incumbent["cost"]:
incumbent = {
"cpu_time": qinfo.receive_time,
"wallclock_time": qinfo.receive_time,
"evaluations": qinfo.step_idx,
"cost": -qinfo.val,
"incumbent": [list(pt) for pt in qinfo.point] if is_cp else list(qinfo.point),
"origin": "xxx" if not hasattr(qinfo, "curr_acq") else qinfo.curr_acq
}
update = True
if not trajectories or update:
trajectories.append(incumbent)
update = False
if save_history:
recorded_history.append({
"cpu_time": qinfo.receive_time,
"wallclock_time": qinfo.receive_time,
"evaluations": qinfo.step_idx,
"cost": -qinfo.val,
"incumbent": [list(pt) for pt in qinfo.point] if is_cp else list(qinfo.point),
"origin": "xxx" if not hasattr(qinfo, "curr_acq") else qinfo.curr_acq
})
    with open(save_file, "w") as f:
        f.write("\n".join([json.dumps(t, indent=4) for t in trajectories]))
if save_history:
with open(history_file, 'w') as fp:
json.dump(recorded_history, fp, indent=4)
print("Finished writing trajectories file.")
def change_cwd(tries=5):
if tries <= 0:
raise RuntimeError("Could not create random temporary dragonfly directory due to timeout.")
tmp_dir = Path(os.getenv('TMPDIR', "/tmp")) / "dragonfly" / str(uuid.uuid4())
try:
tmp_dir.mkdir(parents=True, exist_ok=False)
except FileExistsError:
change_cwd(tries=tries - 1)
except PermissionError as e:
_log.debug("Encountered PermissionError: %s" % e.strerror)
change_cwd(tries=tries - 1)
else:
os.chdir(tmp_dir)
_log.debug("Switched to temporary directory %s" % str(tmp_dir))
return
| en | 0.779923 | # -------------------------------Begin code adapted directly from the dragonfly repo------------------------------------ # Get options # get_option_specs('config', False, None, 'Path to the json or pb config file. '), # get_option_specs('options', False, None, 'Path to the options file. '), Returns all arguments for the command line. # ---------------------------------End code adapted directly from the dragonfly repo------------------------------------ Interpret the options provided by HPOBenchExperimentUtils to those compatible with dragonfly. # Dragonfly prioritises init_capital > init_capital_frac > num_init_evals Handles the mapping of ConfigSpace.UniformFloatHyperparameter objects to dragonfly's 'float' parameters. Caveats: - Dragonfly does not support sampling on a log scale, therefore this mapping will instead ask dragonfly to uniformly sample values in the range [log(lower), log(upper)], and then forward the exponentiated sampled values to the objective function. - It is assumed that the costs are directly proportional to the sampled value, such that the minimum value corresponds to a cost of 0 and the maximum value corresponds to a cost of 1. # Here, x is in the mapped space! Handles the mapping of ConfigSpace.UniformFloatHyperparameter objects to dragonfly's 'int' parameters. Caveats: - Dragonfly does not support sampling on a log scale, therefore this mapping will instead ask dragonfly to uniformly sample integers in the range [floor(log(lower)), floor(log(upper))], and then forward the exponentiated sampled values to the objective function. - It is assumed that the costs are a directly proportional to the sampled value, such that the minimum value corresponds to a cost of 0 and the maximum value corresponds to a cost of 1. # Here, x is in the dragonfly space! # Here, x is in the dragonfly space! Handles the mapping of ConfigSpace.CategoricalHyperparameter objects to dragonfly's 'discrete' parameters. Caveats: - Dragonfly cannot handle non-uniform item weights. - The items will be internally stored as a list and dragonfly will only be provided the indices of the items as a categorical parameter to choose from. - It is assumed that each individual choice incurs exactly the same cost, 1/N, where N is the number of choices. - Dragonfly will read the indices as strings. Handles the mapping of ConfigSpace.OrdinalHyperparameter objects to dragonfly's 'discrete_numeric' parameters. Caveats: - The only difference between an Ordinal and a Categorical is the meta-information of item ordering, which is not useful for dragonfly in any case, therefore dragonfly is only provided indices to an internally stored ordered sequence. - It is assumed that the costs are directly proportional to the index location of the sampled value, such that the item with index 0 or the first item in the sequence incurs a cost of 0 and the last item incurs a cost of 1. # Dragonfly uses the closed interval [min, max] # fidelity_space, fidelity_parsers = _generate_xgboost_fidelity_space(fidelity_cs) Given the history generated by a call to minimise_function in dragonfly, generates a SMAC-like trajectory and saves it as the given file. The parameter save_file should be the full path of the filename to which the history is to be saved. The is_cp flag indicates that a Cartesian Product space was used, thus affecting the output format. If a history_file is specified, the dragonfly run history will be dumped to that file. # Remember, dragonfly maximizes. 
# In the history namespace, query_true_vals refers to the values used for maximization, and query_vals refers # to the actual value returned from the objective function. This means that if the optimizer was told to # minimize instead of maximize, query_true_vals will be the negated query_vals. However, the corresponding # fields in each query_qinfo do not follow this convention and always contain the value used for maximization. # json.dump(trajectories, f, indent=4) | 2.153702 | 2 |
tests/multivariate/secret_key_agreement/test_two_part_intrinsic_mutual_information.py | Ejjaffe/dit | 1 | 6630213 | """
Tests for dit.multivariate.secret_key_agreement.two_part_intrinsic_mutual_information
"""
import pytest
from dit.example_dists import giant_bit, n_mod_m
from dit.exceptions import ditException
from dit.multivariate.secret_key_agreement import (
two_part_intrinsic_total_correlation,
two_part_intrinsic_dual_total_correlation,
two_part_intrinsic_CAEKL_mutual_information
)
from dit.multivariate.secret_key_agreement.base_skar_optimizers import InnerTwoPartIntrinsicMutualInformation
@pytest.mark.flaky(reruns=5)
@pytest.mark.parametrize(['dist', 'value'], [
(giant_bit(3, 2), 0.0),
# (n_mod_m(3, 2), 0.0),
])
def test_tpitc1(dist, value):
    """
    Check two_part_intrinsic_total_correlation against a known value.
    """
tpitc = two_part_intrinsic_total_correlation(dist, [[0], [1]], [2], bound_j=2, bound_u=2, bound_v=2)
assert tpitc == pytest.approx(value, abs=1e-6)
@pytest.mark.flaky(reruns=5)
@pytest.mark.parametrize(['dist', 'value'], [
(giant_bit(3, 2), 0.0),
# (n_mod_m(3, 2), 0.0),
])
def test_tpidtc1(dist, value):
    """
    Check two_part_intrinsic_dual_total_correlation against a known value.
    """
tpidtc = two_part_intrinsic_dual_total_correlation(dist, [[0], [1]], [2], bound_j=2, bound_u=2, bound_v=2)
assert tpidtc == pytest.approx(value, abs=1e-6)
@pytest.mark.flaky(reruns=5)
@pytest.mark.parametrize(['dist', 'value'], [
(giant_bit(3, 2), 0.0),
# (n_mod_m(3, 2), 0.0),
])
def test_tpicaekl1(dist, value):
    """
    Check two_part_intrinsic_CAEKL_mutual_information against a known value.
    """
tpicaekl = two_part_intrinsic_CAEKL_mutual_information(dist, [[0], [1]], [2], bound_j=2, bound_u=2, bound_v=2)
assert tpicaekl == pytest.approx(value, abs=1e-6)
def test_tpimi_fail1():
"""
Ensure an exception is raised if no conditional variable is supplied.
"""
with pytest.raises(ditException):
two_part_intrinsic_total_correlation(n_mod_m(3, 2), [[0], [1]])
def test_tpimi_fail2():
"""
Ensure an exception is raised if no conditional variable is supplied.
"""
with pytest.raises(ditException):
InnerTwoPartIntrinsicMutualInformation(n_mod_m(3, 2), [[0], [1]])
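# Usage note (sketch): these tests are typically collected and run with pytest, e.g.
#     pytest tests/multivariate/secret_key_agreement/test_two_part_intrinsic_mutual_information.py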
| en | 0.654337 | Tests for dit.multivariate.secret_key_agreement.two_part_intrinsic_mutual_information # (n_mod_m(3, 2), 0.0), # (n_mod_m(3, 2), 0.0), # (n_mod_m(3, 2), 0.0), Ensure an exception is raised if no conditional variable is supplied. Ensure an exception is raised if no conditional variable is supplied. | 2.153329 | 2 |
core/src/zeit/content/link/link.py | rickdg/vivi | 5 | 6630214 | from zeit.cms.i18n import MessageFactory as _
import grokcore.component as grok
import zeit.cms.content.metadata
import zeit.cms.content.property
import zeit.cms.content.xmlsupport
import zeit.cms.interfaces
import zeit.cms.type
import zeit.content.link.interfaces
import zeit.push.interfaces
import zope.component
import zope.interface
@zope.interface.implementer(
zeit.content.link.interfaces.ILink,
zeit.cms.interfaces.IEditorialContent)
class Link(zeit.cms.content.metadata.CommonMetadata):
"""A type for managing links to non-local content."""
default_template = (
'<link xmlns:py="http://codespeak.net/lxml/objectify/pytype">'
'<head/><body/></link>')
url = zeit.cms.content.property.ObjectPathProperty('.body.url')
target = zeit.cms.content.property.ObjectPathProperty('.body.target')
nofollow = zeit.cms.content.property.ObjectPathProperty('.body.nofollow')
@property
def blog(self):
if not self.url:
return
source = zeit.content.link.interfaces.ILink['blog'].source(self)
for blog in source:
if blog.url in self.url:
return blog
@property
def title(self):
return self.teaserTitle
@title.setter
def title(self, value):
self.teaserTitle = value
class LinkType(zeit.cms.type.XMLContentTypeDeclaration):
factory = Link
interface = zeit.content.link.interfaces.ILink
title = _('Link')
type = 'link'
@zope.component.adapter(zeit.content.link.interfaces.ILink)
class XMLReferenceUpdater(zeit.cms.content.xmlsupport.XMLReferenceUpdater):
def update(self, entry, suppress_errors=False):
url = self.context.url
if not url:
url = ''
entry.set('{http://namespaces.zeit.de/CMS/link}href', url)
target_attribute = '{http://namespaces.zeit.de/CMS/link}target'
if self.context.target:
entry.set(target_attribute, self.context.target)
else:
entry.attrib.pop(target_attribute, None)
rel_attribute = '{http://namespaces.zeit.de/CMS/link}rel'
if self.context.nofollow:
entry.set(rel_attribute, 'nofollow')
else:
entry.attrib.pop(rel_attribute, None)
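# Illustrative sketch (assumes `entry` is an XML element used for a reference): for a Link with
# url="https://example.com", target="_blank" and nofollow=True, update() would leave attributes
# equivalent to ns:href="https://example.com", ns:target="_blank" and ns:rel="nofollow",
# where ns abbreviates the http://namespaces.zeit.de/CMS/link namespace.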
@grok.adapter(zeit.content.link.interfaces.ILink)
@grok.implementer(zeit.push.interfaces.IPushURL)
def link_push_url(context):
return context.url
| en | 0.919901 | A type for managing links to non-local content. | 1.57147 | 2 |
computer_guess_number.py | AWells595/computer-guess-number | 0 | 6630215 | # this script will seek to have the computer guess a number selected by the user between 1 - 100
# the user will be prompted to answer 'too high' or 'too low' when the computer guesses wrong
# or 'correct' when the computer guesses correctly
# it will then count how many guesses it took to get the correct number
import random
def user_response(computer_guess):
"""This function is called by the main function to show the guess to the user, it takes the randomized
number from the computer, and returns the user's input"""
user_input = input('Is {} your number?'.format(computer_guess))
user_input = user_input.title()
return user_input
def high_or_low(lower=1, upper=100):
"""This function takes two inputs from main, the upper or lower bound of a random number based on user input,
it defaults to a number between 1 and 100, and it returns a random number within the given range"""
computer_guess = random.randint(lower, upper)
return computer_guess
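# Example sketch: if a guess of 50 was "too high", the main loop narrows the range and asks for a
# new random guess, e.g. high_or_low(lower=1, upper=49) returns some integer in [1, 49].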
play_game = True
num_guesses = 1 # starts at 1 so if first guess is correct it displays correctly
lower_limit = 1  # the user's number is between 1 and 100, so start the range at 1
upper_limit = 100
computer_choice = random.randint(lower_limit, upper_limit)
print('Welcome! Please think of a number between 1 and 100 and I will try to guess what it is! ')
user_anwser = user_response(computer_choice)
while play_game is True:
while user_anwser != 'Yes' and user_anwser != 'No':
print('Invalid input please try again! ')
user_anwser = user_response(computer_choice)
if user_anwser == 'Yes':
if num_guesses == 1:
print('I guessed correctly on my first try! ')
exit()
elif num_guesses <= 5:
print("I guessed correctly, it only took me {} tries!".format(num_guesses))
exit()
elif num_guesses <= 10:
print("I guessed correctly, it took me {} tries!".format(num_guesses))
exit()
else: # more than 10 guesses
print("I guessed correctly, this was tough! It took me {} guesses".format(num_guesses))
exit()
elif user_anwser == 'No':
too_high_or_too_low = input("Was my guess too high or too low? ")
too_high_or_too_low = too_high_or_too_low.title()
while too_high_or_too_low != 'Too High' and too_high_or_too_low != 'Too Low':
too_high_or_too_low = input('Invalid input please try again! ')
too_high_or_too_low = too_high_or_too_low.title()
if too_high_or_too_low == 'Too Low':
lower_limit = computer_choice + 1
computer_choice = high_or_low(lower_limit, upper_limit)
num_guesses += 1
user_anwser = user_response(computer_choice)
elif too_high_or_too_low == 'Too High':
upper_limit = computer_choice - 1
computer_choice = high_or_low(lower_limit, upper_limit)
num_guesses += 1
user_anwser = user_response(computer_choice)
| en | 0.89502 | # this script will seek to have the computer guess a number selected by the user between 1 - 100 # the user will be prompted to anwser 'too high' or 'too low' when the computer guesses wrong # or 'correct' when the computer guesses correctly # it will then count how many guesses it took to get the correct number This function is called by the main function to show the guess to the user, it takes the randomized number from the computer, and returns the user's input This function takes two inputs from main, the upper or lower bound of a random number based on user input, it defaults to a number between 1 and 100, and it returns a random number within the given range # starts at 1 so if first guess is correct it displays correctly # more than 10 guesses | 4.559936 | 5 |
MyBinarizer.py | CMU-IDS-2022/final-project-zebra | 0 | 6630216 | from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
# This class is used to binarize a multi-label column
# It can be understood as a multi-label one-hot encoder
class MyBinarizer(BaseEstimator, TransformerMixin):
def __init__(self):
"""
Set up the class
"""
print(">>>> Binarizer Initialized")
self.mlb_list = []
def fit(self, X, y=None):
"""
Fit the binarizer on all the features in the dataframe
"""
print(">>>> Fit called")
for column in list(X.columns):
mlb = MultiLabelBinarizer()
self.mlb_list.append((column, mlb.fit(X[column]), list(mlb.classes_)))
# print(self.mlb_list)
return self
def transform(self, X, y=None):
"""
Return the transformed dataframe
"""
print(">>>> Transform called")
X_ = pd.DataFrame()
for item in self.mlb_list:
column, mlb, cols = item
X_temp = pd.DataFrame(mlb.transform(X[column]), columns=cols)
X_ = pd.concat([X_, X_temp], axis=1)
return X_
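# Minimal usage sketch (hypothetical data, not part of the original file):
#
#     df = pd.DataFrame({"genres": [["action", "drama"], ["drama"], ["action", "comedy"]]})
#     binarized = MyBinarizer().fit_transform(df)
#     # -> one 0/1 indicator column per label: "action", "comedy", "drama"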
| en | 0.736725 | # This class is used to binarize a multi-label column # It can be understood as a multi-label one-hot encoder Set up the class Fit the binarizer on all the features in the dataframe # print(self.mlb_list) Return the transformed dataframe | 3.536633 | 4 |
niapy/tests/test_utility.py | hrnciar/NiaPy | 0 | 6630217 | # encoding=utf8
from unittest import TestCase
import numpy as np
from numpy.random import default_rng
from niapy.algorithms import Algorithm
from niapy.benchmarks import Benchmark
from niapy.util import full_array, repair
class FullArrayTestCase(TestCase):
def test_a_float(self):
A = full_array(25.25, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25.25)))
def test_a_int(self):
A = full_array(25, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25)))
def test_a_float_list(self):
a = [25.25 for i in range(10)]
A = full_array(a, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25.25)))
def test_a_int_list(self):
a = [25 for i in range(10)]
A = full_array(a, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25)))
def test_a_float_array(self):
a = np.asarray([25.25 for i in range(10)])
A = full_array(a, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25.25)))
def test_a_int_array(self):
a = np.asarray([25 for i in range(10)])
A = full_array(a, 10)
self.assertTrue(np.array_equal(A, np.full(10, 25)))
def test_a_float_list1(self):
a = [25.25 + i for i in range(10)]
A = full_array(a, 15)
a.extend([25.25 + i for i in range(5)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_list1(self):
a = [25 + i for i in range(10)]
A = full_array(a, 15)
a.extend([25 + i for i in range(5)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_float_array1(self):
a = [25.25 + i for i in range(10)]
A = full_array(np.asarray(a), 15)
a.extend([25.25 + i for i in range(5)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_array1(self):
a = [25 + i for i in range(10)]
A = full_array(np.asarray(a), 15)
a.extend([25 + i for i in range(5)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_float_list2(self):
a = [25.25 + i for i in range(10)]
A = full_array(a, 13)
a.extend([25.25 + i for i in range(3)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_list2(self):
a = [25 + i for i in range(10)]
A = full_array(a, 13)
a.extend([25 + i for i in range(3)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_float_array2(self):
a = [25.25 + i for i in range(10)]
A = full_array(np.asarray(a), 13)
a.extend([25.25 + i for i in range(3)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_array2(self):
a = [25 + i for i in range(10)]
A = full_array(np.asarray(a), 13)
a.extend([25 + i for i in range(3)])
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_float_list3(self):
a = [25.25 + i for i in range(10)]
A = full_array(a, 9)
a.remove(34.25)
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_list3(self):
a = [25 + i for i in range(10)]
A = full_array(a, 9)
a.remove(34)
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_float_array3(self):
a = [25.25 + i for i in range(10)]
A = full_array(np.asarray(a), 9)
a.remove(34.25)
self.assertTrue(np.array_equal(A, np.asarray(a)))
def test_a_int_array3(self):
a = [25 + i for i in range(10)]
A = full_array(np.asarray(a), 9)
a.remove(34)
self.assertTrue(np.array_equal(A, np.asarray(a)))
class NoLimits:
@classmethod
def function(cls):
def evaluate(D, x):
return 0
return evaluate
class MyBenchmark(Benchmark):
def __init__(self):
Benchmark.__init__(self, -10, 10)
def function(self):
def evaluate(D, x):
return np.sum(x ** 2)
return evaluate
class MyFakeAlgorithm:
def __init__(self):
pass
class MyCustomAlgorithm(Algorithm):
pass
class LimitRepairTestCase(TestCase):
def setUp(self):
self.D = 10
self.Upper, self.Lower = full_array(10, self.D), full_array(-10, self.D)
self.met = repair.limit
def generateIndividual(self, D, upper, lower):
upp, low = full_array(upper, D), full_array(lower, D)
return default_rng().uniform(low, upp, D)
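    # Illustrative sketch (behaviour assumed from the assertions below, not from niapy docs):
    # repair.limit is expected to map out-of-bounds components back into [lower, upper], e.g.
    #     repair.limit(np.array([-15., 0., 15.]), np.full(3, -10.), np.full(3, 10.))
    # should yield values no smaller than -10 and no larger than 10.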
def test_limit_repair_good_solution(self):
x = self.generateIndividual(self.D, self.Upper, self.Lower)
x = self.met(x, self.Lower, self.Upper)
self.assertFalse((x > self.Upper).any())
self.assertFalse((x < self.Lower).any())
def test_limit_repair_bad_upper_solution(self):
x = self.generateIndividual(self.D, 12, 11)
x = self.met(x, self.Lower, self.Upper)
self.assertFalse((x > self.Upper).any())
self.assertFalse((x < self.Lower).any())
    def test_limit_repair_bad_lower_solution(self):
x = self.generateIndividual(self.D, -11, -12)
x = self.met(x, self.Lower, self.Upper)
self.assertFalse((x > self.Upper).any())
self.assertFalse((x < self.Lower).any())
    def test_limit_repair_bad_upper_lower_solution(self):
x = self.generateIndividual(self.D, 100, -100)
x = self.met(x, self.Lower, self.Upper)
self.assertFalse((x > self.Upper).any())
self.assertFalse((x < self.Lower).any())
class LimitInverseRepairTestCase(LimitRepairTestCase):
def setUp(self):
LimitRepairTestCase.setUp(self)
self.met = repair.limit_inverse
class WangRepairTestCase(LimitRepairTestCase):
def setUp(self):
LimitRepairTestCase.setUp(self)
self.met = repair.wang
class RandRepairTestCase(LimitRepairTestCase):
def setUp(self):
LimitRepairTestCase.setUp(self)
self.met = repair.rand
class ReflectRepairTestCase(LimitRepairTestCase):
def setUp(self):
LimitRepairTestCase.setUp(self)
self.met = repair.reflect
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| de | 0.140563 | # encoding=utf8 # vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3 | 2.483227 | 2 |
CA/Assets/FbxExporters/Integrations/Autodesk/maya/plug-ins/UnityFbxForMayaPlugin.py | Bartlett-RC3/skilling-module-1-peljevic | 0 | 6630218 | #-
########################################################################
# Copyright (c) 2017 Unity Technologies. All rights reserved.
# NOTICE: All information contained herein is, and remains
# the property of Unity Technology Aps. and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Unity Technology Aps. and its
# suppliers and may be covered by Canadian, U.S. and/or
# Foreign Patents, patents in process, and are protected
# by trade secret or copyright law. Dissemination of this
# information or reproduction of this material is strictly
# forbidden unless prior written permission is obtained from
# Unity Technology Aps.
#
########################################################################
#+
import sys
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds
from UnityFbxForMaya import (version, commands, ui, debug)
kPluginInfo = { 'name': version.pluginName(), 'version': version.versionName(), 'vendor': version.vendorName() }
kVerbose = True
kHeadlessInstall = (maya.cmds.optionVar( exists='UnityFbxForMaya_Headless')
and maya.cmds.optionVar(q='UnityFbxForMaya_Headless') == 1)
# initialize the script plug-in
def initializePlugin(mobject):
pluginFn = OpenMayaMPx.MFnPlugin(mobject, kPluginInfo['vendor'], str(kPluginInfo['version']))
try:
if debug.EnableDebugMessages:
sys.stdout.write('loading %s\n'%kPluginInfo['name'])
commands.register(pluginFn)
if not kHeadlessInstall:
ui.register(pluginFn)
except Exception as e:
assert isinstance(sys.stderr.write, object)
sys.stderr.write( "Failed to register plugin: %s" % [kPluginInfo['name'], e] )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
pluginFn = OpenMayaMPx.MFnPlugin(mobject)
try:
if debug.EnableDebugMessages:
sys.stdout.write('unloading %s\n'%kPluginInfo['name'])
if not kHeadlessInstall:
ui.unregister(pluginFn)
commands.unregister(pluginFn)
except:
sys.stderr.write( "Failed to deregister plugin: %s" % kPluginInfo['name'] )
raise
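# For context, OpenMayaMPx command registration typically follows the pattern sketched
# below. Whether UnityFbxForMaya's commands.register/unregister do exactly this is an
# assumption (the command name here is hypothetical), not something shown in this file:
#
#   def register(pluginFn):
#       pluginFn.registerCommand('unityFbxExport', commandCreator)
#
#   def unregister(pluginFn):
#       pluginFn.deregisterCommand('unityFbxExport')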
| #-
########################################################################
# Copyright (c) 2017 Unity Technologies. All rights reserved.
# NOTICE: All information contained herein is, and remains
# the property of Unity Technology Aps. and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Unity Technology Aps. and its
# suppliers and may be covered by Canadian, U.S. and/or
# Foreign Patents, patents in process, and are protected
# by trade secret or copyright law. Dissemination of this
# information or reproduction of this material is strictly
# forbidden unless prior written permission is obtained from
# Unity Technology Aps.
#
########################################################################
#+
import sys
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds
from UnityFbxForMaya import (version, commands, ui, debug)
kPluginInfo = { 'name': version.pluginName(), 'version': version.versionName(), 'vendor': version.vendorName() }
kVerbose = True
kHeadlessInstall = (maya.cmds.optionVar( exists='UnityFbxForMaya_Headless')
and maya.cmds.optionVar(q='UnityFbxForMaya_Headless') == 1)
# initialize the script plug-in
def initializePlugin(mobject):
pluginFn = OpenMayaMPx.MFnPlugin(mobject, kPluginInfo['vendor'], str(kPluginInfo['version']))
try:
if debug.EnableDebugMessages:
sys.stdout.write('loading %s\n'%kPluginInfo['name'])
commands.register(pluginFn)
if not kHeadlessInstall:
ui.register(pluginFn)
except Exception as e:
assert isinstance(sys.stderr.write, object)
sys.stderr.write( "Failed to register plugin: %s" % [kPluginInfo['name'], e] )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
pluginFn = OpenMayaMPx.MFnPlugin(mobject)
try:
if debug.EnableDebugMessages:
sys.stdout.write('unloading %s\n'%kPluginInfo['name'])
if not kHeadlessInstall:
ui.unregister(pluginFn)
commands.unregister(pluginFn)
except:
sys.stderr.write( "Failed to deregister plugin: %s" % kPluginInfo['name'] )
raise
| en | 0.707196 | #- ######################################################################## # Copyright (c) 2017 Unity Technologies. All rights reserved. # NOTICE: All information contained herein is, and remains # the property of Unity Technology Aps. and its suppliers, # if any. The intellectual and technical concepts contained # herein are proprietary to Unity Technology Aps. and its # suppliers and may be covered by Canadian, U.S. and/or # Foreign Patents, patents in process, and are protected # by trade secret or copyright law. Dissemination of this # information or reproduction of this material is strictly # forbidden unless prior written permission is obtained from # Unity Technology Aps. # ######################################################################## #+ # initialize the script plug-in # uninitialize the script plug-in | 1.799209 | 2 |
polls/migrations/0003_search.py | simranlotey/Blood-Bank-Management-System | 9 | 6630219 | <reponame>simranlotey/Blood-Bank-Management-System
# Generated by Django 3.2 on 2021-05-08 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_contact'),
]
operations = [
migrations.CreateModel(
name='search',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('blood_group', models.CharField(blank=True, choices=[('a+', 'A+'), ('a-', 'A-'), ('b+', 'B+'), ('b', 'B'), ('b-', 'B-'), ('o+', 'O+'), ('ab+', 'AB+'), ('ab-', 'AB-')], max_length=4, null=True)),
('state', models.CharField(blank=True, max_length=50, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
],
),
]
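# For reference, a model definition along these lines (an illustrative reconstruction
# from the CreateModel operation above, not code copied from the repository) would
# generate this migration:
#
#   class search(models.Model):
#       BLOOD_GROUPS = [('a+', 'A+'), ('a-', 'A-'), ('b+', 'B+'), ('b', 'B'),
#                       ('b-', 'B-'), ('o+', 'O+'), ('ab+', 'AB+'), ('ab-', 'AB-')]
#       blood_group = models.CharField(max_length=4, choices=BLOOD_GROUPS, blank=True, null=True)
#       state = models.CharField(max_length=50, blank=True, null=True)
#       city = models.CharField(max_length=50, blank=True, null=True)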
| # Generated by Django 3.2 on 2021-05-08 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_contact'),
]
operations = [
migrations.CreateModel(
name='search',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('blood_group', models.CharField(blank=True, choices=[('a+', 'A+'), ('a-', 'A-'), ('b+', 'B+'), ('b', 'B'), ('b-', 'B-'), ('o+', 'O+'), ('ab+', 'AB+'), ('ab-', 'AB-')], max_length=4, null=True)),
('state', models.CharField(blank=True, max_length=50, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
],
),
] | en | 0.856412 | # Generated by Django 3.2 on 2021-05-08 22:35 | 1.913833 | 2 |
Regression_Modeling.py | kevinidea/Predict_Lap_Times | 0 | 6630220 | <reponame>kevinidea/Predict_Lap_Times
import pandas as pd
from sklearn.cross_validation import train_test_split, ShuffleSplit
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import LinearRegression, Ridge, Lasso, BayesianRidge, SGDRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
data = pd.read_csv("cars_data.csv")
#convert all lap times into seconds
pattern = data['Lap Time'].str.extract(r"(\d+)\:(\d+\.\d+)")
condition = (data['Lap Time'].str.contains(r"(\d+)\:(\d+\.\d+)")) & (data['Lap Time'].notnull())
#Copy Lap Time column to Lap_Time
data['Lap_Time'] = data['Lap Time']
#Override the lap_time that match the pattern with transformed lap time
data.loc[condition, 'Lap_Time'] = pattern.loc[condition, 0].astype(float)*60 + pattern.loc[condition, 1].astype(float)
#Delete the Original lap time column
data = data.drop('Lap Time', axis =1)
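#Worked example of the conversion above: a raw value of "7:12.345" matches the pattern
#with groups ("7", "12.345") and becomes 7 * 60 + 12.345 = 432.345 seconds; values that
#contain no "m:ss" colon fail the condition mask and are left unchanged in Lap_Time.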
#Encode categorical variable(s) into boolean dummy variable(s)
def transform_categorical_variables(data, cols, drop_categorical_columns=False):
vec = DictVectorizer()
mkdict = lambda row: dict((col, row[col]) for col in cols)
vecData = pd.DataFrame(vec.fit_transform(data[cols].apply(mkdict, axis=1)).toarray())
vecData.columns = vec.get_feature_names()
vecData.index = data.index
if drop_categorical_columns is True:
data = data.drop(cols, axis=1)
data = data.join(vecData)
else:
data = data.join(vecData)
return data
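#Minimal usage sketch for the helper above (illustrative toy data, not rows from cars_data.csv):
# toy = pd.DataFrame({'Track': ['Spa', 'Monza'], 'HP': [450, 600]})
# encoded = transform_categorical_variables(toy, ['Track'], drop_categorical_columns=True)
#DictVectorizer names the resulting dummy columns 'Track=Spa' and 'Track=Monza'.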
#Dummy code categorical variable(s)
#All categorical variable(s): 'Condition', 'Track', 'Country', 'Layout', 'Car Type', 'Engine', 'Car Brand'
data2 = transform_categorical_variables \
(data, ['Track', 'Condition', 'Car Brand', 'Layout'], drop_categorical_columns = True)
#Choose features to run model
#All numerical variable(s): 'Year Model', 'HP', 'Torque', 'Weight', 'Turbocharged',
# 'Diesel', 'Gear', 'Displacement', 'HP Per Liter', 'HP Per Ton', 'Top Speed'
x = data2.drop(['Car', 'Ranking', 'Lap_Time', 'Country', 'Car Type', 'Engine',\
'Diesel', 'Gear', 'HP Per Liter', 'HP Per Ton' ], axis =1)
y = data2['Lap_Time']
#print x.columns
#Separate training and testing data
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size =0.25, random_state =6)
#Scale x to certain range
#scaler = StandardScaler()
scaler = MinMaxScaler(feature_range=(-1,1))
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
#####Tuning Kernel Ridge
kr = KernelRidge()
test_params = {'kernel':['linear', 'rbf'], 'alpha':[10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'gamma':[10**-5,10**-4,0.001,0.01,0.1,1,10], 'degree':[1,2,3,4]}
kr_optimized = GridSearchCV(estimator = kr, param_grid = test_params, scoring = 'mean_absolute_error' )
kr_optimized.fit(x_train_scaled, y_train)
y_predicted = kr_optimized.predict(x_test_scaled)
print "Best Parameters for KR: %s" %kr_optimized.best_estimator_
print "MAE for KR:", mean_absolute_error(y_test, y_predicted)
print "MSE for KR", mean_squared_error(y_test, y_predicted)
print "R2 for KR", r2_score(y_test, y_predicted)
'''Best Parameters for KR: KernelRidge(alpha=0.0001, coef0=1, degree=1, gamma=0.001, kernel='rbf',kernel_params=None)
MAE for KR: 2.35570415904
MSE for KR 20.3426329621
R2 for KR 0.997206503468'''
######Tuning Lasso
lasso = Lasso()
test_params = {'alpha':[10**-9, 10**-8,10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10,100,1000]}
lasso_optimized = GridSearchCV(estimator = lasso, param_grid = test_params, scoring = 'mean_absolute_error' )
lasso_optimized.fit(x_train_scaled, y_train)
y_predicted = lasso_optimized.predict(x_test_scaled)
print "Best Parameters for lasso: %s" %lasso_optimized.best_estimator_
print "MAE for lasso:", mean_absolute_error(y_test, y_predicted)
print "MSE for lasso", mean_squared_error(y_test, y_predicted)
print "R2 for lasso", r2_score(y_test, y_predicted)
'''Best Parameters for lasso: Lasso(alpha=1e-09, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
MAE for lasso: 3.38177782627
MSE for lasso 41.3155554331
R2 for lasso 0.993617255849'''
######Tuning Linear Ridge
linear_ridge = Ridge()
test_params = {'alpha':[10**-9, 10**-8,10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10,100,1000]}
linear_ridge_optimized = GridSearchCV(estimator = linear_ridge, param_grid = test_params, scoring = 'mean_absolute_error' )
linear_ridge_optimized.fit(x_train_scaled, y_train)
y_predicted = linear_ridge_optimized.predict(x_test_scaled)
print "Best Parameters for linear ridge: %s" %linear_ridge_optimized.best_estimator_
print "MAE for linear ridge:", mean_absolute_error(y_test, y_predicted)
print "MSE for linear ridge", mean_squared_error(y_test, y_predicted)
print "R2 for linear ridge", r2_score(y_test, y_predicted)
'''Best Parameters for linear ridge: Ridge(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
MAE for linear ridge: 3.35795768117
MSE for linear ridge 38.473182419
R2 for linear ridge 0.994056367451'''
######Tuning Bayesian
bayesian_ridge = BayesianRidge()
test_params = {'alpha_1':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'alpha_2':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'lambda_1': [10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'lambda_2': [10**-5, 10**-4,0.001,0.01,0.1,1,10] }
bayesian_optimized = GridSearchCV(estimator = bayesian_ridge, param_grid = test_params, scoring = 'mean_absolute_error' )
bayesian_optimized.fit(x_train_scaled, y_train)
y_predicted = bayesian_optimized.predict(x_test_scaled)
print "Best Parameters for bayesian: %s" %bayesian_optimized.best_estimator_
print "MAE for bayesian:", mean_absolute_error(y_test, y_predicted)
print "MSE for bayesian", mean_squared_error(y_test, y_predicted)
print "R2 for bayesian", r2_score(y_test, y_predicted)
'''Best Parameters for bayesian: BayesianRidge(alpha_1=1e-05, alpha_2=10, compute_score=False, copy_X=True,
fit_intercept=True, lambda_1=10, lambda_2=1e-05, n_iter=300,
normalize=False, tol=0.001, verbose=False)
MAE for bayesian: 3.35807586285
MSE for bayesian 38.4740276071
R2 for bayesian 0.99405623688'''
######Tuning SGD
sgd = SGDRegressor()
test_params = {'alpha':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'loss':['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], \
'penalty': ['none', 'l2', 'l1', 'elasticnet'], \
'epsilon': [10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'l1_ratio': [0.1, 0.2, 0.5, 0.6, 0.8, 0.9], \
'power_t': [0.1, 0.2, 0.25, 0.5, 0.8, 0.9]}
sgd_optimized = GridSearchCV(estimator = sgd, param_grid = test_params, scoring = 'mean_absolute_error' )
sgd_optimized.fit(x_train_scaled, y_train)
y_predicted = sgd_optimized.predict(x_test_scaled)
print "Best Parameters for SGD: %s" %sgd_optimized.best_estimator_
print "MAE for SGD:", mean_absolute_error(y_test, y_predicted)
print "MSE for SGD", mean_squared_error(y_test, y_predicted)
print "R2 for SGD", r2_score(y_test, y_predicted)
'''Best Parameters for SGD: SGDRegressor(alpha=0.1, average=False, epsilon=0.001, eta0=0.01,
fit_intercept=True, l1_ratio=0.2, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='none', power_t=0.2,
random_state=None, shuffle=True, verbose=0, warm_start=False)
#MAE for SGD: 9.04117895779
#MSE for SGD 292.104437304
#R2 for SGD 0.954873464267'''
####Develop models using various tuned algorithms above
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predicted = lr.predict(x_test)
svr = SVR(C=10, gamma =1, kernel = 'linear')
svr.fit(x_train_scaled, y_train)
y2 = svr.predict(x_test_scaled)
kr = KernelRidge(alpha=0.0001, coef0=1, degree=1, gamma=0.001, kernel='rbf',kernel_params=None)
kr.fit(x_train_scaled, y_train)
y3 = kr.predict(x_test_scaled)
lasso = Lasso(alpha=1e-09)
lasso.fit(x_train_scaled, y_train)
y4 = lasso.predict(x_test_scaled)
linear_ridge = Ridge(alpha=0.1)
linear_ridge.fit(x_train_scaled,y_train)
y5 = linear_ridge.predict(x_test_scaled)
bayesian_ridge = BayesianRidge(alpha_1=1e-05, alpha_2=10, lambda_1=10, lambda_2=1e-05)
bayesian_ridge.fit(x_train_scaled, y_train)
y6 = bayesian_ridge.predict(x_test_scaled)
sgd = SGDRegressor(alpha=0.1, epsilon=0.001, l1_ratio=0.2, loss='squared_loss', penalty='none', power_t=0.2)
sgd.fit(x_train_scaled, y_train)
y7 = sgd.predict(x_test_scaled)
###########################################
print '########## TESTING ERRORS ##########'
print "MAE for Linear Regression:", mean_absolute_error(y_test, y_predicted)
print "MAE for SVR:", mean_absolute_error(y_test, y2)
print "MAE for Kernel Ridge Regression:", mean_absolute_error(y_test, y3)
print "MAE for Lasso Regression:", mean_absolute_error(y_test, y4)
print "MAE for Linear Ridge Regression:", mean_absolute_error(y_test, y5)
print "MAE for Bayesian Ridge Regression:", mean_absolute_error(y_test, y6)
print "MAE for Stochastic Gradient Descent Regression:", mean_absolute_error(y_test, y7)
print "--------------------------------"
print "MSE for Linear Regression", mean_squared_error(y_test, y_predicted)
print "MSE for SVR", mean_squared_error(y_test, y2)
print "MSE for Kernel Ridge Regression", mean_squared_error(y_test, y3)
print "MSE for Lasso Regression:", mean_squared_error(y_test, y4)
print "MSE for Linear Ridge Regression:", mean_squared_error(y_test, y5)
print "MSE for Bayesian Ridge Regression:", mean_squared_error(y_test, y6)
print "MSE for Stochastic Gradient Descent Regression:", mean_squared_error(y_test, y7)
print "--------------------------------"
print "R2 for Linear Regression", r2_score(y_test, y_predicted)
print "R2 for SVR", r2_score(y_test, y2)
print "R2 for Kernel Ridge Regression", r2_score(y_test, y3)
print "R2 for Lasso Regression:", r2_score(y_test, y4)
print "R2 for Linear Ridge Regression:", r2_score(y_test, y5)
print "R2 for Bayesian Ridge Regression:", r2_score(y_test, y6)
print "R2 for Stochastic Gradient Descent Regression:", r2_score(y_test, y7)
###########################################
print '########## TRAINING ERRORS ##########'
y_predicted = lr.predict(x_train)
y2 = svr.predict(x_train_scaled)
y3 = kr.predict(x_train_scaled)
y4 = lasso.predict(x_train_scaled)
y5 = linear_ridge.predict(x_train_scaled)
y6 = bayesian_ridge.predict(x_train_scaled)
y7 = sgd.predict(x_train_scaled)
print "MAE for Linear Regression:", mean_absolute_error(y_train, y_predicted)
print "MAE for SVR:", mean_absolute_error(y_train, y2)
print "MAE for Kernel Ridge Regression:", mean_absolute_error(y_train, y3)
print "MAE for Lasso Regression:", mean_absolute_error(y_train, y4)
print "MAE for Linear Ridge Regression:", mean_absolute_error(y_train, y5)
print "MAE for Bayesian Ridge Regression:", mean_absolute_error(y_train, y6)
print "MAE for Stochastic Gradient Descent Regression:", mean_absolute_error(y_train, y7)
print "--------------------------------"
print "MSE for Linear Regression:", mean_squared_error(y_train, y_predicted)
print "MSE for SVR:", mean_squared_error(y_train, y2)
print "MSE for Kernel Ridge Regression:", mean_squared_error(y_train, y3)
print "MSE for Lasso Regression:", mean_squared_error(y_train, y4)
print "MSE for Linear Ridge Regression:", mean_squared_error(y_train, y5)
print "MSE for Bayesian Ridge Regression:", mean_squared_error(y_train, y6)
print "MSE for Stochastic Gradient Descent Regression:", mean_squared_error(y_train, y7)
print "--------------------------------"
print "R2 for Linear Regression:", r2_score(y_train, y_predicted)
print "R2 for SVR:", r2_score(y_train, y2)
print "R2 for Kernel Ridge Regression:", r2_score(y_train, y3)
print "R2 for Lasso Regression:", r2_score(y_train, y4)
print "R2 for Linear Ridge Regression:", r2_score(y_train, y5)
print "R2 for Bayesian Ridge Regression:", r2_score(y_train, y6)
print "R2 for Stochastic Gradient Descent Regression:", r2_score(y_train, y7) | import pandas as pd
from sklearn.cross_validation import train_test_split, ShuffleSplit
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import LinearRegression, Ridge, Lasso, BayesianRidge, SGDRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
data = pd.read_csv("cars_data.csv")
#convert all lap times into seconds
pattern = data['Lap Time'].str.extract(r"(\d+)\:(\d+\.\d+)")
condition = (data['Lap Time'].str.contains(r"(\d+)\:(\d+\.\d+)")) & (data['Lap Time'].notnull())
#Copy Lap Time column to Lap_Time
data['Lap_Time'] = data['Lap Time']
#Override the lap_time that match the pattern with transformed lap time
data.loc[condition, 'Lap_Time'] = pattern.loc[condition, 0].astype(float)*60 + pattern.loc[condition, 1].astype(float)
#Delete the Original lap time column
data = data.drop('Lap Time', axis =1)
#Encode categorical variable(s) into boolean dummy variable(s)
def transform_categorical_variables(data, cols, drop_categorical_columns=False):
vec = DictVectorizer()
mkdict = lambda row: dict((col, row[col]) for col in cols)
vecData = pd.DataFrame(vec.fit_transform(data[cols].apply(mkdict, axis=1)).toarray())
vecData.columns = vec.get_feature_names()
vecData.index = data.index
if drop_categorical_columns is True:
data = data.drop(cols, axis=1)
data = data.join(vecData)
else:
data = data.join(vecData)
return data
#Dummy code categorical variable(s)
#All categorical variable(s): 'Condition', 'Track', 'Country', 'Layout', 'Car Type', 'Engine', 'Car Brand'
data2 = transform_categorical_variables \
(data, ['Track', 'Condition', 'Car Brand', 'Layout'], drop_categorical_columns = True)
#Choose features to run model
#All numerical variable(s): 'Year Model', 'HP', 'Torque', 'Weight', 'Turbocharged',
# 'Diesel', 'Gear', 'Displacement', 'HP Per Liter', 'HP Per Ton', 'Top Speed'
x = data2.drop(['Car', 'Ranking', 'Lap_Time', 'Country', 'Car Type', 'Engine',\
'Diesel', 'Gear', 'HP Per Liter', 'HP Per Ton' ], axis =1)
y = data2['Lap_Time']
#print x.columns
#Separate training and testing data
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size =0.25, random_state =6)
#Scale x to certain range
#scaler = StandardScaler()
scaler = MinMaxScaler(feature_range=(-1,1))
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
#####Tuning Kernel Ridge
kr = KernelRidge()
test_params = {'kernel':['linear', 'rbf'], 'alpha':[10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'gamma':[10**-5,10**-4,0.001,0.01,0.1,1,10], 'degree':[1,2,3,4]}
kr_optimized = GridSearchCV(estimator = kr, param_grid = test_params, scoring = 'mean_absolute_error' )
kr_optimized.fit(x_train_scaled, y_train)
y_predicted = kr_optimized.predict(x_test_scaled)
print "Best Parameters for KR: %s" %kr_optimized.best_estimator_
print "MAE for KR:", mean_absolute_error(y_test, y_predicted)
print "MSE for KR", mean_squared_error(y_test, y_predicted)
print "R2 for KR", r2_score(y_test, y_predicted)
'''Best Parameters for KR: KernelRidge(alpha=0.0001, coef0=1, degree=1, gamma=0.001, kernel='rbf',kernel_params=None)
MAE for KR: 2.35570415904
MSE for KR 20.3426329621
R2 for KR 0.997206503468'''
######Tuning Lasso
lasso = Lasso()
test_params = {'alpha':[10**-9, 10**-8,10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10,100,1000]}
lasso_optimized = GridSearchCV(estimator = lasso, param_grid = test_params, scoring = 'mean_absolute_error' )
lasso_optimized.fit(x_train_scaled, y_train)
y_predicted = lasso_optimized.predict(x_test_scaled)
print "Best Parameters for lasso: %s" %lasso_optimized.best_estimator_
print "MAE for lasso:", mean_absolute_error(y_test, y_predicted)
print "MSE for lasso", mean_squared_error(y_test, y_predicted)
print "R2 for lasso", r2_score(y_test, y_predicted)
'''Best Parameters for lasso: Lasso(alpha=1e-09, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
MAE for lasso: 3.38177782627
MSE for lasso 41.3155554331
R2 for lasso 0.993617255849'''
######Tuning Linear Ridge
linear_ridge = Ridge()
test_params = {'alpha':[10**-9, 10**-8,10**-7,10**-6,10**-5, 10**-4,0.001,0.01,0.1,1,10,100,1000]}
linear_ridge_optimized = GridSearchCV(estimator = linear_ridge, param_grid = test_params, scoring = 'mean_absolute_error' )
linear_ridge_optimized.fit(x_train_scaled, y_train)
y_predicted = linear_ridge_optimized.predict(x_test_scaled)
print "Best Parameters for linear ridge: %s" %linear_ridge_optimized.best_estimator_
print "MAE for linear ridge:", mean_absolute_error(y_test, y_predicted)
print "MSE for linear ridge", mean_squared_error(y_test, y_predicted)
print "R2 for linear ridge", r2_score(y_test, y_predicted)
'''Best Parameters for linear ridge: Ridge(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
MAE for linear ridge: 3.35795768117
MSE for linear ridge 38.473182419
R2 for linear ridge 0.994056367451'''
######Tuning Bayesian
bayesian_ridge = BayesianRidge()
test_params = {'alpha_1':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'alpha_2':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'lambda_1': [10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'lambda_2': [10**-5, 10**-4,0.001,0.01,0.1,1,10] }
bayesian_optimized = GridSearchCV(estimator = bayesian_ridge, param_grid = test_params, scoring = 'mean_absolute_error' )
bayesian_optimized.fit(x_train_scaled, y_train)
y_predicted = bayesian_optimized.predict(x_test_scaled)
print "Best Parameters for bayesian: %s" %bayesian_optimized.best_estimator_
print "MAE for bayesian:", mean_absolute_error(y_test, y_predicted)
print "MSE for bayesian", mean_squared_error(y_test, y_predicted)
print "R2 for bayesian", r2_score(y_test, y_predicted)
'''Best Parameters for bayesian: BayesianRidge(alpha_1=1e-05, alpha_2=10, compute_score=False, copy_X=True,
fit_intercept=True, lambda_1=10, lambda_2=1e-05, n_iter=300,
normalize=False, tol=0.001, verbose=False)
MAE for bayesian: 3.35807586285
MSE for bayesian 38.4740276071
R2 for bayesian 0.99405623688'''
######Tuning SGD
sgd = SGDRegressor()
test_params = {'alpha':[10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'loss':['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], \
'penalty': ['none', 'l2', 'l1', 'elasticnet'], \
'epsilon': [10**-5, 10**-4,0.001,0.01,0.1,1,10], \
'l1_ratio': [0.1, 0.2, 0.5, 0.6, 0.8, 0.9], \
'power_t': [0.1, 0.2, 0.25, 0.5, 0.8, 0.9]}
sgd_optimized = GridSearchCV(estimator = sgd, param_grid = test_params, scoring = 'mean_absolute_error' )
sgd_optimized.fit(x_train_scaled, y_train)
y_predicted = sgd_optimized.predict(x_test_scaled)
print "Best Parameters for SGD: %s" %sgd_optimized.best_estimator_
print "MAE for SGD:", mean_absolute_error(y_test, y_predicted)
print "MSE for SGD", mean_squared_error(y_test, y_predicted)
print "R2 for SGD", r2_score(y_test, y_predicted)
'''Best Parameters for SGD: SGDRegressor(alpha=0.1, average=False, epsilon=0.001, eta0=0.01,
fit_intercept=True, l1_ratio=0.2, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='none', power_t=0.2,
random_state=None, shuffle=True, verbose=0, warm_start=False)
#MAE for SGD: 9.04117895779
#MSE for SGD 292.104437304
#R2 for SGD 0.954873464267'''
####Develop models using various tuned algorithms above
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predicted = lr.predict(x_test)
svr = SVR(C=10, gamma =1, kernel = 'linear')
svr.fit(x_train_scaled, y_train)
y2 = svr.predict(x_test_scaled)
kr = KernelRidge(alpha=0.0001, coef0=1, degree=1, gamma=0.001, kernel='rbf',kernel_params=None)
kr.fit(x_train_scaled, y_train)
y3 = kr.predict(x_test_scaled)
lasso = Lasso(alpha=1e-09)
lasso.fit(x_train_scaled, y_train)
y4 = lasso.predict(x_test_scaled)
linear_ridge = Ridge(alpha=0.1)
linear_ridge.fit(x_train_scaled,y_train)
y5 = linear_ridge.predict(x_test_scaled)
bayesian_ridge = BayesianRidge(alpha_1=1e-05, alpha_2=10, lambda_1=10, lambda_2=1e-05)
bayesian_ridge.fit(x_train_scaled, y_train)
y6 = bayesian_ridge.predict(x_test_scaled)
sgd = SGDRegressor(alpha=0.1, epsilon=0.001, l1_ratio=0.2, loss='squared_loss', penalty='none', power_t=0.2)
sgd.fit(x_train_scaled, y_train)
y7 = sgd.predict(x_test_scaled)
###########################################
print '########## TESTING ERRORS ##########'
print "MAE for Linear Regression:", mean_absolute_error(y_test, y_predicted)
print "MAE for SVR:", mean_absolute_error(y_test, y2)
print "MAE for Kernel Ridge Regression:", mean_absolute_error(y_test, y3)
print "MAE for Lasso Regression:", mean_absolute_error(y_test, y4)
print "MAE for Linear Ridge Regression:", mean_absolute_error(y_test, y5)
print "MAE for Bayesian Ridge Regression:", mean_absolute_error(y_test, y6)
print "MAE for Stochastic Gradient Descent Regression:", mean_absolute_error(y_test, y7)
print "--------------------------------"
print "MSE for Linear Regression", mean_squared_error(y_test, y_predicted)
print "MSE for SVR", mean_squared_error(y_test, y2)
print "MSE for Kernel Ridge Regression", mean_squared_error(y_test, y3)
print "MSE for Lasso Regression:", mean_squared_error(y_test, y4)
print "MSE for Linear Ridge Regression:", mean_squared_error(y_test, y5)
print "MSE for Bayesian Ridge Regression:", mean_squared_error(y_test, y6)
print "MSE for Stochastic Gradient Descent Regression:", mean_squared_error(y_test, y7)
print "--------------------------------"
print "R2 for Linear Regression", r2_score(y_test, y_predicted)
print "R2 for SVR", r2_score(y_test, y2)
print "R2 for Kernel Ridge Regression", r2_score(y_test, y3)
print "R2 for Lasso Regression:", r2_score(y_test, y4)
print "R2 for Linear Ridge Regression:", r2_score(y_test, y5)
print "R2 for Bayesian Ridge Regression:", r2_score(y_test, y6)
print "R2 for Stochastic Gradient Descent Regression:", r2_score(y_test, y7)
###########################################
print '########## TRAINING ERRORS ##########'
y_predicted = lr.predict(x_train)
y2 = svr.predict(x_train_scaled)
y3 = kr.predict(x_train_scaled)
y4 = lasso.predict(x_train_scaled)
y5 = linear_ridge.predict(x_train_scaled)
y6 = bayesian_ridge.predict(x_train_scaled)
y7 = sgd.predict(x_train_scaled)
print "MAE for Linear Regression:", mean_absolute_error(y_train, y_predicted)
print "MAE for SVR:", mean_absolute_error(y_train, y2)
print "MAE for Kernel Ridge Regression:", mean_absolute_error(y_train, y3)
print "MAE for Lasso Regression:", mean_absolute_error(y_train, y4)
print "MAE for Linear Ridge Regression:", mean_absolute_error(y_train, y5)
print "MAE for Bayesian Ridge Regression:", mean_absolute_error(y_train, y6)
print "MAE for Stochastic Gradient Descent Regression:", mean_absolute_error(y_train, y7)
print "--------------------------------"
print "MSE for Linear Regression:", mean_squared_error(y_train, y_predicted)
print "MSE for SVR:", mean_squared_error(y_train, y2)
print "MSE for Kernel Ridge Regression:", mean_squared_error(y_train, y3)
print "MSE for Lasso Regression:", mean_squared_error(y_train, y4)
print "MSE for Linear Ridge Regression:", mean_squared_error(y_train, y5)
print "MSE for Bayesian Ridge Regression:", mean_squared_error(y_train, y6)
print "MSE for Stochastic Gradient Descent Regression:", mean_squared_error(y_train, y7)
print "--------------------------------"
print "R2 for Linear Regression:", r2_score(y_train, y_predicted)
print "R2 for SVR:", r2_score(y_train, y2)
print "R2 for Kernel Ridge Regression:", r2_score(y_train, y3)
print "R2 for Lasso Regression:", r2_score(y_train, y4)
print "R2 for Linear Ridge Regression:", r2_score(y_train, y5)
print "R2 for Bayesian Ridge Regression:", r2_score(y_train, y6)
print "R2 for Stochastic Gradient Descent Regression:", r2_score(y_train, y7) | en | 0.520623 | #convert all lap times into seconds #Copy Lap Time column to Lap_Time #Override the lap_time that match the pattern with transformed lap time #Delete the Original lap time column #Encode categorical variable(s) into boolean dummy variable(s) #Dummy code categorical variable(s) #All categorical variable(s): 'Condition', 'Track', 'Country', 'Layout', 'Car Type', 'Engine', 'Car Brand' #Choose features to run model #All numerical variable(s): 'Year Model', 'HP', 'Torque', 'Weight', 'Turbocharged', # 'Diesel', 'Gear', 'Displacement', 'HP Per Liter', 'HP Per Ton', 'Top Speed' #print x.columns #Separate training and testing data #Scale x to certain range #scaler = StandardScaler() #####Tuning Kernel Ridge Best Parameters for KNR: KernelRidge(alpha=0.0001, coef0=1, degree=1, gamma=0.001, kernel='rbf',kernel_params=None) MAE for KR: 2.35570415904 MSE for KR 20.3426329621 R2 for KR 0.997206503468 ######Tuning Lasso Best Parameters for lasso: Lasso(alpha=1e-09, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, positive=False, precompute=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) MAE for lasso: 3.38177782627 MSE for lasso 41.3155554331 R2 for lasso 0.993617255849 ######Tuning Linear Ridge Best Parameters for linear ridge: Ridge(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='auto', tol=0.001) MAE for linear ridge: 3.35795768117 MSE for linear ridge 38.473182419 R2 for linear ridge 0.994056367451 ######Tuning Bayesian Best Parameters for bayesian: BayesianRidge(alpha_1=1e-05, alpha_2=10, compute_score=False, copy_X=True, fit_intercept=True, lambda_1=10, lambda_2=1e-05, n_iter=300, normalize=False, tol=0.001, verbose=False) MAE for bayesian: 3.35807586285 MSE for bayesian 38.4740276071 R2 for bayesian 0.99405623688 ######Tuning SGD Best Parameters for SGD: SGDRegressor(alpha=0.1, average=False, epsilon=0.001, eta0=0.01, fit_intercept=True, l1_ratio=0.2, learning_rate='invscaling', loss='squared_loss', n_iter=5, penalty='none', power_t=0.2, random_state=None, shuffle=True, verbose=0, warm_start=False) #MAE for SGD: 9.04117895779 #MSE for SGD 292.104437304 #R2 for SGD 0.954873464267 ####Develop models using various tuned algorithms above ########################################### ######### TESTING ERRORS ##########' ########################################### ######### TRAINING ERRORS ##########' | 2.735115 | 3 |
gevent/echo2.py | irr/python-labs | 4 | 6630221 | from gevent.server import StreamServer
def connection_handler(socket, address):
for l in socket.makefile('r'):
socket.sendall(l.encode('ascii'))
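# Minimal client sketch for exercising the echo server above (an illustrative addition,
# not part of the original module; host and port assume the defaults used in __main__):
def echo_once(line, host='127.0.0.1', port=8000):
    import socket
    # Send one newline-terminated line and return whatever the server echoes back.
    with socket.create_connection((host, port)) as s:
        s.sendall((line + '\n').encode('ascii'))
        return s.recv(1024).decode('ascii')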
if __name__ == '__main__':
server = StreamServer(('0.0.0.0', 8000), connection_handler)
server.serve_forever()
| from gevent.server import StreamServer
def connection_handler(socket, address):
for l in socket.makefile('r'):
socket.sendall(l.encode('ascii'))
if __name__ == '__main__':
server = StreamServer(('0.0.0.0', 8000), connection_handler)
server.serve_forever()
| none | 1 | 2.537946 | 3 |
|
model_revision/apps.py | Proper-Job/django-model-revision | 0 | 6630222 | <gh_stars>0
from django.apps import AppConfig
class ModelRevisionConfig(AppConfig):
name = 'model_revision'
| from django.apps import AppConfig
class ModelRevisionConfig(AppConfig):
name = 'model_revision' | none | 1 | 1.119261 | 1 |
|
minghu6/tools/send_email.py | minghu6/py-minghu6 | 2 | 6630223 | <filename>minghu6/tools/send_email.py
# -*- coding:utf-8 -*-
"""SEND_EMAIL
Usage:
send_email <from> <to> <subj> <body> [--attachments=<attachments>] [--password=<password>] [--cc=<cc>] [--bcc=<bcc>] [--debug]
Options:
<from> from email address
<to> to email address
<subj> subject of the email
<body> body of the email
-a --attachments=<attachments> attach paths split with ":" or ";" based on OS
-p --password=<password> user password
-c --cc=<cc> cc split with ":" or ";" based on OS
-b --bcc=<bcc> bcc split with ":" or ";" based on OS
-d --debug enable debug mode
"""
from docopt import docopt
from minghu6.etc.cmd import env_sep
#from minghu6.email import EmailSender
from minghu6.internet.email_test import EmailSender
def cli():
arguments = docopt(__doc__)
email_sender = EmailSender(arguments['<from>'],
arguments['--password'] if arguments['--password'] else None,
debug=arguments['--debug'])
cc = arguments['--cc'].split(env_sep) if arguments['--cc'] else ()
bcc = arguments['--bcc'].split(env_sep) if arguments['--bcc'] else ()
att = arguments['--attachments'].split(env_sep) if arguments['--attachments'] else ()
email_sender.send([arguments['<to>']],
[*map(lambda x:('cc', x), cc), *map(lambda x:('bcc', x), bcc)],
arguments['<subj>'],
arguments['<body>'],
att)
if __name__ == '__main__':
cli()
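# Example invocation from a shell (illustrative only; the addresses, password and the
# attachment path below are placeholders, not values from this project):
#
#   send_email from@example.com to@example.com "Weekly report" "See attachment" \
#       --attachments=report.pdf --cc=cc1@example.com --password=secret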
| <filename>minghu6/tools/send_email.py
# -*- coding:utf-8 -*-
"""SEND_EMAIL
Usage:
send_email <from> <to> <subj> <body> [--attachments=<attachments>] [--password=<password>] [--cc=<cc>] [--bcc=<bcc>] [--debug]
Options:
<from> from email address
<to> to email address
<subj> subject of the email
<body> body of the email
-a --attachments=<attachments> attach paths split with ":" or ";" based on OS
-p --password=<password> user password
-c --cc=<cc> cc split with ":" or ";" based on OS
-b --bcc=<bcc> bcc split with ":" or ";" based on OS
-d --debug enable debug mode
"""
from docopt import docopt
from minghu6.etc.cmd import env_sep
#from minghu6.email import EmailSender
from minghu6.internet.email_test import EmailSender
def cli():
arguments = docopt(__doc__)
email_sender = EmailSender(arguments['<from>'],
arguments['--password'] if arguments['--password'] else None,
debug=arguments['--debug'])
cc = arguments['--cc'].split(env_sep) if arguments['--cc'] else ()
bcc = arguments['--bcc'].split(env_sep) if arguments['--bcc'] else ()
att = arguments['--attachments'].split(env_sep) if arguments['--attachments'] else ()
email_sender.send([arguments['<to>']],
[*map(lambda x:('cc', x), cc), *map(lambda x:('bcc', x), bcc)],
arguments['<subj>'],
arguments['<body>'],
att)
if __name__ == '__main__':
cli()
| en | 0.716923 | # -*- coding:utf-8 -*- SEND_EMAIL
Usage:
send_email <from> <to> <subj> <body> [--attachments=<attachments>] [--password=<password>] [--cc=<cc>] [--bcc=<bcc>] [--debug]
Options:
<from> from email address
<to> to email address
<subj> subject of the email
<body> body of the email
-a --attachments=<attachments> attach paths split with ":" or ";" based on OS
-p --password=<password> user password
-c --cc=<cc> cc split with ":" or ";" based on OS
-b --bcc=<bcc> bcc split with ":" or ";" based on OS
-d --debug enable debug mode #from minghu6.email import EmailSender | 2.49574 | 2 |
tests/test_wf_status_update.py | ankur6ue/bff-ocr | 0 | 6630224 | import pytest
import requests
import time
import uuid
import time
import json
import redis
import threading
import datetime as dt
from prettytable import PrettyTable
# This is the URL for the kubernetes ingress + prefix path for the ocr-bff service
url = 'http://127.0.0.1:30559/ocr-bff'
# url = 'http://127.0.0.1:5001'
redis_url = '10.100.184.216'
def test_wf_status_update():
wf_id = uuid.uuid1()
r = requests.post(url + '/wf_update_status', {'uuid': wf_id.urn, 'status_msg': 'hello', 'is_completed': True})
assert r.status_code == 200
def test_wf_status_update_missing_field():
wf_id = uuid.uuid1()
r = requests.post(url + '/wf_update_status', {'is_completed': True})
assert r.status_code == 200
# Trigger a job and check the status updates in redis directly
def test_get_wf_status_update1():
r = requests.post(url + '/wf_trigger', {'image_list': 'trading-issue.jpg'})
assert r.status_code == 200
r = redis.Redis(host=redis_url, port=6379, db=0)
keys = r.keys('*')
for key in keys:
type = r.type(key)
vals = r.lrange(key, 0, -1)
print(vals)
# Send a start_workflow status update message
def test_get_wf_status_update2():
msg = "Flow started"
uuid6 = str(uuid.uuid4())[:6]
job_name = "ocr-job-{0}".format(uuid6)
# add another 6 characters for the pod_name. Remember, status updates are sent by a pod
uuid6 = str(uuid.uuid4())[:6]
pod_name = "{0}-{1}".format(job_name, uuid6)
r = requests.post(url + '/wf_update_status', {'job_name': pod_name,
'timestamp': dt.datetime.now(),
'status_msg': msg,
'is_completed': False,
'success': False},
timeout=0.1)
assert r.status_code == 200
# get status of non-existent job and verify it returns bad job
    # remember, status get requests use the job_name, not pod_name
r = requests.get(url + '/wf_update_status', {'job_name': job_name})
# get status and verify it returns pending
r = requests.get(url + '/wf_update_status', {'job_name': job_name})
r = requests.get(url + '/wf_update_status', {'job_name': "job1"})
msg = "workflow_complete"
r = requests.post(url + '/wf_update_status', {'job_name': "job1",
'timestamp': dt.datetime.now(),
'status_msg': msg,
'is_completed': True,
'success': True},
timeout=0.1)
assert r.status_code == 200
# Trigger a job and call the update_status endpoint. If the status of the job is completed, read the bounding boxes
def test_get_wf_status_update3():
jobs = []
imgs = ['trading-issue.jpg', 'IMG-9134.jpg']
# imgs = ['doesntexist.jpg']
num_jobs = len(imgs)
for job_idx in range(0, num_jobs):
# trading_issue.jpg is already stored in S3
r = requests.post(url + '/wf_trigger', {'image_list': imgs[job_idx]})
        assert r.status_code == 200
resp = json.loads(r.content)
assert resp['success'] is True
job_name = resp['job_name']
jobs.append(job_name)
print_job_status(jobs)
def print_job_status(jobs):
all_completed = True
pt = PrettyTable()
pt.field_names = ["job_name", "status_msg", "success"]
for job in jobs:
r = requests.get(url + '/wf_update_status', {'job_name': job})
content_json = json.loads(r.content)
is_completed = False
content_list = content_json['status']
for l in content_list:
pt.add_row([job, l['status_msg'], l["success"]])
is_completed = l['is_completed']
all_completed = all_completed & is_completed
print(pt)
if not all_completed:
threading.Timer(2.0, lambda: print_job_status(jobs)).start()
else:
print("all jobs finished")
test_get_wf_status_update3()
| import pytest
import requests
import time
import uuid
import time
import json
import redis
import threading
import datetime as dt
from prettytable import PrettyTable
# This is the URL for the kubernetes ingress + prefix path for the ocr-bff service
url = 'http://127.0.0.1:30559/ocr-bff'
# url = 'http://127.0.0.1:5001'
redis_url = '10.100.184.216'
def test_wf_status_update():
wf_id = uuid.uuid1()
r = requests.post(url + '/wf_update_status', {'uuid': wf_id.urn, 'status_msg': 'hello', 'is_completed': True})
assert r.status_code == 200
def test_wf_status_update_missing_field():
wf_id = uuid.uuid1()
r = requests.post(url + '/wf_update_status', {'is_completed': True})
assert r.status_code == 200
# Trigger a job and check the status updates in redis directly
def test_get_wf_status_update1():
r = requests.post(url + '/wf_trigger', {'image_list': 'trading-issue.jpg'})
assert r.status_code == 200
r = redis.Redis(host=redis_url, port=6379, db=0)
keys = r.keys('*')
for key in keys:
type = r.type(key)
vals = r.lrange(key, 0, -1)
print(vals)
# Send a start_workflow status update message
def test_get_wf_status_update2():
msg = "Flow started"
uuid6 = str(uuid.uuid4())[:6]
job_name = "ocr-job-{0}".format(uuid6)
# add another 6 characters for the pod_name. Remember, status updates are sent by a pod
uuid6 = str(uuid.uuid4())[:6]
pod_name = "{0}-{1}".format(job_name, uuid6)
r = requests.post(url + '/wf_update_status', {'job_name': pod_name,
'timestamp': dt.datetime.now(),
'status_msg': msg,
'is_completed': False,
'success': False},
timeout=0.1)
assert r.status_code == 200
# get status of non-existent job and verify it returns bad job
    # remember, status get requests use the job_name, not pod_name
r = requests.get(url + '/wf_update_status', {'job_name': job_name})
# get status and verify it returns pending
r = requests.get(url + '/wf_update_status', {'job_name': job_name})
r = requests.get(url + '/wf_update_status', {'job_name': "job1"})
msg = "workflow_complete"
r = requests.post(url + '/wf_update_status', {'job_name': "job1",
'timestamp': dt.datetime.now(),
'status_msg': msg,
'is_completed': True,
'success': True},
timeout=0.1)
assert r.status_code == 200
# Trigger a job and call the update_status endpoint. If the status of the job is completed, read the bounding boxes
def test_get_wf_status_update3():
jobs = []
imgs = ['trading-issue.jpg', 'IMG-9134.jpg']
# imgs = ['doesntexist.jpg']
num_jobs = len(imgs)
for job_idx in range(0, num_jobs):
# trading_issue.jpg is already stored in S3
r = requests.post(url + '/wf_trigger', {'image_list': imgs[job_idx]})
        assert r.status_code == 200
resp = json.loads(r.content)
assert resp['success'] is True
job_name = resp['job_name']
jobs.append(job_name)
print_job_status(jobs)
def print_job_status(jobs):
all_completed = True
pt = PrettyTable()
pt.field_names = ["job_name", "status_msg", "success"]
for job in jobs:
r = requests.get(url + '/wf_update_status', {'job_name': job})
content_json = json.loads(r.content)
is_completed = False
content_list = content_json['status']
for l in content_list:
pt.add_row([job, l['status_msg'], l["success"]])
is_completed = l['is_completed']
all_completed = all_completed & is_completed
print(pt)
if not all_completed:
threading.Timer(2.0, lambda: print_job_status(jobs)).start()
else:
print("all jobs finished")
test_get_wf_status_update3()
| en | 0.650108 | # This is the URL for the kubernetes ingress + prefix path for the ocr-bff service # url = 'http://127.0.0.1:5001' # Trigger a job and check the status updates in redis directly # Send a start_workflow status update message # add another 6 characters for the pod_name. Remember, status updates are sent by a pod # get status of non-existent job and verify it returns bad job # remmber, status get requests use the job_name, not pod_name # get status and verify it returns pending # Trigger a job and call the update_status endpoint. If the status of the job is completed, read the bounding boxes # imgs = ['doesntexist.jpg'] # trading_issue.jpg is already stored in S3 | 2.13147 | 2 |
portscan.py | M4chin3M4N/pyscan | 0 | 6630225 | <gh_stars>0
import socket
from colorama import Fore as fore
from threading import Thread, Lock
from queue import Queue
import os
def init():
print("portscanner_module [OK]")
def execute(host):
N_THREADS = 400
global q
q = Queue()
print_lock = Lock()
global open_ports
open_ports = []
def portscan(port):
try:
s = socket.socket()
s.connect((host,port))
except:
with print_lock:
print(f"{fore.LIGHTBLACK_EX}{host:15} : {port:5} {fore.RESET}",end='\r')
else:
with print_lock:
print(f"{fore.GREEN}{host:15} : {port:5} is open {fore.RESET}")
open_ports.append(port)
finally:
s.close()
def scan_thread():
global q
while True:
worker = q.get()
portscan(worker)
q.task_done()
def main(host,ports):
global q
for t in range(N_THREADS):
t = Thread(target=scan_thread)
t.daemon = True
t.start()
for worker in ports:
q.put(worker)
        q.join()  # block until every queued port has been scanned
print(f"{fore.RED}[+]Target: {host}")
#print("Enter the range for scan Default: 1-1024")
#port_range = 1-65535
#try:
# start,end = port_range.split("-")
# start,end = int(start),int(end)
# global ports
# ports = [p for p in range(start,end)]
#except:
start = "1"
end = "65535"
ports = [p for p in range(int(start),int(end))]
main(host,ports)
print("--------------------------------")
print("--------------------------------")
#os.system("clear")
print("[Wait......]")
return open_ports
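# Example driver (an illustrative assumption about how this module is meant to be used;
# the target address below is a placeholder):
if __name__ == '__main__':
    init()
    found = execute('127.0.0.1')
    print(f"Open ports: {found}")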
| import socket
from colorama import Fore as fore
from threading import Thread, Lock
from queue import Queue
import os
def init():
print("portscanner_module [OK]")
def execute(host):
N_THREADS = 400
global q
q = Queue()
print_lock = Lock()
global open_ports
open_ports = []
def portscan(port):
try:
s = socket.socket()
s.connect((host,port))
except:
with print_lock:
print(f"{fore.LIGHTBLACK_EX}{host:15} : {port:5} {fore.RESET}",end='\r')
else:
with print_lock:
print(f"{fore.GREEN}{host:15} : {port:5} is open {fore.RESET}")
open_ports.append(port)
finally:
s.close()
def scan_thread():
global q
while True:
worker = q.get()
portscan(worker)
q.task_done()
def main(host,ports):
global q
for t in range(N_THREADS):
t = Thread(target=scan_thread)
t.daemon = True
t.start()
for worker in ports:
q.put(worker)
        q.join()  # block until every queued port has been scanned
print(f"{fore.RED}[+]Target: {host}")
#print("Enter the range for scan Default: 1-1024")
#port_range = 1-65535
#try:
# start,end = port_range.split("-")
# start,end = int(start),int(end)
# global ports
# ports = [p for p in range(start,end)]
#except:
start = "1"
end = "65535"
ports = [p for p in range(int(start),int(end))]
main(host,ports)
print("--------------------------------")
print("--------------------------------")
#os.system("clear")
print("[Wait......]")
return open_ports | en | 0.311778 | #print("Enter the range for scan Default: 1-1024") #port_range = 1-65535 #try: # start,end = port_range.split("-") # start,end = int(start),int(end) # global ports # ports = [p for p in range(start,end)] #except: #os.system("clear") | 2.933126 | 3 |
models/5-Deepflash2/input/deepflash2-lfs/deepflash2/_nbdev.py | cns-iu/HuBMAP---Hacking-the-Kidney | 0 | 6630226 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Config": "00_learner.ipynb",
"Learner.apply_dropout": "00_learner.ipynb",
"energy_max": "00_learner.ipynb",
"Learner.predict_tiles": "00_learner.ipynb",
"EnsembleLearner": "00_learner.ipynb",
"UNetConvBlock": "01_models.ipynb",
"UNetUpBlock": "01_models.ipynb",
"UNet2D": "01_models.ipynb",
"unet_ronneberger2015": "01_models.ipynb",
"unet_falk2019": "01_models.ipynb",
"unet_deepflash2": "01_models.ipynb",
"unet_custom": "01_models.ipynb",
"FPN": "01_models.ipynb",
"UnetBlock": "01_models.ipynb",
"ASPP": "01_models.ipynb",
"UneXt50": "01_models.ipynb",
"unext50_deepflash2": "01_models.ipynb",
"load_smp_model": "01_models.ipynb",
"get_default_shapes": "01_models.ipynb",
"show": "02_data.ipynb",
"DeformationField": "02_data.ipynb",
"BaseDataset": "02_data.ipynb",
"RandomTileDataset": "02_data.ipynb",
"TileDataset": "02_data.ipynb",
"preprocess_mask": "02a_transforms.ipynb",
"create_pdf": "02a_transforms.ipynb",
"random_center": "02a_transforms.ipynb",
"calculate_weights": "02a_transforms.ipynb",
"lambda_kernel": "02a_transforms.ipynb",
"SeparableConv2D": "02a_transforms.ipynb",
"WeightTransformSingle": "02a_transforms.ipynb",
"WeightTransform": "02a_transforms.ipynb",
"Dice_f1": "03_metrics.ipynb",
"Iou": "03_metrics.ipynb",
"Recorder.plot_metrics": "03_metrics.ipynb",
"ElasticDeformCallback": "04_callbacks.ipynb",
"WeightedSoftmaxCrossEntropy": "05_losses.ipynb",
"load_kornia_loss": "05_losses.ipynb",
"unzip": "06_utils.ipynb",
"ensemble_results": "06_utils.ipynb",
"plot_results": "06_utils.ipynb",
"iou": "06_utils.ipynb",
"label_mask": "06_utils.ipynb",
"get_candidates": "06_utils.ipynb",
"iou_mapping": "06_utils.ipynb",
"calculate_roi_measures": "06_utils.ipynb",
"calc_iterations": "06_utils.ipynb",
"get_label_fn": "06_utils.ipynb",
"save_mask": "06_utils.ipynb",
"save_unc": "06_utils.ipynb",
"install_package": "06_utils.ipynb",
"import_package": "06_utils.ipynb",
"rot90": "07_tta.ipynb",
"hflip": "07_tta.ipynb",
"vflip": "07_tta.ipynb",
"BaseTransform": "07_tta.ipynb",
"Chain": "07_tta.ipynb",
"Transformer": "07_tta.ipynb",
"Compose": "07_tta.ipynb",
"Merger": "07_tta.ipynb",
"HorizontalFlip": "07_tta.ipynb",
"VerticalFlip": "07_tta.ipynb",
"Rotate90": "07_tta.ipynb",
"GRID_COLS": "08_gui.ipynb",
"set_css_in_cell_output": "08_gui.ipynb",
"tooltip_css": "08_gui.ipynb",
"ZipUpload": "08_gui.ipynb",
"ItemsPerPage": "08_gui.ipynb",
"BaseParamWidget": "08_gui.ipynb",
"BaseUI": "08_gui.ipynb",
"PathSelector": "08_gui.ipynb",
"PathDownloads": "08_gui.ipynb",
"PathConfig": "08_gui.ipynb",
"GTDataSB": "08_gui.ipynb",
"GTEstSB": "08_gui.ipynb",
"GTEstUI": "08_gui.ipynb",
"TrainDataSB": "08_gui.ipynb",
"TrainModelSB": "08_gui.ipynb",
"TrainValidSB": "08_gui.ipynb",
"LRWidget": "08_gui.ipynb",
"BasePopUpParamWidget": "08_gui.ipynb",
"ParamWidget": "08_gui.ipynb",
"MWWidget": "08_gui.ipynb",
"TrainUI": "08_gui.ipynb",
"PredInputSB": "08_gui.ipynb",
"PredSB": "08_gui.ipynb",
"PredUI": "08_gui.ipynb",
"GUI": "08_gui.ipynb",
"import_sitk": "09_gt.ipynb",
"staple": "09_gt.ipynb",
"m_voting": "09_gt.ipynb",
"msk_show": "09_gt.ipynb",
"GTEstimator": "09_gt.ipynb"}
modules = ["learner.py",
"models.py",
"data.py",
"transforms.py",
"metrics.py",
"callbacks.py",
"losses.py",
"utils.py",
"tta.py",
"gui.py",
"gt.py"]
doc_url = "https://matjesg.github.io/deepflash2/"
git_url = "https://github.com/matjesg/deepflash2/tree/master/"
def custom_doc_links(name): return None
| # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Config": "00_learner.ipynb",
"Learner.apply_dropout": "00_learner.ipynb",
"energy_max": "00_learner.ipynb",
"Learner.predict_tiles": "00_learner.ipynb",
"EnsembleLearner": "00_learner.ipynb",
"UNetConvBlock": "01_models.ipynb",
"UNetUpBlock": "01_models.ipynb",
"UNet2D": "01_models.ipynb",
"unet_ronneberger2015": "01_models.ipynb",
"unet_falk2019": "01_models.ipynb",
"unet_deepflash2": "01_models.ipynb",
"unet_custom": "01_models.ipynb",
"FPN": "01_models.ipynb",
"UnetBlock": "01_models.ipynb",
"ASPP": "01_models.ipynb",
"UneXt50": "01_models.ipynb",
"unext50_deepflash2": "01_models.ipynb",
"load_smp_model": "01_models.ipynb",
"get_default_shapes": "01_models.ipynb",
"show": "02_data.ipynb",
"DeformationField": "02_data.ipynb",
"BaseDataset": "02_data.ipynb",
"RandomTileDataset": "02_data.ipynb",
"TileDataset": "02_data.ipynb",
"preprocess_mask": "02a_transforms.ipynb",
"create_pdf": "02a_transforms.ipynb",
"random_center": "02a_transforms.ipynb",
"calculate_weights": "02a_transforms.ipynb",
"lambda_kernel": "02a_transforms.ipynb",
"SeparableConv2D": "02a_transforms.ipynb",
"WeightTransformSingle": "02a_transforms.ipynb",
"WeightTransform": "02a_transforms.ipynb",
"Dice_f1": "03_metrics.ipynb",
"Iou": "03_metrics.ipynb",
"Recorder.plot_metrics": "03_metrics.ipynb",
"ElasticDeformCallback": "04_callbacks.ipynb",
"WeightedSoftmaxCrossEntropy": "05_losses.ipynb",
"load_kornia_loss": "05_losses.ipynb",
"unzip": "06_utils.ipynb",
"ensemble_results": "06_utils.ipynb",
"plot_results": "06_utils.ipynb",
"iou": "06_utils.ipynb",
"label_mask": "06_utils.ipynb",
"get_candidates": "06_utils.ipynb",
"iou_mapping": "06_utils.ipynb",
"calculate_roi_measures": "06_utils.ipynb",
"calc_iterations": "06_utils.ipynb",
"get_label_fn": "06_utils.ipynb",
"save_mask": "06_utils.ipynb",
"save_unc": "06_utils.ipynb",
"install_package": "06_utils.ipynb",
"import_package": "06_utils.ipynb",
"rot90": "07_tta.ipynb",
"hflip": "07_tta.ipynb",
"vflip": "07_tta.ipynb",
"BaseTransform": "07_tta.ipynb",
"Chain": "07_tta.ipynb",
"Transformer": "07_tta.ipynb",
"Compose": "07_tta.ipynb",
"Merger": "07_tta.ipynb",
"HorizontalFlip": "07_tta.ipynb",
"VerticalFlip": "07_tta.ipynb",
"Rotate90": "07_tta.ipynb",
"GRID_COLS": "08_gui.ipynb",
"set_css_in_cell_output": "08_gui.ipynb",
"tooltip_css": "08_gui.ipynb",
"ZipUpload": "08_gui.ipynb",
"ItemsPerPage": "08_gui.ipynb",
"BaseParamWidget": "08_gui.ipynb",
"BaseUI": "08_gui.ipynb",
"PathSelector": "08_gui.ipynb",
"PathDownloads": "08_gui.ipynb",
"PathConfig": "08_gui.ipynb",
"GTDataSB": "08_gui.ipynb",
"GTEstSB": "08_gui.ipynb",
"GTEstUI": "08_gui.ipynb",
"TrainDataSB": "08_gui.ipynb",
"TrainModelSB": "08_gui.ipynb",
"TrainValidSB": "08_gui.ipynb",
"LRWidget": "08_gui.ipynb",
"BasePopUpParamWidget": "08_gui.ipynb",
"ParamWidget": "08_gui.ipynb",
"MWWidget": "08_gui.ipynb",
"TrainUI": "08_gui.ipynb",
"PredInputSB": "08_gui.ipynb",
"PredSB": "08_gui.ipynb",
"PredUI": "08_gui.ipynb",
"GUI": "08_gui.ipynb",
"import_sitk": "09_gt.ipynb",
"staple": "09_gt.ipynb",
"m_voting": "09_gt.ipynb",
"msk_show": "09_gt.ipynb",
"GTEstimator": "09_gt.ipynb"}
modules = ["learner.py",
"models.py",
"data.py",
"transforms.py",
"metrics.py",
"callbacks.py",
"losses.py",
"utils.py",
"tta.py",
"gui.py",
"gt.py"]
doc_url = "https://matjesg.github.io/deepflash2/"
git_url = "https://github.com/matjesg/deepflash2/tree/master/"
def custom_doc_links(name): return None
| en | 0.183601 | # AUTOGENERATED BY NBDEV! DO NOT EDIT! | 1.12593 | 1 |
Logic/Classes/LogicConfData.py | terexdev/BSDS-V39 | 11 | 6630227 | <filename>Logic/Classes/LogicConfData.py
from Logic.Data.DataManager import Reader
from Logic.Data.DataManager import Writer
class LogicConfData:
def decode(self: Reader):
pass
def encode(self: Writer):
self.writeVint(0)
# Event Slots IDs Array
self.writeVint(22) # Count
self.writeVint(1) # Gem Grab
self.writeVint(2) # Showdown
self.writeVint(3) # Daily Events
self.writeVint(4) # Team Events
self.writeVint(5) # Duo Showdown
self.writeVint(6) # Team Events 2
self.writeVint(7) # Special Events
self.writeVint(8) # Solo Events
self.writeVint(9) # Power Play
self.writeVint(10) # Seasonal Events
self.writeVint(11) # Seasonal Events 2
self.writeVint(12) # Candidates of The Day
self.writeVint(13) # Winner of The Day
self.writeVint(14) # Solo Mode Power League
self.writeVint(15) # Team Mode Power League
self.writeVint(16) # Club league
self.writeVint(17) # Club league
self.writeVint(20) # Championship Challenge (Stage 1)
self.writeVint(21) # Championship Challenge (Stage 2)
self.writeVint(22) # Championship Challenge (Stage 3)
self.writeVint(23) # Championship Challenge (Stage 4)
self.writeVint(24) # Championship Challenge (Stage 5)
# Event Slots IDs Array End
self.writeVint(len(self.player.allMaps)) # Events Count
eventIndex = 1
for i in self.player.allMaps:
self.writeVint(0)
self.writeVint(eventIndex) # EventType
self.writeVint(0) # EventsBeginCountdown
self.writeVint(99999) # Timer
self.writeVint(0) # tokens reward for new event
self.writeDataReference(15, i) # MapID
self.writeVint(-64) # GameModeVariation
self.writeVint(2) # State
self.writeString()
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) # Modifiers
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False) # Map Maker Map Structure Array
self.writeVint(0)
self.writeBoolean(False) # Power League Data Array
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) # ChronosTextEntry
self.writeVint(-64)
self.writeBoolean(False)
eventIndex += 1
self.writeVint(0) # Coming Up Events Count
self.writeVint(8) # Brawler Upgrade Cost
self.writeVint(20)
self.writeVint(35)
self.writeVint(75)
self.writeVint(140)
self.writeVint(290)
self.writeVint(480)
self.writeVint(800)
self.writeVint(1250)
self.writeVint(4) # Shop Coins Price
self.writeVint(20)
self.writeVint(50)
self.writeVint(140)
self.writeVint(280)
self.writeVint(4) # Shop Coins Amount
self.writeVint(150)
self.writeVint(400)
self.writeVint(1200)
self.writeVint(2600)
self.writeBoolean(True) # Show Offers Packs
self.writeVint(0) # Release Entry
self.writeVint(18) # IntValueEntry
self.writeLong(10008, 501)
self.writeLong(65, 2)
self.writeLong(1, 41000034) # ThemeID
self.writeLong(60, 36270)
self.writeLong(66, 1)
        self.writeLong(61, 36270) # SupportDisabled State | if 36218 < state, it's true
self.writeLong(47, 41381)
self.writeLong(29, 10) # Skin Group Active For Campaign
self.writeLong(48, 41381)
self.writeLong(50, 0) # Coming up quests placeholder
self.writeLong(1100, 500)
self.writeLong(1003, 1)
self.writeLong(36, 0)
self.writeLong(14, 0) # Double Token Event
self.writeLong(79, 149999)
self.writeLong(80, 160000)
self.writeLong(28, 4)
self.writeLong(74, 1)
self.writeVint(3) # Timed Int Value Entry
self.writeVint(14)
self.writeVint(1)
self.writeVint(0)
self.writeVint(739760) # Time left
self.writeVint(29)
self.writeVint(7)
self.writeVint(0)
self.writeVint(746960) # Time left
self.writeVint(29)
self.writeVint(10)
self.writeVint(0)
self.writeVint(1330340) # Time left
self.writeVint(0) # Custom Event
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) | <filename>Logic/Classes/LogicConfData.py
from Logic.Data.DataManager import Reader
from Logic.Data.DataManager import Writer
class LogicConfData:
def decode(self: Reader):
pass
def encode(self: Writer):
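# Serialises the client configuration payload: event slot ids, the active event list, brawler upgrade costs, shop prices and assorted value entries.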
self.writeVint(0)
# Event Slots IDs Array
self.writeVint(22) # Count
self.writeVint(1) # Gem Grab
self.writeVint(2) # Showdown
self.writeVint(3) # Daily Events
self.writeVint(4) # Team Events
self.writeVint(5) # Duo Showdown
self.writeVint(6) # Team Events 2
self.writeVint(7) # Special Events
self.writeVint(8) # Solo Events
self.writeVint(9) # Power Play
self.writeVint(10) # Seasonal Events
self.writeVint(11) # Seasonal Events 2
self.writeVint(12) # Candidates of The Day
self.writeVint(13) # Winner of The Day
self.writeVint(14) # Solo Mode Power League
self.writeVint(15) # Team Mode Power League
self.writeVint(16) # Club league
self.writeVint(17) # Club league
self.writeVint(20) # Championship Challenge (Stage 1)
self.writeVint(21) # Championship Challenge (Stage 2)
self.writeVint(22) # Championship Challenge (Stage 3)
self.writeVint(23) # Championship Challenge (Stage 4)
self.writeVint(24) # Championship Challenge (Stage 5)
# Event Slots IDs Array End
self.writeVint(len(self.player.allMaps)) # Events Count
eventIndex = 1
for i in self.player.allMaps:
self.writeVint(0)
self.writeVint(eventIndex) # EventType
self.writeVint(0) # EventsBeginCountdown
self.writeVint(99999) # Timer
self.writeVint(0) # tokens reward for new event
self.writeDataReference(15, i) # MapID
self.writeVint(-64) # GameModeVariation
self.writeVint(2) # State
self.writeString()
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) # Modifiers
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False) # Map Maker Map Structure Array
self.writeVint(0)
self.writeBoolean(False) # Power League Data Array
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) # ChronosTextEntry
self.writeVint(-64)
self.writeBoolean(False)
eventIndex += 1
self.writeVint(0) # Coming Up Events Count
self.writeVint(8) # Brawler Upgrade Cost
self.writeVint(20)
self.writeVint(35)
self.writeVint(75)
self.writeVint(140)
self.writeVint(290)
self.writeVint(480)
self.writeVint(800)
self.writeVint(1250)
self.writeVint(4) # Shop Coins Price
self.writeVint(20)
self.writeVint(50)
self.writeVint(140)
self.writeVint(280)
self.writeVint(4) # Shop Coins Amount
self.writeVint(150)
self.writeVint(400)
self.writeVint(1200)
self.writeVint(2600)
self.writeBoolean(True) # Show Offers Packs
self.writeVint(0) # Release Entry
self.writeVint(18) # IntValueEntry
self.writeLong(10008, 501)
self.writeLong(65, 2)
self.writeLong(1, 41000034) # ThemeID
self.writeLong(60, 36270)
self.writeLong(66, 1)
self.writeLong(61, 36270) # SupportDisabled State | true if state > 36218
self.writeLong(47, 41381)
self.writeLong(29, 10) # Skin Group Active For Campaign
self.writeLong(48, 41381)
self.writeLong(50, 0) # Coming up quests placeholder
self.writeLong(1100, 500)
self.writeLong(1003, 1)
self.writeLong(36, 0)
self.writeLong(14, 0) # Double Token Event
self.writeLong(79, 149999)
self.writeLong(80, 160000)
self.writeLong(28, 4)
self.writeLong(74, 1)
self.writeVint(3) # Timed Int Value Entry
self.writeVint(14)
self.writeVint(1)
self.writeVint(0)
self.writeVint(739760) # Time left
self.writeVint(29)
self.writeVint(7)
self.writeVint(0)
self.writeVint(746960) # Time left
self.writeVint(29)
self.writeVint(10)
self.writeVint(0)
self.writeVint(1330340) # Time left
self.writeVint(0) # Custom Event
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) | en | 0.699249 | # Event Slots IDs Array # Count # Gem Grab # Showdown # Daily Events # Team Events # Duo Showdown # Team Events 2 # Special Events # Solo Events # Power Play # Seasonal Events # Seasonal Events 2 # Candidates of The Day # Winner of The Day # Solo Mode Power League # Team Mode Power League # Club league # Club league # Championship Challenge (Stage 1) # Championship Challenge (Stage 2) # Championship Challenge (Stage 3) # Championship Challenge (Stage 4) # Championship Challenge (Stage 5) # Event Slots IDs Array End # Events Count # EventType # EventsBeginCountdown # Timer # tokens reward for new event # MapID # GameModeVariation # State # Modifiers # Map Maker Map Structure Array # Power League Data Array # ChronosTextEntry # Coming Up Events Count # Brawler Upgrade Cost # Shop Coins Price # Shop Coins Amount # Show Offers Packs # Release Entry # IntValueEntry # ThemeID # SupportDisabled State | if 36218 < state its true # Skin Group Active For Campaign # Coming up quests placeholder # Double Token Event # Timed Int Value Entry # Time left # Time left # Time left # Custom Event | 2.551912 | 3 |
Python/Product/PythonTools/ptvsd/repl/ipython.py | techkey/PTVS | 404 | 6630228 | # Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import, print_function
"""Automatically selects REPL support for Jupyter/IPython"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "3.2.1.0"
def is_version_at_least(ver_str, *version):
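# Returns True when the dotted version string ver_str is at least the given version components (comparison stops at the first non-numeric part).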
try:
for v1, v2 in zip(version, ver_str.split('.')):
i1, i2 = int(v1), int(v2)
if i1 != i2:
return i1 < i2
except ValueError:
# Versions matched as far as we could go
return True
return True
USE_JUPYTER_CLIENT = False
try:
import jupyter_client
if is_version_at_least(jupyter_client.__version__, 5, 1):
USE_JUPYTER_CLIENT = True
except ImportError:
pass
if USE_JUPYTER_CLIENT:
from .jupyter_client import JupyterClientBackend
IPythonBackend = IPythonBackendWithoutPyLab = JupyterClientBackend
else:
from .ipython_client import IPythonBackend, IPythonBackendWithoutPyLab
| # Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import, print_function
"""Automatically selects REPL support for Jupyter/IPython"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "3.2.1.0"
def is_version_at_least(ver_str, *version):
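# Returns True when the dotted version string ver_str is at least the given version components (comparison stops at the first non-numeric part).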
try:
for v1, v2 in zip(version, ver_str.split('.')):
i1, i2 = int(v1), int(v2)
if i1 != i2:
return i1 < i2
except ValueError:
# Versions matched as far as we could go
return True
return True
USE_JUPYTER_CLIENT = False
try:
import jupyter_client
if is_version_at_least(jupyter_client.__version__, 5, 1):
USE_JUPYTER_CLIENT = True
except ImportError:
pass
if USE_JUPYTER_CLIENT:
from .jupyter_client import JupyterClientBackend
IPythonBackend = IPythonBackendWithoutPyLab = JupyterClientBackend
else:
from .ipython_client import IPythonBackend, IPythonBackendWithoutPyLab
| en | 0.775019 | # Python Tools for Visual Studio # Copyright(c) Microsoft Corporation # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the License); you may not use # this file except in compliance with the License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0 # # THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY # IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, # MERCHANTABILITY OR NON-INFRINGEMENT. # # See the Apache Version 2.0 License for specific language governing # permissions and limitations under the License. Automatically selects REPL support for Jupyter/IPython # Versions matched as far as we could go | 2.293838 | 2 |
virtool/uploads/utils.py | ReeceHoffmann/virtool | 0 | 6630229 | import os
import pathlib
from logging import getLogger
from typing import Any, Callable, Optional
import aiofiles
from aiohttp.web_request import Request
from cerberus import Validator
logger = getLogger(__name__)
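# Uploaded files are streamed in chunks of 1024 * 1000 * 50 bytes (roughly 50 MB).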
CHUNK_SIZE = 1024 * 1000 * 50
def is_gzip_compressed(chunk: bytes):
"""
Check if a file is gzip compressed.
Peek at the first two bytes for the gzip magic number and raise an exception if it
is not present.
:param chunk: First byte chunk from a file being uploaded
:raises OSError: An OSError is raised when the file is not gzip-compressed
"""
if not chunk[:2] == b"\x1f\x8b":
raise OSError("Not a gzipped file")
def naive_validator(req) -> Validator.errors:
"""
Validate `name` given in an HTTP request using cerberus
"""
v = Validator({"name": {"type": "string", "required": True}}, allow_unknown=True)
if not v.validate(dict(req.query)):
return v.errors
async def naive_writer(
req: Request,
path: pathlib.Path,
on_first_chunk: Optional[Callable[[bytes], Any]] = None,
) -> Optional[int]:
"""
Write a new file from an HTTP multipart request.
:param req: aiohttp request object
:param path: the file path to write the data to
:param on_first_chunk: a function to call with the first chunk of the file stream
:return: size of the new file in bytes
"""
reader = await req.multipart()
file = await reader.next()
size = 0
try:
await req.app["run_in_thread"](os.makedirs, path.parent)
except FileExistsError:
pass
async with aiofiles.open(path, "wb") as f:
while True:
chunk = await file.read_chunk(CHUNK_SIZE)
if not chunk:
break
if size == 0 and on_first_chunk:
on_first_chunk(chunk)
await f.write(chunk)
size += len(chunk)
return size
| import os
import pathlib
from logging import getLogger
from typing import Any, Callable, Optional
import aiofiles
from aiohttp.web_request import Request
from cerberus import Validator
logger = getLogger(__name__)
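# Uploaded files are streamed in chunks of 1024 * 1000 * 50 bytes (roughly 50 MB).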
CHUNK_SIZE = 1024 * 1000 * 50
def is_gzip_compressed(chunk: bytes):
"""
Check if a file is gzip compressed.
Peek at the first two bytes for the gzip magic number and raise an exception if it
is not present.
:param chunk: First byte chunk from a file being uploaded
:raises OSError: An OSError is raised when the file is not gzip-compressed
"""
if not chunk[:2] == b"\x1f\x8b":
raise OSError("Not a gzipped file")
def naive_validator(req) -> Validator.errors:
"""
Validate `name` given in an HTTP request using cerberus
"""
v = Validator({"name": {"type": "string", "required": True}}, allow_unknown=True)
if not v.validate(dict(req.query)):
return v.errors
async def naive_writer(
req: Request,
path: pathlib.Path,
on_first_chunk: Optional[Callable[[bytes], Any]] = None,
) -> Optional[int]:
"""
Write a new file from an HTTP multipart request.
:param req: aiohttp request object
:param path: the file path to write the data to
:param on_first_chunk: a function to call with the first chunk of the file stream
:return: size of the new file in bytes
"""
reader = await req.multipart()
file = await reader.next()
size = 0
try:
await req.app["run_in_thread"](os.makedirs, path.parent)
except FileExistsError:
pass
async with aiofiles.open(path, "wb") as f:
while True:
chunk = await file.read_chunk(CHUNK_SIZE)
if not chunk:
break
if size == 0 and on_first_chunk:
on_first_chunk(chunk)
await f.write(chunk)
size += len(chunk)
return size
| en | 0.85831 | Check if a file is gzip compressed. Peek at the first two bytes for the gzip magic number and raise and exception if it is not present. :param chunk: First byte chunk from a file being uploaded :raises OSError: An OSError is raised when the file is not gzip-compressed Validate `name` given in an HTTP request using cerberus Write a new file from a HTTP multipart request. :param req: aiohttp request object :param path: the file path to write the data to :param on_first_chunk: a function to call with the first chunk of the file stream :return: size of the new file in bytes | 2.76486 | 3 |
game/Skeleton.py | anjali1729/FuzzyGame-GOT | 0 | 6630230 | <filename>game/Skeleton.py
from random import randrange
import pygame
class Skeleton:
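# Class-level defaults for position, sprites, combat stats and scan state; instances overwrite most of these in __init__ and during play.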
x = 0
y = 0
direction = -1
step = 1
image = None
up_image = down_image = left_image = right_image = None
attack_image = None
health = 0
attack = None
attack_rate = 0
attack_charge_full = 0
speed = None
_image_surf = None
_attack_surf = None
sprite_width = 60
sprite_height = 60
type = None
COLON = ":"
scan_range = None
dead = False
unique_id = None
max_charge = 0
text = None
textrect = None
previous_decision = 'move'
def __init__(self, up_image, down_image, left_image, right_image, attack_image, army_x, army_y, global_min_x, global_min_y, global_max_x, global_max_y):
self.health = 300
# formation based on army coordinates
self.global_min_x, self.global_min_y, self.global_max_x, self.global_max_y = global_min_x, global_min_y, global_max_x, global_max_y
self.up_image = up_image
self.down_image = down_image
self.left_image = left_image
self.right_image = right_image
self.attack_image = attack_image
img_up = pygame.image.load(self.up_image) # sprite
img_up = pygame.transform.scale(img_up, (60, 60))
img_down = pygame.image.load(self.down_image) # sprite
img_left = pygame.image.load(self.left_image) # sprite
img_right = pygame.image.load(self.right_image) # sprite
self.sprite_width = img_up.get_width()
self.sprite_height = img_up.get_height()
self.gridy = army_y
self.gridx = army_x
self.x = army_x * self.sprite_width
self.y = army_y * self.sprite_height
self.font = pygame.font.SysFont('Sans', 15)
self.text = self.font.render('stay', True, (255, 255, 255), (255, 255, 255))
self.textrect = self.text.get_rect()
def update(self, grid, display_surf, to_attack, direction, enemy_bot, temp_bot_coord_dict):
self.attack_charge_full += 1
if to_attack and self.attack_charge_full >= self.max_charge:
enemy_bot = self.attack_enemy(display_surf, direction, enemy_bot, temp_bot_coord_dict)
if enemy_bot.dead:
if str(enemy_bot.gridx) + ":" + str(enemy_bot.gridy) in temp_bot_coord_dict:
#temp_bot_coord_dict.pop(str(enemy_bot.gridx) + ":" + str(enemy_bot.gridy), None)
grid[enemy_bot.gridx][enemy_bot.gridy] = '0'
else:
# update position
if self.direction == 0 and (self.gridx + self.step) < self.global_max_x:
if grid[(self.gridx + self.step)][self.gridy] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.x = self.x + self.step * self.sprite_width
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridx += self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 1 and (self.gridx - self.step) > self.global_min_x:
if grid[(self.gridx - self.step)][self.gridy] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.x = self.x - self.step * self.sprite_width
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridx -= self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 2 and (self.gridy - self.step) > self.global_min_y:
if grid[(self.gridx)][self.gridy - self.step] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.y = self.y - self.step * self.sprite_height
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridy -= self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 3 and (self.gridy + self.step) < self.global_max_y:
if grid[(self.gridx)][self.gridy + self.step] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.y = self.y + self.step * self.sprite_height
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridy += self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
else:
if self.speed == "fast":
self.speed = "medium"
elif self.speed == "medium":
self.speed = "slow"
elif self.speed == "slow":
self.speed = "medium"
text = self.font.render("stay", True, (0, 0, 0), (255, 255, 255))
textrect = text.get_rect()
textrect.centerx = self.x + int(self.sprite_width / 2)
textrect.centery = self.y
display_surf.blit(text, textrect)
#self.draw(display_surf)
return grid, enemy_bot, temp_bot_coord_dict
def moveRight(self):
self.direction = 0
self.image = self.right_image
self._image_surf = self._right_image_surf
def moveLeft(self):
self.direction = 1
self.image = self.left_image
self._image_surf = self._left_image_surf
def moveUp(self):
self.direction = 2
self.image = self.up_image
self._image_surf = self._up_image_surf
def moveDown(self):
self.direction = 3
self.image = self.down_image
self._image_surf = self._down_image_surf
def create_avatar(self):
self._up_image_surf = pygame.transform.scale(pygame.image.load(self.up_image), (self.sprite_width, self.sprite_height)).convert()
self._down_image_surf = pygame.transform.scale(pygame.image.load(self.down_image), (self.sprite_width, self.sprite_height)).convert()
self._left_image_surf = pygame.transform.scale(pygame.image.load(self.left_image), (self.sprite_width, self.sprite_height)).convert()
self._right_image_surf = pygame.transform.scale(pygame.image.load(self.right_image), (self.sprite_width, self.sprite_height)).convert()
attack_image = pygame.image.load(self.attack_image)
#alpha = 128
#attack_image.fill((255, 255, 255, alpha), None, pygame.BLEND_RGBA_MULT)
self._attack_surf = pygame.transform.scale(attack_image, (self.sprite_width, self.sprite_height)).convert()
self._image_surf = self._down_image_surf
def draw(self, surface):
surface.blit(self._image_surf, (self.x, self.y))
def draw_decision(self, surface):
surface.blit(self.text, self.textrect)
def sense_range(self, rang, grid):
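# Counts 'G' (goblin), 'O' (ogre) and 'T' (troll) cells in a square window of side ~rang centred on this unit, staying inside the grid bounds.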
step_path = int(rang/2)
goblin_count = 0
ogre_count = 0
troll_count = 0
for i in range(-step_path, step_path):
for j in range(-step_path, step_path):
if i >= 0 and self.gridx + i < len(grid) and j >=0 and self.gridy + j < len(grid[0]):
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i >=0 and self.gridx + i < len(grid) and j<0 and self.gridy + j >= 0:
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i<0 and self.gridx + i >= 0 and j>=0 and self.gridy + j < len(grid[0]):
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i<0 and self.gridx + i >= 0 and j<0 and self.gridy + j >= 0:
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
return goblin_count, ogre_count, troll_count
def attack_enemy(self, surface, direction, enemy_bot, temp_bot_coord_dict):
self.attack_charge_full = 0
if direction == "up":
surface.blit(self._attack_surf, (self.x, self.y - 1 * self.sprite_height))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "down":
surface.blit(self._attack_surf, (self.x, self.y + 1 * self.sprite_height))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "left":
surface.blit(self._attack_surf, (self.x - 1 * self.sprite_width, self.y))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "right":
surface.blit(self._attack_surf, (self.x + 1 * self.sprite_width, self.y))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
if enemy_bot.health <= 0:
enemy_bot.is_dead()
print(enemy_bot.unique_id + " HAS DIED!")
self.draw(surface)
return enemy_bot
def return_image_sprite(self):
return self._image_surf
def is_dead(self):
self.dead = True
def scan_stage(self, grid, print_range=False):
goblin_count, ogre_count, troll_count = self.sense_range(self.scan_range,grid)
if print_range:
print("*******" + self.unique_id + "*********")
print("Ninja Count: " + str(goblin_count))
print("Samurai Count: " + str(ogre_count))
print("Mage Count: " + str(troll_count))
print("****************")
return goblin_count, ogre_count, troll_count
| <filename>game/Skeleton.py
from random import randrange
import pygame
class Skeleton:
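# Class-level defaults for position, sprites, combat stats and scan state; instances overwrite most of these in __init__ and during play.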
x = 0
y = 0
direction = -1
step = 1
image = None
up_image = down_image = left_image = right_image = None
attack_image = None
health = 0
attack = None
attack_rate = 0
attack_charge_full = 0
speed = None
_image_surf = None
_attack_surf = None
sprite_width = 60
sprite_height = 60
type = None
COLON = ":"
scan_range = None
dead = False
unique_id = None
max_charge = 0
text = None
textrect = None
previous_decision = 'move'
def __init__(self, up_image, down_image, left_image, right_image, attack_image, army_x, army_y, global_min_x, global_min_y, global_max_x, global_max_y):
self.health = 300
# formation based on army coordinates
self.global_min_x, self.global_min_y, self.global_max_x, self.global_max_y = global_min_x, global_min_y, global_max_x, global_max_y
self.up_image = up_image
self.down_image = down_image
self.left_image = left_image
self.right_image = right_image
self.attack_image = attack_image
img_up = pygame.image.load(self.up_image) # sprite
img_up = pygame.transform.scale(img_up, (60, 60))
img_down = pygame.image.load(self.down_image) # sprite
img_left = pygame.image.load(self.left_image) # sprite
img_right = pygame.image.load(self.right_image) # sprite
self.sprite_width = img_up.get_width()
self.sprite_height = img_up.get_height()
self.gridy = army_y
self.gridx = army_x
self.x = army_x * self.sprite_width
self.y = army_y * self.sprite_height
self.font = pygame.font.SysFont('Sans', 15)
self.text = self.font.render('stay', True, (255, 255, 255), (255, 255, 255))
self.textrect = self.text.get_rect()
def update(self, grid, display_surf, to_attack, direction, enemy_bot, temp_bot_coord_dict):
self.attack_charge_full += 1
if to_attack and self.attack_charge_full >= self.max_charge:
enemy_bot = self.attack_enemy(display_surf, direction, enemy_bot, temp_bot_coord_dict)
if enemy_bot.dead:
if str(enemy_bot.gridx) + ":" + str(enemy_bot.gridy) in temp_bot_coord_dict:
#temp_bot_coord_dict.pop(str(enemy_bot.gridx) + ":" + str(enemy_bot.gridy), None)
grid[enemy_bot.gridx][enemy_bot.gridy] = '0'
else:
# update position
if self.direction == 0 and (self.gridx + self.step) < self.global_max_x:
if grid[(self.gridx + self.step)][self.gridy] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.x = self.x + self.step * self.sprite_width
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridx += self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 1 and (self.gridx - self.step) > self.global_min_x:
if grid[(self.gridx - self.step)][self.gridy] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.x = self.x - self.step * self.sprite_width
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridx -= self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 2 and (self.gridy - self.step) > self.global_min_y:
if grid[(self.gridx)][self.gridy - self.step] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.y = self.y - self.step * self.sprite_height
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridy -= self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
if self.direction == 3 and (self.gridy + self.step) < self.global_max_y:
if grid[(self.gridx)][self.gridy + self.step] == '0':
grid[(self.gridx)][self.gridy] = '0'
self.y = self.y + self.step * self.sprite_height
if str(self.gridx) + ":" + str(self.gridy) in temp_bot_coord_dict:
temp_bot_coord_dict.pop(str(self.gridx) + ":" + str(self.gridy), None)
self.gridy += self.step
grid[(self.gridx)][self.gridy] = self.type
temp_bot_coord_dict[str(self.gridx) + ":" + str(self.gridy)] = self
else:
if self.speed == "fast":
self.speed = "medium"
elif self.speed == "medium":
self.speed = "slow"
elif self.speed == "slow":
self.speed = "medium"
text = self.font.render("stay", True, (0, 0, 0), (255, 255, 255))
textrect = text.get_rect()
textrect.centerx = self.x + int(self.sprite_width / 2)
textrect.centery = self.y
display_surf.blit(text, textrect)
#self.draw(display_surf)
return grid, enemy_bot, temp_bot_coord_dict
def moveRight(self):
self.direction = 0
self.image = self.right_image
self._image_surf = self._right_image_surf
def moveLeft(self):
self.direction = 1
self.image = self.left_image
self._image_surf = self._left_image_surf
def moveUp(self):
self.direction = 2
self.image = self.up_image
self._image_surf = self._up_image_surf
def moveDown(self):
self.direction = 3
self.image = self.down_image
self._image_surf = self._down_image_surf
def create_avatar(self):
self._up_image_surf = pygame.transform.scale(pygame.image.load(self.up_image), (self.sprite_width, self.sprite_height)).convert()
self._down_image_surf = pygame.transform.scale(pygame.image.load(self.down_image), (self.sprite_width, self.sprite_height)).convert()
self._left_image_surf = pygame.transform.scale(pygame.image.load(self.left_image), (self.sprite_width, self.sprite_height)).convert()
self._right_image_surf = pygame.transform.scale(pygame.image.load(self.right_image), (self.sprite_width, self.sprite_height)).convert()
attack_image = pygame.image.load(self.attack_image)
#alpha = 128
#attack_image.fill((255, 255, 255, alpha), None, pygame.BLEND_RGBA_MULT)
self._attack_surf = pygame.transform.scale(attack_image, (self.sprite_width, self.sprite_height)).convert()
self._image_surf = self._down_image_surf
def draw(self, surface):
surface.blit(self._image_surf, (self.x, self.y))
def draw_decision(self, surface):
surface.blit(self.text, self.textrect)
def sense_range(self, rang, grid):
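# Counts 'G' (goblin), 'O' (ogre) and 'T' (troll) cells in a square window of side ~rang centred on this unit, staying inside the grid bounds.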
step_path = int(rang/2)
goblin_count = 0
ogre_count = 0
troll_count = 0
for i in range(-step_path, step_path):
for j in range(-step_path, step_path):
if i >= 0 and self.gridx + i < len(grid) and j >=0 and self.gridy + j < len(grid[0]):
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i >=0 and self.gridx + i < len(grid) and j<0 and self.gridy + j >= 0:
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i<0 and self.gridx + i >= 0 and j>=0 and self.gridy + j < len(grid[0]):
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
elif i<0 and self.gridx + i >= 0 and j<0 and self.gridy + j >= 0:
if grid[self.gridx + i][self.gridy + j] == 'G':
goblin_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'O':
ogre_count+=1
elif grid[self.gridx + i][self.gridy + j] == 'T':
troll_count+=1
return goblin_count, ogre_count, troll_count
def attack_enemy(self, surface, direction, enemy_bot, temp_bot_coord_dict):
self.attack_charge_full = 0
if direction == "up":
surface.blit(self._attack_surf, (self.x, self.y - 1 * self.sprite_height))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "down":
surface.blit(self._attack_surf, (self.x, self.y + 1 * self.sprite_height))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "left":
surface.blit(self._attack_surf, (self.x - 1 * self.sprite_width, self.y))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
elif direction == "right":
surface.blit(self._attack_surf, (self.x + 1 * self.sprite_width, self.y))
enemy_bot.health -= self.attack_rate
print(self.unique_id + " ATTACKS " + enemy_bot.unique_id + " WITH A FORCE OF " + str(self.attack_rate) + " LEAVING ENEMY HEART AT " + str(enemy_bot.health))
if enemy_bot.health <= 0:
enemy_bot.is_dead()
print(enemy_bot.unique_id + " HAS DIED!")
self.draw(surface)
return enemy_bot
def return_image_sprite(self):
return self._image_surf
def is_dead(self):
self.dead = True
def scan_stage(self, grid, print_range=False):
goblin_count, ogre_count, troll_count = self.sense_range(self.scan_range,grid)
if print_range:
print("*******" + self.unique_id + "*********")
print("Ninja Count: " + str(goblin_count))
print("Samurai Count: " + str(ogre_count))
print("Mage Count: " + str(troll_count))
print("****************")
return goblin_count, ogre_count, troll_count
| en | 0.222379 | # formation based on army coordinates # sprite # sprite # sprite # sprite #temp_bot_coord_dict.pop(str(enemy_bot.gridx) + ":" + str(enemy_bot.gridy), None) # update position #self.draw(display_surf) #alpha = 128 #attack_image.fill((255, 255, 255, alpha), None, pygame.BLEND_RGBA_MULT) | 3.184461 | 3 |
spiketools/tests/utils/test_trials.py | claire98han/SpikeTools | 1 | 6630231 | <gh_stars>1-10
"""Tests for spiketools.utils.trials"""
import numpy as np
from spiketools.utils.trials import *
###################################################################################################
###################################################################################################
def test_epoch_spikes_by_event():
spikes = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
events = np.array([5, 10, 15])
window = [-1, 1]
trials = epoch_spikes_by_event(spikes, events, window)
assert isinstance(trials, list)
assert isinstance(trials[0], np.ndarray)
assert len(trials) == len(events)
assert np.array_equal(trials[0], np.array([4.25, 5.5]) - events[0])
assert np.array_equal(trials[1], np.array([9.25, 9.75, 10.5]) - events[1])
assert np.array_equal(trials[2], np.array([14.1, 15.2, 15.9]) - events[2])
def test_epoch_spikes_by_range():
spikes = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
starts = np.array([5, 12])
stops = np.array([10, 15])
trials = epoch_spikes_by_range(spikes, starts, stops)
assert isinstance(trials, list)
assert isinstance(trials[0], np.ndarray)
assert len(trials) == len(starts)
assert np.array_equal(trials[0], np.array([5.5, 6.1, 8., 9.25, 9.75]))
assert np.array_equal(trials[1], np.array([12., 14.1]))
# Check with time resetting
trials = epoch_spikes_by_range(spikes, starts, stops, reset=True)
assert np.array_equal(trials[0], np.array([5.5, 6.1, 8., 9.25, 9.75]) - starts[0])
assert np.array_equal(trials[1], np.array([12., 14.1]) - starts[1])
def test_epoch_data_by_event():
times = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
events = np.array([5, 10, 15])
window = [-1, 1]
ttimes, tvalues = epoch_data_by_event(times, values, events, window)
assert isinstance(ttimes, list)
assert isinstance(tvalues, list)
assert isinstance(ttimes[0], np.ndarray)
assert isinstance(tvalues[0], np.ndarray)
assert len(ttimes) == len(tvalues) == len(events)
assert np.array_equal(ttimes[0], np.array([4.25, 5.5]) - events[0])
assert np.array_equal(tvalues[0], np.array([2, 3]))
assert np.array_equal(ttimes[1], np.array([9.25, 9.75, 10.5]) - events[1])
assert np.array_equal(tvalues[1], np.array([6, 7, 8]))
assert np.array_equal(ttimes[2], np.array([14.1, 15.2, 15.9]) - events[2])
assert np.array_equal(tvalues[2], np.array([10, 11, 12]))
def test_epoch_data_by_range():
times = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
starts = np.array([5, 12])
stops = np.array([10, 15])
ttimes, tvalues = epoch_data_by_range(times, values, starts, stops)
assert isinstance(ttimes, list)
assert isinstance(tvalues, list)
assert isinstance(ttimes[0], np.ndarray)
assert isinstance(tvalues[0], np.ndarray)
assert len(ttimes) == len(tvalues) == len(starts)
assert np.array_equal(ttimes[0], np.array([5.5, 6.1, 8., 9.25, 9.75]))
assert np.array_equal(tvalues[0], np.array([3, 4, 5, 6, 7]))
assert np.array_equal(ttimes[1], np.array([12.0, 14.1]))
assert np.array_equal(tvalues[1], np.array([9, 10]))
# Check with time resetting
ttimes, tvalues = epoch_data_by_range(times, values, starts, stops, reset=True)
assert np.array_equal(ttimes[0], np.array([5.5, 6.1, 8., 9.25, 9.75]) - starts[0])
assert np.array_equal(tvalues[0], np.array([3, 4, 5, 6, 7]))
assert np.array_equal(ttimes[1], np.array([12.0, 14.1]) - starts[1])
assert np.array_equal(tvalues[1], np.array([9, 10]))
| """Tests for spiketools.utils.trials"""
import numpy as np
from spiketools.utils.trials import *
###################################################################################################
###################################################################################################
def test_epoch_spikes_by_event():
spikes = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
events = np.array([5, 10, 15])
window = [-1, 1]
trials = epoch_spikes_by_event(spikes, events, window)
assert isinstance(trials, list)
assert isinstance(trials[0], np.ndarray)
assert len(trials) == len(events)
assert np.array_equal(trials[0], np.array([4.25, 5.5]) - events[0])
assert np.array_equal(trials[1], np.array([9.25, 9.75, 10.5]) - events[1])
assert np.array_equal(trials[2], np.array([14.1, 15.2, 15.9]) - events[2])
def test_epoch_spikes_by_range():
spikes = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
starts = np.array([5, 12])
stops = np.array([10, 15])
trials = epoch_spikes_by_range(spikes, starts, stops)
assert isinstance(trials, list)
assert isinstance(trials[0], np.ndarray)
assert len(trials) == len(starts)
assert np.array_equal(trials[0], np.array([5.5, 6.1, 8., 9.25, 9.75]))
assert np.array_equal(trials[1], np.array([12., 14.1]))
# Check with time resetting
trials = epoch_spikes_by_range(spikes, starts, stops, reset=True)
assert np.array_equal(trials[0], np.array([5.5, 6.1, 8., 9.25, 9.75]) - starts[0])
assert np.array_equal(trials[1], np.array([12., 14.1]) - starts[1])
def test_epoch_data_by_event():
times = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
events = np.array([5, 10, 15])
window = [-1, 1]
ttimes, tvalues = epoch_data_by_event(times, values, events, window)
assert isinstance(ttimes, list)
assert isinstance(tvalues, list)
assert isinstance(ttimes[0], np.ndarray)
assert isinstance(tvalues[0], np.ndarray)
assert len(ttimes) == len(tvalues) == len(events)
assert np.array_equal(ttimes[0], np.array([4.25, 5.5]) - events[0])
assert np.array_equal(tvalues[0], np.array([2, 3]))
assert np.array_equal(ttimes[1], np.array([9.25, 9.75, 10.5]) - events[1])
assert np.array_equal(tvalues[1], np.array([6, 7, 8]))
assert np.array_equal(ttimes[2], np.array([14.1, 15.2, 15.9]) - events[2])
assert np.array_equal(tvalues[2], np.array([10, 11, 12]))
def test_epoch_data_by_range():
times = np.array([2.5, 3.5, 4.25, 5.5, 6.1, 8., 9.25, 9.75, 10.5, 12., 14.1, 15.2, 15.9])
values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
starts = np.array([5, 12])
stops = np.array([10, 15])
ttimes, tvalues = epoch_data_by_range(times, values, starts, stops)
assert isinstance(ttimes, list)
assert isinstance(tvalues, list)
assert isinstance(ttimes[0], np.ndarray)
assert isinstance(tvalues[0], np.ndarray)
assert len(ttimes) == len(tvalues) == len(starts)
assert np.array_equal(ttimes[0], np.array([5.5, 6.1, 8., 9.25, 9.75]))
assert np.array_equal(tvalues[0], np.array([3, 4, 5, 6, 7]))
assert np.array_equal(ttimes[1], np.array([12.0, 14.1]))
assert np.array_equal(tvalues[1], np.array([9, 10]))
# Check with time reseting
ttimes, tvalues = epoch_data_by_range(times, values, starts, stops, reset=True)
assert np.array_equal(ttimes[0], np.array([5.5, 6.1, 8., 9.25, 9.75]) - starts[0])
assert np.array_equal(tvalues[0], np.array([3, 4, 5, 6, 7]))
assert np.array_equal(ttimes[1], np.array([12.0, 14.1]) - starts[1])
assert np.array_equal(tvalues[1], np.array([9, 10])) | de | 0.722249 | Tests for spiketools.utils.trials ################################################################################################### ################################################################################################### # Check with time reseting # Check with time reseting | 2.368327 | 2 |
api/v1/recipe_groups/serializers.py | RyanNoelk/OpenEats | 113 | 6630232 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from rest_framework import serializers
from .models import Cuisine, Course, Tag
class CuisineSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
total = serializers.IntegerField(read_only=True)
class Meta:
model = Cuisine
class CourseSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
total = serializers.IntegerField(read_only=True)
class Meta:
model = Course
class TagSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
class Meta:
model = Tag
fields = ('title',)
# TODO: I really don't get how to process many to many db fields with django rest,
# So, I'll just remove the validation on the title so that it will pass.
# Tags will only get created if a new recipe creates one.
extra_kwargs = {
'title': {'validators': []},
}
| #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from rest_framework import serializers
from .models import Cuisine, Course, Tag
class CuisineSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
total = serializers.IntegerField(read_only=True)
class Meta:
model = Cuisine
class CourseSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
total = serializers.IntegerField(read_only=True)
class Meta:
model = Course
class TagSerializer(serializers.ModelSerializer):
""" Standard `rest_framework` ModelSerializer """
class Meta:
model = Tag
fields = ('title',)
# TODO: I really don't get how to process many to many db fields with django rest,
# So, I'll just remove the validation on the title so that it will pass.
# Tags will only get created if a new recipe creates one.
extra_kwargs = {
'title': {'validators': []},
}
| en | 0.825695 | #!/usr/bin/env python # encoding: utf-8 Standard `rest_framework` ModelSerializer Standard `rest_framework` ModelSerializer Standard `rest_framework` ModelSerializer # TODO: I really don't get how to process many to many db fields with django rest, # So, I'll just remove the validation on the title so that it will pass. # Tags will only get created if a new recipe creates one. | 2.288697 | 2 |
trustscore.py | google/TrustScore | 145 | 6630233 | <filename>trustscore.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.neighbors import KDTree
from sklearn.neighbors import KNeighborsClassifier
class TrustScore:
"""
Trust Score: a measure of classifier uncertainty based on nearest neighbors.
"""
def __init__(self, k=10, alpha=0., filtering="none", min_dist=1e-12):
"""
k and alpha are the tuning parameters for the filtering,
filtering: method of filtering. Options are "none", "density",
"uncertainty"
min_dist: some small number to mitigate possible division by 0.
"""
self.k = k
self.filtering = filtering
self.alpha = alpha
self.min_dist = min_dist
def filter_by_density(self, X):
"""Filter out points with low kNN density.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
original points of kNN density.
"""
kdtree = KDTree(X)
knn_radii = kdtree.query(X, k=self.k)[0][:, -1]
eps = np.percentile(knn_radii, (1 - self.alpha) * 100)
return X[np.where(knn_radii <= eps)[0], :]
def filter_by_uncertainty(self, X, y):
"""Filter out points with high label disagreement amongst its kNN neighbors.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
samples with highest disagreement amongst its k nearest neighbors.
"""
neigh = KNeighborsClassifier(n_neighbors=self.k)
neigh.fit(X, y)
confidence = neigh.predict_proba(X)
cutoff = np.percentile(confidence, self.alpha * 100)
unfiltered_idxs = np.where(confidence >= cutoff)[0]
return X[unfiltered_idxs, :], y[unfiltered_idxs]
def fit(self, X, y):
"""Initialize trust score precomputations with training data.
WARNING: assumes that the labels are 0-indexed (i.e.
0, 1,..., n_labels-1).
Args:
X: an array of sample points.
y: corresponding labels.
"""
self.n_labels = np.max(y) + 1
self.kdtrees = [None] * self.n_labels
if self.filtering == "uncertainty":
X_filtered, y_filtered = self.filter_by_uncertainty(X, y)
for label in xrange(self.n_labels):
if self.filtering == "none":
X_to_use = X[np.where(y == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "density":
X_to_use = self.filter_by_density(X[np.where(y == label)[0]])
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "uncertainty":
X_to_use = X_filtered[np.where(y_filtered == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
if len(X_to_use) == 0:
print(
"Filtered too much or missing examples from a label! Please lower "
"alpha or check data.")
def get_score(self, X, y_pred):
"""Compute the trust scores.
Given a set of points, determines the distance to each class.
Args:
X: an array of sample points.
y_pred: The predicted labels for these points.
Returns:
The trust score, which is ratio of distance to closest class that was not
the predicted class to the distance to the predicted class.
"""
d = np.tile(None, (X.shape[0], self.n_labels))
for label_idx in xrange(self.n_labels):
d[:, label_idx] = self.kdtrees[label_idx].query(X, k=2)[0][:, -1]
sorted_d = np.sort(d, axis=1)
d_to_pred = d[range(d.shape[0]), y_pred]
d_to_closest_not_pred = np.where(sorted_d[:, 0] != d_to_pred,
sorted_d[:, 0], sorted_d[:, 1])
return d_to_closest_not_pred / (d_to_pred + self.min_dist)
class KNNConfidence:
"""Baseline which uses disagreement to kNN classifier.
"""
def __init__(self, k=10):
self.k = k
def fit(self, X, y):
self.kdtree = KDTree(X)
self.y = y
def get_score(self, X, y_pred):
knn_idxs = self.kdtree.query(X, k=self.k)[1]
knn_outputs = self.y[knn_idxs]
return np.mean(
knn_outputs == np.transpose(np.tile(y_pred, (self.k, 1))), axis=1)
| <filename>trustscore.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.neighbors import KDTree
from sklearn.neighbors import KNeighborsClassifier
class TrustScore:
"""
Trust Score: a measure of classifier uncertainty based on nearest neighbors.
"""
def __init__(self, k=10, alpha=0., filtering="none", min_dist=1e-12):
"""
k and alpha are the tuning parameters for the filtering,
filtering: method of filtering. Options are "none", "density",
"uncertainty"
min_dist: some small number to mitigate possible division by 0.
"""
self.k = k
self.filtering = filtering
self.alpha = alpha
self.min_dist = min_dist
def filter_by_density(self, X):
"""Filter out points with low kNN density.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
original points of kNN density.
"""
kdtree = KDTree(X)
knn_radii = kdtree.query(X, k=self.k)[0][:, -1]
eps = np.percentile(knn_radii, (1 - self.alpha) * 100)
return X[np.where(knn_radii <= eps)[0], :]
def filter_by_uncertainty(self, X, y):
"""Filter out points with high label disagreement amongst its kNN neighbors.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
samples with highest disagreement amongst its k nearest neighbors.
"""
neigh = KNeighborsClassifier(n_neighbors=self.k)
neigh.fit(X, y)
confidence = neigh.predict_proba(X)
cutoff = np.percentile(confidence, self.alpha * 100)
unfiltered_idxs = np.where(confidence >= cutoff)[0]
return X[unfiltered_idxs, :], y[unfiltered_idxs]
def fit(self, X, y):
"""Initialize trust score precomputations with training data.
WARNING: assumes that the labels are 0-indexed (i.e.
0, 1,..., n_labels-1).
Args:
X: an array of sample points.
y: corresponding labels.
"""
self.n_labels = np.max(y) + 1
self.kdtrees = [None] * self.n_labels
if self.filtering == "uncertainty":
X_filtered, y_filtered = self.filter_by_uncertainty(X, y)
for label in xrange(self.n_labels):
if self.filtering == "none":
X_to_use = X[np.where(y == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "density":
X_to_use = self.filter_by_density(X[np.where(y == label)[0]])
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "uncertainty":
X_to_use = X_filtered[np.where(y_filtered == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
if len(X_to_use) == 0:
print(
"Filtered too much or missing examples from a label! Please lower "
"alpha or check data.")
def get_score(self, X, y_pred):
"""Compute the trust scores.
Given a set of points, determines the distance to each class.
Args:
X: an array of sample points.
y_pred: The predicted labels for these points.
Returns:
The trust score, which is ratio of distance to closest class that was not
the predicted class to the distance to the predicted class.
"""
d = np.tile(None, (X.shape[0], self.n_labels))
for label_idx in xrange(self.n_labels):
d[:, label_idx] = self.kdtrees[label_idx].query(X, k=2)[0][:, -1]
sorted_d = np.sort(d, axis=1)
d_to_pred = d[range(d.shape[0]), y_pred]
d_to_closest_not_pred = np.where(sorted_d[:, 0] != d_to_pred,
sorted_d[:, 0], sorted_d[:, 1])
return d_to_closest_not_pred / (d_to_pred + self.min_dist)
class KNNConfidence:
"""Baseline which uses disagreement to kNN classifier.
"""
def __init__(self, k=10):
self.k = k
def fit(self, X, y):
self.kdtree = KDTree(X)
self.y = y
def get_score(self, X, y_pred):
knn_idxs = self.kdtree.query(X, k=self.k)[1]
knn_outputs = self.y[knn_idxs]
return np.mean(
knn_outputs == np.transpose(np.tile(y_pred, (self.k, 1))), axis=1)
| en | 0.884246 | # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Trust Score: a measure of classifier uncertainty based on nearest neighbors. k and alpha are the tuning parameters for the filtering, filtering: method of filtering. option are "none", "density", "uncertainty" min_dist: some small number to mitigate possible division by 0. Filter out points with low kNN density. Args: X: an array of sample points. Returns: A subset of the array without points in the bottom alpha-fraction of original points of kNN density. Filter out points with high label disagreement amongst its kNN neighbors. Args: X: an array of sample points. Returns: A subset of the array without points in the bottom alpha-fraction of samples with highest disagreement amongst its k nearest neighbors. Initialize trust score precomputations with training data. WARNING: assumes that the labels are 0-indexed (i.e. 0, 1,..., n_labels-1). Args: X: an array of sample points. y: corresponding labels. Compute the trust scores. Given a set of points, determines the distance to each class. Args: X: an array of sample points. y_pred: The predicted labels for these points. Returns: The trust score, which is ratio of distance to closest class that was not the predicted class to the distance to the predicted class. Baseline which uses disagreement to kNN classifier. | 2.820567 | 3 |
apps/authapp.py | sparsh-ai/reco-front | 0 | 6630234 | import dash
import dash_auth
import dash_core_components as dcc
import dash_html_components as html
import plotly
# Keep this out of source code repository - save in a file or a database
VALID_USERNAME_PASSWORD_PAIRS = {
'hello': '<PASSWORD>'
}
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
auth = dash_auth.BasicAuth(
app,
VALID_USERNAME_PASSWORD_PAIRS
)
app.layout = html.Div([
html.H1('Welcome to the app'),
html.H3('You are successfully authorized'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['A', 'B']],
value='A'
),
dcc.Graph(id='graph')
], className='container')
@app.callback(
dash.dependencies.Output('graph', 'figure'),
[dash.dependencies.Input('dropdown', 'value')])
def update_graph(dropdown_value):
return {
'layout': {
'title': 'Graph of {}'.format(dropdown_value),
'margin': {
'l': 20,
'b': 20,
'r': 10,
't': 60
}
},
'data': [{'x': [1, 2, 3], 'y': [4, 1, 2]}]
}
if __name__ == '__main__':
app.run_server(debug=True) | import dash
import dash_auth
import dash_core_components as dcc
import dash_html_components as html
import plotly
# Keep this out of source code repository - save in a file or a database
VALID_USERNAME_PASSWORD_PAIRS = {
'hello': '<PASSWORD>'
}
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
auth = dash_auth.BasicAuth(
app,
VALID_USERNAME_PASSWORD_PAIRS
)
app.layout = html.Div([
html.H1('Welcome to the app'),
html.H3('You are successfully authorized'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['A', 'B']],
value='A'
),
dcc.Graph(id='graph')
], className='container')
@app.callback(
dash.dependencies.Output('graph', 'figure'),
[dash.dependencies.Input('dropdown', 'value')])
def update_graph(dropdown_value):
return {
'layout': {
'title': 'Graph of {}'.format(dropdown_value),
'margin': {
'l': 20,
'b': 20,
'r': 10,
't': 60
}
},
'data': [{'x': [1, 2, 3], 'y': [4, 1, 2]}]
}
if __name__ == '__main__':
app.run_server(debug=True) | en | 0.723296 | # Keep this out of source code repository - save in a file or a database | 2.603049 | 3 |
setup.py | prosehair/django-fsm-admin | 161 | 6630235 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
import fsm_admin
readme = open("README.rst").read()
setup(
name="django-fsm-admin",
version=fsm_admin.__version__,
author=fsm_admin.__author__,
description="Integrate django-fsm state transitions into the django admin",
long_description=readme,
long_description_content_type="text/x-rst",
author_email="<EMAIL>",
url="https://github.com/gadventures/django-fsm-admin",
packages=find_packages(),
include_package_data=True,
install_requires=[
"Django>=1.6",
"django-fsm>=2.1.0",
],
keywords="django fsm admin",
license="MIT",
platforms=["any"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
]
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
import fsm_admin
readme = open("README.rst").read()
setup(
name="django-fsm-admin",
version=fsm_admin.__version__,
author=fsm_admin.__author__,
description="Integrate django-fsm state transitions into the django admin",
long_description=readme,
long_description_content_type="text/x-rst",
author_email="<EMAIL>",
url="https://github.com/gadventures/django-fsm-admin",
packages=find_packages(),
include_package_data=True,
install_requires=[
"Django>=1.6",
"django-fsm>=2.1.0",
],
keywords="django fsm admin",
license="MIT",
platforms=["any"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
]
)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.123838 | 1 |
LC/26.py | szhu3210/LeetCode_Solutions | 2 | 6630236 | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums[:] = sorted(list(set(nums)))
return len(set(nums)) | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums[:] = sorted(list(set(nums)))
return len(set(nums)) | en | 0.231312 | :type nums: List[int] :rtype: int | 3.169306 | 3 |
nginx/Controller/State.py | NTGDeveloper/kasa-camera | 13 | 6630237 | from collections import namedtuple
class State:
def bind_to(self, callback):
self._observers.append(callback)
def notifyObservers(self, propertyName):
for callback in self._observers:
callback(Event(changedProperty=propertyName, state=self))
@property
def isCameraEnabled(self):
return self._isCameraEnabled
@isCameraEnabled.setter
def isCameraEnabled(self, value):
oldValue = self._isCameraEnabled
self._isCameraEnabled = value
if(oldValue != value):
self.notifyObservers('isCameraEnabled')
def __init__(self):
self._isCameraEnabled = False
self._observers = []
global Event
Event = namedtuple('Event', 'changedProperty state')
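# A hedged usage sketch of the observer hook above (assumes the State class above is in
# scope; the callback name is illustrative only).
if __name__ == "__main__":
    def on_change(event):
        print(f"{event.changedProperty} -> {event.state.isCameraEnabled}")

    state = State()
    state.bind_to(on_change)
    state.isCameraEnabled = True   # value changed, observers are notified once
    state.isCameraEnabled = True   # unchanged value, no notification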
| none | 1 | 2.856378 | 3 |
|
utils/cli_utils.py | yangdangfu/predictor_selection | 0 | 6630238
# -*- coding: utf-8 -*-
"""
Some utility functions about the CLI (Command Line Interface) usages
"""
def arguments_check(keys: list, required_keys: list):
for k in keys:
if k not in required_keys:
raise KeyError(f"Unsupported argument {k}")
for sk in required_keys:
if sk not in keys:
raise KeyError(f"Missing argument {sk}") | # -*- coding: utf-8 -*-
"""
Some utility functions about the CLI (Command Line Interface) usages
"""
def arguments_check(keys: list, required_keys: list):
for k in keys:
if k not in required_keys:
raise KeyError(f"Unsupported argument {k}")
for sk in required_keys:
if sk not in keys:
raise KeyError(f"Missing argument {sk}") | en | 0.851395 | # -*- coding: utf-8 -*- Some utility functions about the CLI (Command Line Interface) usages | 3.054194 | 3 |
bigquery/google/cloud/bigquery/dbapi/cursor.py | javisantana/google-cloud-python | 0 | 6630239
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cursor for the Google BigQuery DB-API."""
import collections
import six
from google.cloud.bigquery import job
from google.cloud.bigquery.dbapi import _helpers
from google.cloud.bigquery.dbapi import exceptions
import google.cloud.exceptions
# Per PEP 249: A 7-item sequence containing information describing one result
# column. The first two items (name and type_code) are mandatory, the other
# five are optional and are set to None if no meaningful values can be
# provided.
Column = collections.namedtuple(
'Column',
[
'name', 'type_code', 'display_size', 'internal_size', 'precision',
'scale', 'null_ok',
])
class Cursor(object):
"""DB-API Cursor to Google BigQuery.
:type connection: :class:`~google.cloud.bigquery.dbapi.Connection`
:param connection: A DB-API connection to Google BigQuery.
"""
def __init__(self, connection):
self.connection = connection
self.description = None
# Per PEP 249: The attribute is -1 in case no .execute*() has been
# performed on the cursor or the rowcount of the last operation
# cannot be determined by the interface.
self.rowcount = -1
# Per PEP 249: The arraysize attribute defaults to 1, meaning to fetch
# a single row at a time.
self.arraysize = 1
self._query_data = None
self._query_job = None
def close(self):
"""No-op."""
def _set_description(self, schema):
"""Set description from schema.
:type schema: Sequence[google.cloud.bigquery.schema.SchemaField]
:param schema: A description of fields in the schema.
"""
if schema is None:
self.description = None
return
self.description = tuple([
Column(
name=field.name,
type_code=field.field_type,
display_size=None,
internal_size=None,
precision=None,
scale=None,
null_ok=field.is_nullable)
for field in schema])
def _set_rowcount(self, query_results):
"""Set the rowcount from query results.
Normally, this sets rowcount to the number of rows returned by the
query, but if it was a DML statement, it sets rowcount to the number
of modified rows.
:type query_results:
:class:`~google.cloud.bigquery.query._QueryResults`
:param query_results: results of a query
"""
total_rows = 0
num_dml_affected_rows = query_results.num_dml_affected_rows
if (query_results.total_rows is not None
and query_results.total_rows > 0):
total_rows = query_results.total_rows
if num_dml_affected_rows is not None and num_dml_affected_rows > 0:
total_rows = num_dml_affected_rows
self.rowcount = total_rows
def execute(self, operation, parameters=None, job_id=None):
"""Prepare and execute a database operation.
.. note::
When setting query parameters, values which are "text"
(``unicode`` in Python2, ``str`` in Python3) will use
the 'STRING' BigQuery type. Values which are "bytes" (``str`` in
Python2, ``bytes`` in Python3), will use using the 'BYTES' type.
A `~datetime.datetime` parameter without timezone information uses
the 'DATETIME' BigQuery type (example: Global Pi Day Celebration
March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with
timezone information uses the 'TIMESTAMP' BigQuery type (example:
a wedding on April 29, 2011 at 11am, British Summer Time).
For more information about BigQuery data types, see:
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not
yet supported. See:
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters:
(Optional) dictionary or sequence of parameter values.
:type job_id: str
:param job_id: (Optional) The job_id to use. If not set, a job ID
is generated at random.
"""
self._query_data = None
self._query_job = None
client = self.connection._client
# The DB-API uses the pyformat formatting, since the way BigQuery does
# query parameters was not one of the standard options. Convert both
# the query and the parameters to the format expected by the client
# libraries.
formatted_operation = _format_operation(
operation, parameters=parameters)
query_parameters = _helpers.to_query_parameters(parameters)
config = job.QueryJobConfig()
config.query_parameters = query_parameters
config.use_legacy_sql = False
self._query_job = client.query(
formatted_operation, job_config=config, job_id=job_id)
# Wait for the query to finish.
try:
self._query_job.result()
except google.cloud.exceptions.GoogleCloudError:
raise exceptions.DatabaseError(self._query_job.errors)
query_results = self._query_job._query_results
self._set_rowcount(query_results)
self._set_description(query_results.schema)
def executemany(self, operation, seq_of_parameters):
"""Prepare and execute a database operation multiple times.
:type operation: str
:param operation: A Google BigQuery query string.
:type seq_of_parameters: Sequence[Mapping[str, Any] or Sequence[Any]]
:param parameters: Sequence of many sets of parameter values.
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def _try_fetch(self, size=None):
"""Try to start fetching data, if not yet started.
Mutates self to indicate that iteration has started.
"""
if self._query_job is None:
raise exceptions.InterfaceError(
'No query results: execute() must be called before fetch.')
is_dml = (
self._query_job.statement_type
and self._query_job.statement_type.upper() != 'SELECT')
if is_dml:
self._query_data = iter([])
return
if self._query_data is None:
client = self.connection._client
# TODO(tswast): pass in page size to list_rows based on arraysize
rows_iter = client.list_rows(
self._query_job.destination,
selected_fields=self._query_job._query_results.schema)
self._query_data = iter(rows_iter)
def fetchone(self):
"""Fetch a single row from the results of the last ``execute*()`` call.
:rtype: tuple
:returns:
A tuple representing a row or ``None`` if no more data is
available.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
self._try_fetch()
try:
return six.next(self._query_data)
except StopIteration:
return None
def fetchmany(self, size=None):
"""Fetch multiple results from the last ``execute*()`` call.
.. note::
The size parameter is not used for the request/response size.
Set the ``arraysize`` attribute before calling ``execute()`` to
set the batch size.
:type size: int
:param size:
(Optional) Maximum number of rows to return. Defaults to the
``arraysize`` property value.
:rtype: List[tuple]
:returns: A list of rows.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
if size is None:
size = self.arraysize
self._try_fetch(size=size)
rows = []
for row in self._query_data:
rows.append(row)
if len(rows) >= size:
break
return rows
def fetchall(self):
"""Fetch all remaining results from the last ``execute*()`` call.
:rtype: List[tuple]
:returns: A list of all the rows in the results.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
self._try_fetch()
return list(self._query_data)
def setinputsizes(self, sizes):
"""No-op."""
def setoutputsize(self, size, column=None):
"""No-op."""
def _format_operation_list(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = ['?' for _ in parameters]
try:
return operation % tuple(formatted_params)
except TypeError as exc:
raise exceptions.ProgrammingError(exc)
def _format_operation_dict(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %(namedparam)s`` and
the output will be a query like ``SELECT @namedparam``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any]
:param parameters: Dictionary of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = {}
for name in parameters:
escaped_name = name.replace('`', r'\`')
formatted_params[name] = '@`{}`'.format(escaped_name)
try:
return operation % formatted_params
except KeyError as exc:
raise exceptions.ProgrammingError(exc)
def _format_operation(operation, parameters=None):
"""Formats parameters in operation in way BigQuery expects.
:type: str
:param operation: A Google BigQuery query string.
:type: Mapping[str, Any] or Sequence[Any]
:param parameters: Optional parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
if parameters is None:
return operation
if isinstance(parameters, collections.Mapping):
return _format_operation_dict(operation, parameters)
return _format_operation_list(operation, parameters)
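# A hedged usage sketch of this DB-API module (client construction and the table or
# parameter names are illustrative only):
#
#     from google.cloud import bigquery
#     from google.cloud.bigquery import dbapi
#
#     connection = dbapi.connect(bigquery.Client())
#     cursor = connection.cursor()
#     cursor.execute(
#         "SELECT name FROM `my_dataset.my_table` WHERE age > %(min_age)s",
#         {"min_age": 18})
#     print(cursor.fetchall())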
| # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cursor for the Google BigQuery DB-API."""
import collections
import six
from google.cloud.bigquery import job
from google.cloud.bigquery.dbapi import _helpers
from google.cloud.bigquery.dbapi import exceptions
import google.cloud.exceptions
# Per PEP 249: A 7-item sequence containing information describing one result
# column. The first two items (name and type_code) are mandatory, the other
# five are optional and are set to None if no meaningful values can be
# provided.
Column = collections.namedtuple(
'Column',
[
'name', 'type_code', 'display_size', 'internal_size', 'precision',
'scale', 'null_ok',
])
class Cursor(object):
"""DB-API Cursor to Google BigQuery.
:type connection: :class:`~google.cloud.bigquery.dbapi.Connection`
:param connection: A DB-API connection to Google BigQuery.
"""
def __init__(self, connection):
self.connection = connection
self.description = None
# Per PEP 249: The attribute is -1 in case no .execute*() has been
# performed on the cursor or the rowcount of the last operation
# cannot be determined by the interface.
self.rowcount = -1
# Per PEP 249: The arraysize attribute defaults to 1, meaning to fetch
# a single row at a time.
self.arraysize = 1
self._query_data = None
self._query_job = None
def close(self):
"""No-op."""
def _set_description(self, schema):
"""Set description from schema.
:type schema: Sequence[google.cloud.bigquery.schema.SchemaField]
:param schema: A description of fields in the schema.
"""
if schema is None:
self.description = None
return
self.description = tuple([
Column(
name=field.name,
type_code=field.field_type,
display_size=None,
internal_size=None,
precision=None,
scale=None,
null_ok=field.is_nullable)
for field in schema])
def _set_rowcount(self, query_results):
"""Set the rowcount from query results.
Normally, this sets rowcount to the number of rows returned by the
query, but if it was a DML statement, it sets rowcount to the number
of modified rows.
:type query_results:
:class:`~google.cloud.bigquery.query._QueryResults`
:param query_results: results of a query
"""
total_rows = 0
num_dml_affected_rows = query_results.num_dml_affected_rows
if (query_results.total_rows is not None
and query_results.total_rows > 0):
total_rows = query_results.total_rows
if num_dml_affected_rows is not None and num_dml_affected_rows > 0:
total_rows = num_dml_affected_rows
self.rowcount = total_rows
def execute(self, operation, parameters=None, job_id=None):
"""Prepare and execute a database operation.
.. note::
When setting query parameters, values which are "text"
(``unicode`` in Python2, ``str`` in Python3) will use
the 'STRING' BigQuery type. Values which are "bytes" (``str`` in
Python2, ``bytes`` in Python3), will use using the 'BYTES' type.
A `~datetime.datetime` parameter without timezone information uses
the 'DATETIME' BigQuery type (example: Global Pi Day Celebration
March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with
timezone information uses the 'TIMESTAMP' BigQuery type (example:
a wedding on April 29, 2011 at 11am, British Summer Time).
For more information about BigQuery data types, see:
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not
yet supported. See:
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters:
(Optional) dictionary or sequence of parameter values.
:type job_id: str
:param job_id: (Optional) The job_id to use. If not set, a job ID
is generated at random.
"""
self._query_data = None
self._query_job = None
client = self.connection._client
# The DB-API uses the pyformat formatting, since the way BigQuery does
# query parameters was not one of the standard options. Convert both
# the query and the parameters to the format expected by the client
# libraries.
formatted_operation = _format_operation(
operation, parameters=parameters)
query_parameters = _helpers.to_query_parameters(parameters)
config = job.QueryJobConfig()
config.query_parameters = query_parameters
config.use_legacy_sql = False
self._query_job = client.query(
formatted_operation, job_config=config, job_id=job_id)
# Wait for the query to finish.
try:
self._query_job.result()
except google.cloud.exceptions.GoogleCloudError:
raise exceptions.DatabaseError(self._query_job.errors)
query_results = self._query_job._query_results
self._set_rowcount(query_results)
self._set_description(query_results.schema)
def executemany(self, operation, seq_of_parameters):
"""Prepare and execute a database operation multiple times.
:type operation: str
:param operation: A Google BigQuery query string.
:type seq_of_parameters: Sequence[Mapping[str, Any] or Sequence[Any]]
:param parameters: Sequence of many sets of parameter values.
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def _try_fetch(self, size=None):
"""Try to start fetching data, if not yet started.
Mutates self to indicate that iteration has started.
"""
if self._query_job is None:
raise exceptions.InterfaceError(
'No query results: execute() must be called before fetch.')
is_dml = (
self._query_job.statement_type
and self._query_job.statement_type.upper() != 'SELECT')
if is_dml:
self._query_data = iter([])
return
if self._query_data is None:
client = self.connection._client
# TODO(tswast): pass in page size to list_rows based on arraysize
rows_iter = client.list_rows(
self._query_job.destination,
selected_fields=self._query_job._query_results.schema)
self._query_data = iter(rows_iter)
def fetchone(self):
"""Fetch a single row from the results of the last ``execute*()`` call.
:rtype: tuple
:returns:
A tuple representing a row or ``None`` if no more data is
available.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
self._try_fetch()
try:
return six.next(self._query_data)
except StopIteration:
return None
def fetchmany(self, size=None):
"""Fetch multiple results from the last ``execute*()`` call.
.. note::
The size parameter is not used for the request/response size.
Set the ``arraysize`` attribute before calling ``execute()`` to
set the batch size.
:type size: int
:param size:
(Optional) Maximum number of rows to return. Defaults to the
``arraysize`` property value.
:rtype: List[tuple]
:returns: A list of rows.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
if size is None:
size = self.arraysize
self._try_fetch(size=size)
rows = []
for row in self._query_data:
rows.append(row)
if len(rows) >= size:
break
return rows
def fetchall(self):
"""Fetch all remaining results from the last ``execute*()`` call.
:rtype: List[tuple]
:returns: A list of all the rows in the results.
:raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError`
if called before ``execute()``.
"""
self._try_fetch()
return list(self._query_data)
def setinputsizes(self, sizes):
"""No-op."""
def setoutputsize(self, size, column=None):
"""No-op."""
def _format_operation_list(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = ['?' for _ in parameters]
try:
return operation % tuple(formatted_params)
except TypeError as exc:
raise exceptions.ProgrammingError(exc)
def _format_operation_dict(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %(namedparam)s`` and
the output will be a query like ``SELECT @namedparam``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Mapping[str, Any]
:param parameters: Dictionary of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = {}
for name in parameters:
escaped_name = name.replace('`', r'\`')
formatted_params[name] = '@`{}`'.format(escaped_name)
try:
return operation % formatted_params
except KeyError as exc:
raise exceptions.ProgrammingError(exc)
def _format_operation(operation, parameters=None):
"""Formats parameters in operation in way BigQuery expects.
:type: str
:param operation: A Google BigQuery query string.
:type: Mapping[str, Any] or Sequence[Any]
:param parameters: Optional parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
if parameters is None:
return operation
if isinstance(parameters, collections.Mapping):
return _format_operation_dict(operation, parameters)
return _format_operation_list(operation, parameters) | en | 0.65423 | # Copyright 2017 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Cursor for the Google BigQuery DB-API. # Per PEP 249: A 7-item sequence containing information describing one result # column. The first two items (name and type_code) are mandatory, the other # five are optional and are set to None if no meaningful values can be # provided. DB-API Cursor to Google BigQuery. :type connection: :class:`~google.cloud.bigquery.dbapi.Connection` :param connection: A DB-API connection to Google BigQuery. # Per PEP 249: The attribute is -1 in case no .execute*() has been # performed on the cursor or the rowcount of the last operation # cannot be determined by the interface. # Per PEP 249: The arraysize attribute defaults to 1, meaning to fetch # a single row at a time. No-op. Set description from schema. :type schema: Sequence[google.cloud.bigquery.schema.SchemaField] :param schema: A description of fields in the schema. Set the rowcount from query results. Normally, this sets rowcount to the number of rows returned by the query, but if it was a DML statement, it sets rowcount to the number of modified rows. :type query_results: :class:`~google.cloud.bigquery.query._QueryResults` :param query_results: results of a query Prepare and execute a database operation. .. note:: When setting query parameters, values which are "text" (``unicode`` in Python2, ``str`` in Python3) will use the 'STRING' BigQuery type. Values which are "bytes" (``str`` in Python2, ``bytes`` in Python3), will use using the 'BYTES' type. A `~datetime.datetime` parameter without timezone information uses the 'DATETIME' BigQuery type (example: Global Pi Day Celebration March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with timezone information uses the 'TIMESTAMP' BigQuery type (example: a wedding on April 29, 2011 at 11am, British Summer Time). For more information about BigQuery data types, see: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types ``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not yet supported. See: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524 :type operation: str :param operation: A Google BigQuery query string. :type parameters: Mapping[str, Any] or Sequence[Any] :param parameters: (Optional) dictionary or sequence of parameter values. :type job_id: str :param job_id: (Optional) The job_id to use. If not set, a job ID is generated at random. # The DB-API uses the pyformat formatting, since the way BigQuery does # query parameters was not one of the standard options. Convert both # the query and the parameters to the format expected by the client # libraries. # Wait for the query to finish. Prepare and execute a database operation multiple times. :type operation: str :param operation: A Google BigQuery query string. :type seq_of_parameters: Sequence[Mapping[str, Any] or Sequence[Any]] :param parameters: Sequence of many sets of parameter values. 
Try to start fetching data, if not yet started. Mutates self to indicate that iteration has started. # TODO(tswast): pass in page size to list_rows based on arraysize Fetch a single row from the results of the last ``execute*()`` call. :rtype: tuple :returns: A tuple representing a row or ``None`` if no more data is available. :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` if called before ``execute()``. Fetch multiple results from the last ``execute*()`` call. .. note:: The size parameter is not used for the request/response size. Set the ``arraysize`` attribute before calling ``execute()`` to set the batch size. :type size: int :param size: (Optional) Maximum number of rows to return. Defaults to the ``arraysize`` property value. :rtype: List[tuple] :returns: A list of rows. :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` if called before ``execute()``. Fetch all remaining results from the last ``execute*()`` call. :rtype: List[tuple] :returns: A list of all the rows in the results. :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` if called before ``execute()``. No-op. No-op. Formats parameters in operation in the way BigQuery expects. The input operation will be a query like ``SELECT %s`` and the output will be a query like ``SELECT ?``. :type operation: str :param operation: A Google BigQuery query string. :type parameters: Sequence[Any] :param parameters: Sequence of parameter values. :rtype: str :returns: A formatted query string. :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` if a parameter used in the operation is not found in the ``parameters`` argument. Formats parameters in operation in the way BigQuery expects. The input operation will be a query like ``SELECT %(namedparam)s`` and the output will be a query like ``SELECT @namedparam``. :type operation: str :param operation: A Google BigQuery query string. :type parameters: Mapping[str, Any] :param parameters: Dictionary of parameter values. :rtype: str :returns: A formatted query string. :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` if a parameter used in the operation is not found in the ``parameters`` argument. Formats parameters in operation in way BigQuery expects. :type: str :param operation: A Google BigQuery query string. :type: Mapping[str, Any] or Sequence[Any] :param parameters: Optional parameter values. :rtype: str :returns: A formatted query string. :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` if a parameter used in the operation is not found in the ``parameters`` argument. | 2.301853 | 2 |
src/controller/game_controller.py | frostygum/TicTacToe | 0 | 6630240
#! Import required python built-in modules
from functools import partial
#! Import required self-made modules
from src.model.board import Board
class GameController():
"""
This class is used as StartView Controller
"""
def __init__(self, app):
"""Function to initiate game window settings"""
self.app = app
self.app.gameView.createTicTacToeGrid()
self.connectSignals()
def connectSignals(self):
"""Function to give the button ability"""
for location, button in self.app.gameView.buttons.items():
button.clicked.connect(partial(self.handleBtnClicked, location))
def handleBtnClicked(self, location):
"""Button Ability when clicked"""
playerSymbol = self.app.board.player[self.app.role]
self.app.board.updateLocation(playerSymbol, location)
self.app.gameView.updateButton(playerSymbol, location, False)
self.app.board.incrementMove()
self.app.board.switchTurn()
if self.app.role == 'host':
self.app.serverThread.sendState()
else:
self.app.clientThread.sendState()
if self.checkGameWinner() == False:
self.checkRightTurn()
print(self.app.board.toJSON())
def handleReceiveUpdate(self, gameBoard):
"""Function to handle when received update from opponent"""
opponentSymbol = self.app.board.findOponent(self.app.board.player[self.app.role])
for location, symbol in gameBoard['gamestate'].items():
if symbol == opponentSymbol:
self.app.board.updateLocation(symbol, location)
self.app.gameView.updateButton(symbol, location, False)
if self.app.board.player != gameBoard['player']:
self.app.board.player = gameBoard['player']
self.app.gameView.title.setText('Hello {}, You are [{}]'.format(self.app.role.upper(), self.app.board.player[self.app.role]))
self.app.board.turn = gameBoard['turn']
self.app.board.move = gameBoard['move']
print(self.app.board.toJSON())
if self.checkGameWinner() == False:
self.checkRightTurn()
    def checkRightTurn(self):
        """Function to disable all buttons if the current turn is not appropriate"""
#! Set enable or disable to all button with given settings
for xPos in range(0, 3):
for yPos in range(0, 3):
location = '{}{}'.format(xPos, yPos)
#! Disable button when current is not my turn
if self.app.board.turn != self.app.role:
self.app.gameView.subTitle.setText('Waiting for opponent')
self.app.gameView.buttons[location].setEnabled(False)
else:
#! Enable only empty button when current is my turn, disable the other
self.app.gameView.subTitle.setText('It\'s your turn')
if self.app.board.gameState[location] == None:
self.app.gameView.buttons[location].setEnabled(True)
else:
self.app.gameView.buttons[location].setEnabled(False)
def checkGameWinner(self):
"""Function to check if current state has a winner"""
winner = self.checkGameRules()
if winner == 0:
self.app.showDialog('Game Over, Player {} Wins!'.format(self.app.board.findOponent(self.app.board.player[self.app.board.turn])), 'Game Over', self.gameOver)
if self.app.round % 2 == 1:
if self.app.board.findOponent(self.app.board.player[self.app.board.turn]) == 'X':
self.app.endView.createListWidgetItem('Round {}: Player Host Win'.format(self.app.round))
self.app.hostCount += 1
else:
self.app.endView.createListWidgetItem('Round {}: Player Client Win'.format(self.app.round))
self.app.clientCount += 1
else:
if self.app.board.findOponent(self.app.board.player[self.app.board.turn]) == 'O':
self.app.endView.createListWidgetItem('Round {}: Player Host Win'.format(self.app.round))
self.app.hostCount += 1
else:
self.app.endView.createListWidgetItem('Round {}: Player Client Win'.format(self.app.round))
self.app.clientCount += 1
self.app.endView.countHost.setText('{} Win'.format(self.app.hostCount))
self.app.endView.countClient.setText('{} Win'.format(self.app.clientCount))
self.app.round += 1
return True
elif winner == 2:
self.app.showDialog('Game Over, It\'s a Tie!', 'Game Over', self.gameOver)
self.app.endView.createListWidgetItem('Round {} : Tie'.format(self.app.round))
self.app.round += 1
return True
return False
def checkGameRules(self):
"""Function to check winner rules condition, 0: win, 1: no winner, 2: tie"""
state = self.app.board.gameState
condition = 1
#! Why >= 5 ? Not possible have winner if 5 move have not reached
if self.app.board.move >= 5:
if state['00'] == state['01'] == state['02'] != None:
condition = 0
elif state['10'] == state['11'] == state['12'] != None:
condition = 0
elif state['20'] == state['21'] == state['22'] != None:
condition = 0
elif state['00'] == state['10'] == state['20'] != None:
condition = 0
elif state['01'] == state['11'] == state['21'] != None:
condition = 0
elif state['02'] == state['12'] == state['22'] != None:
condition = 0
elif state['00'] == state['11'] == state['22'] != None:
condition = 0
elif state['02'] == state['11'] == state['20'] != None:
condition = 0
#! Why 9 ? 9 move means all boxes has already filled with symbols but no one wins
if self.app.board.move == 9 and condition == 1:
condition = 2
return condition
def toggleButtons(self, enable):
"""Function to handle when received update from opponent"""
#! Set enable or disable to all button with given settings
for xPos in range(0, self.app.board.size):
for yPos in range(0, self.app.board.size):
location = '{}{}'.format(xPos, yPos)
self.app.gameView.buttons[location].setEnabled(enable)
def gameOver(self):
"""Function to handle when the game is over"""
#! Open End View
self.app.changeWindow('end') | en | 0.866777 | #! Import required python built-in modules #! Import required self-made modules This class is used as StartView Controller Function to initiate game window settings Function to give the button ability Button Ability when clicked Function to handle when received update from opponent Function to disable all button if current turn is no apropiate #! Set enable or disable to all button with given settings #! Disable button when current is not my turn #! Enable only empty button when current is my turn, disable the other Function to check if current state has a winner Function to check winner rules condition, 0: win, 1: no winner, 2: tie #! Why >= 5 ? Not possible have winner if 5 move have not reached #! Why 9 ? 9 move means all boxes has already filled with symbols but no one wins Function to handle when received update from opponent #! Set enable or disable to all button with given settings Function to handle when the game is over #! Open End View | 2.968238 | 3 |
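# A hedged, standalone re-expression of the win test used by checkGameRules above
# (same '00'..'22' board keys; this helper is illustrative and not part of the original class):
WIN_LINES = [
    ('00', '01', '02'), ('10', '11', '12'), ('20', '21', '22'),
    ('00', '10', '20'), ('01', '11', '21'), ('02', '12', '22'),
    ('00', '11', '22'), ('02', '11', '20'),
]

def has_winner(state):
    """Return True when any row, column or diagonal holds three equal, non-empty symbols."""
    return any(state[a] == state[b] == state[c] is not None for a, b, c in WIN_LINES)

# e.g. has_winner({'00': 'X', '01': 'X', '02': 'X',
#                  '10': None, '11': None, '12': None,
#                  '20': None, '21': None, '22': None})  # -> True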
dataframe_generator/data_type.py | szvasas/dataframe-generator | 2 | 6630241 | import re
import string
from datetime import date, timedelta, datetime
from math import copysign
from random import randint, choice
class DataType:
@staticmethod
def create_from_string(data_type, raw_string: str):
if re.match(data_type.type_descriptor, raw_string):
return data_type()
else:
return None
def next_value(self):
pass
def parse_value(self, raw_string: str):
pass
class ByteType(DataType):
type_descriptor = r'ByteType\(\)'
def next_value(self) -> int:
return randint(-128, 127)
def parse_value(self, raw_string: str) -> int:
return int(raw_string)
class ShortType(DataType):
type_descriptor = r'ShortType\(\)'
def next_value(self) -> int:
return randint(-32768, 32767)
def parse_value(self, raw_string: str) -> int:
return int(raw_string)
class IntegerType(DataType):
type_descriptor = r'IntegerType\(\)'
def next_value(self) -> int:
return randint(-2147483648, 2147483647)
def parse_value(self, raw_string: str) -> int:
return int(raw_string)
class LongType(DataType):
type_descriptor = r'LongType\(\)'
def next_value(self) -> int:
return randint(-9223372036854775808, 9223372036854775807)
def parse_value(self, raw_string: str) -> int:
return int(raw_string)
class DecimalType(DataType):
type_descriptor = r'DecimalType\(\s*\d+\s*,\s*\d+\s*\)'
type_descriptor_grouped = r'DecimalType\(\s*(\d+)\s*,\s*(\d+)\s*\)'
@staticmethod
def create_from_string(data_type, raw_string: str):
match_result = re.match(DecimalType.type_descriptor_grouped, raw_string)
if match_result is None:
return None
else:
scale = int(match_result.group(1).strip())
precision = int(match_result.group(2).strip())
return DecimalType(scale, precision)
def __init__(self, scale: int, precision: int):
self.scale = scale
self.precision = precision
    def next_value(self) -> float:
range_max = pow(10, self.scale) - 1
unscaled = randint(0, range_max)
signum = randint(-1, 0)
return copysign(unscaled, signum) / pow(10, self.precision)
def parse_value(self, raw_string: str) -> float:
return float(raw_string)
class StringType(DataType):
type_descriptor = r'StringType\(\)'
def next_value(self, length=10) -> str:
letters = string.ascii_lowercase
return ''.join((choice(letters) for _ in range(length)))
def parse_value(self, raw_string: str) -> str:
return raw_string
class DateType(DataType):
type_descriptor = r'DateType\(\)'
def next_value(self) -> date:
end = date.today()
random_days = randint(0, 1000)
return end - timedelta(days=random_days)
def parse_value(self, raw_string: str) -> date:
return datetime.strptime(raw_string, '%Y-%m-%d').date()
class TimestampType(DataType):
type_descriptor = r'TimestampType\(\)'
def next_value(self) -> datetime:
end = datetime.now()
random_seconds = randint(0, 100000000)
return end - timedelta(seconds=random_seconds)
def parse_value(self, raw_string: str) -> datetime:
return datetime.strptime(raw_string, '%Y-%m-%d %H:%M:%S')
supported_types = [ByteType,
ShortType,
IntegerType,
LongType,
DecimalType,
StringType,
DateType,
TimestampType
]
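# A hedged usage sketch of the column types above (assumes the classes above are in
# scope; the schema strings below are illustrative only):
if __name__ == "__main__":
    schema = ["IntegerType()", "StringType()", "DecimalType(5, 2)"]
    columns = []
    for raw in schema:
        for candidate in supported_types:
            parsed = candidate.create_from_string(candidate, raw)
            if parsed is not None:
                columns.append(parsed)
                break
    # one row of random values, one per recognised column type
    print([col.next_value() for col in columns])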
| none | 1 | 2.993459 | 3 |
|
Resources/DesktopBasic/ComponentsExerciseScript.py | FMEEvangelist/FMEData | 0 | 6630242 | import shutil
import os
if not os.path.exists('C:/FundraisingWalk'):
os.makedirs('C:/FundraisingWalk')
shutil.copy2('c:/FMEData2017/Output/Training/FundraisingWalk.kml', 'C:/FundraisingWalk/FundraisingWalk.kml') | none | 1 | 2.149858 | 2 |
|
fx_findings/base/plotting.py | maxoja/betting-simulator | 0 | 6630243 | import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import numpy as np
from ..base.enums import Direction, Clr
def plot_boxes(samples, labels, block=False):
plt.figure()
plt.boxplot(samples, labels=labels)
plt.show(block=block)
def plot_lines(lines, title="", block=False):
plt.figure()
plt.title(title)
for line in lines:
plt.plot(line)
plt.show(block=block)
def plot_histogram(sample_data, title="", color=None, block=False):
plt.figure()
plt.title(title)
plt.hist(sample_data, color=color, bins=128)
plt.show(block=block)
def plot_scatter(x, y, clr='blue', title="", block=False):
plt.figure()
plt.title(title)
    if not isinstance(y, tuple):
        y = [y]
    if not isinstance(clr, tuple):
        clr = [clr]
for y_set, c in zip(y, clr):
plt.scatter(x, y_set, c=c, s=2, alpha=0.5)
plt.show(block=block)
def plot_dict_as_bars(d, title="", block=False):
plt.figure()
plt.title(title)
plt.grid(axis='y')
plt.bar(d.keys(), d.values())
plt.xticks(rotation = 90)
plt.tight_layout()
plt.show(block=block)
def plot_outward_cumulative_hist(sample_data, center_val=0, title="", block=False):
if not sample_data:
plt.figure(title)
plt.plot([])
plt.show(block=block)
return
sample_data = sorted(sample_data)
center_index = min(range(len(sample_data)), key=lambda i: abs(center_val-sample_data[i]))
count_y = [abs(center_index-i)/len(sample_data)*100 for i,val in enumerate(sample_data)]
count_x = sample_data[::]
plt.figure(title)
plt.plot(count_x, count_y)
weights = np.ones_like(sample_data)/float(len(sample_data))*50
plt.hist(sample_data, weights=weights, bins=128)
plt.show(block=block)
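# A hedged usage sketch (numbers illustrative): pass per-trade results to see how much
# of the sample lies beyond a given distance from the centre value.
#
#     returns = [-0.4, -0.1, 0.05, 0.2, 0.2, 0.35, 0.6]
#     plot_outward_cumulative_hist(returns, center_val=0, title='outward cumulative', block=True)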
# use case 1: You know the RSI value at each trade and also the trade results.
#             The trades are categorised into 2 groups, gain and loss groups.
#             This plot visualises and helps choose which RSI threshold is best
#             to exclude as many loss trades while retaining as many gain trades.
def plot_threshold_cross_cumulation(prefer_group, unprefer_group, acc_dir:Direction=None, background='white', normalise=False, title="", block=False):
if len(prefer_group) + len(unprefer_group) == 0:
plt.figure()
plt.title(title)
plt.plot([])
plt.show(block=block)
return
old_background = plt.rcParams['figure.facecolor']
plt.rcParams['figure.facecolor'] = background
    if acc_dir is None:
_, axs = plt.subplots(2,1, sharex=True)
ax1 = axs[0]
ax2 = axs[1]
else:
if acc_dir == Direction.LEFT:
_, ax1 = plt.subplots(1,1)
if acc_dir == Direction.RIGHT:
_, ax2 = plt.subplots(1,1)
len_prefer = len(prefer_group)
len_unprefer = len(unprefer_group)
prefer_group = sorted(prefer_group)
unprefer_group = sorted(unprefer_group)
x_a = prefer_group[::]
x_b = unprefer_group[::]
if normalise:
y_a = list(np.arange(len_prefer)/len_prefer*100)
y_b = list(np.arange(len_unprefer)/len_unprefer*100)
else:
y_a = list(range(len_prefer))
y_b = list(range(len_unprefer))
y_a_l = y_a[::-1]
y_b_l = y_b[::-1]
y_a_r = y_a[::]
y_b_r = y_b[::]
if acc_dir in [None, Direction.LEFT]:
ax1.plot(x_a, y_a_l, color=Clr.DEFAULT_BLUE)
ax1.plot(x_b, y_b_l, color=Clr.RED)
if acc_dir in [None, Direction.RIGHT]:
ax2.plot(x_a, y_a_r, color=Clr.DEFAULT_BLUE)
ax2.plot(x_b, y_b_r, color=Clr.RED)
min_x = int(min(x_a + x_b))
max_x = int(max(x_a + x_b))
range_x = range(min_x-1, max_x+1)
y_l = []
y_r = []
net_l = 0
net_r = 0
last_a_l = 0
last_a_r = 0
last_b_l = 0
last_b_r = 0
for x in range_x:
while x_a and x_a[0] <= x:
last_a_r = y_a_r[0]
last_a_l = y_a_l[0]
y_a_r.pop(0)
y_a_l.pop(0)
x_a.pop(0)
while x_b and x_b[0] <= x:
last_b_r = y_b_r[0]
last_b_l = y_b_l[0]
y_b_r.pop(0)
y_b_l.pop(0)
x_b.pop(0)
no_value = last_a_l == 0 or last_b_l == 0
no_value_r = last_a_r == 0 or last_b_r == 0
net_l = np.NaN if no_value else last_a_l - last_b_l
net_r = np.NaN if no_value_r else last_a_r - last_b_r
y_l.append(net_l)
y_r.append(net_r)
best_x_l = range_x[np.nanargmax(y_l)]
best_y_l = np.nanmax(y_l)
best_x_r = range_x[np.nanargmax(y_r)]
best_y_r = np.nanmax(y_r)
plt.suptitle(title)
if acc_dir in [None, Direction.LEFT]:
ax1.set_title(f'\n\nWHEN X > {best_x_l}, DELTA = {best_y_l}')
ax1.hlines(0, min(range_x), max(range_x))
ax1.plot(range_x, y_l, color=Clr.ROSE)
ax1.vlines(best_x_l, -10, 10)
if acc_dir in [None, Direction.RIGHT]:
ax2.set_title(f'\n\nWHEN X < {best_x_r}, DELTA = {best_y_r}')
ax2.hlines(0, min(range_x), max(range_x))
ax2.plot(range_x, y_r, color=Clr.LAVENDER)
ax2.vlines(best_x_r, -10, 10)
plt.rcParams['figure.facecolor'] = old_background
plt.tight_layout()
plt.show(block=block)
return best_y_l
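# A hedged usage sketch (numbers are illustrative): feed the indicator values of winning
# trades as prefer_group and of losing trades as unprefer_group, then read the suggested
# threshold off the figure titles.
#
#     gains  = [31, 42, 55, 61, 70]   # e.g. RSI at entry for winning trades
#     losses = [22, 28, 35, 47]       # RSI at entry for losing trades
#     delta = plot_threshold_cross_cumulation(gains, losses,
#                                             acc_dir=Direction.LEFT,
#                                             title="RSI threshold sweep",
#                                             block=True)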
def plot_for_stoploss(sample_data, profits, center_val=0, title="", block=False):
if not sample_data:
plt.figure()
plt.title(title)
plt.plot([])
plt.show(block=block)
return
_, axs = plt.subplots(3,1, sharex=True)
ax1 = axs[0]
ax2 = axs[1]
ax3 = axs[2]
profits = [x for _,x in sorted(zip(sample_data,profits))]
sample_data = sorted(sample_data)
center_index = min(range(len(sample_data))[::-1], key=lambda i: abs(center_val-sample_data[i]))
count_y = [abs(center_index-i)/len(sample_data)*100 for i,val in enumerate(sample_data)]
count_x = sample_data[::]
acc_dd_y = np.cumsum(sample_data)
norm_acc_dd_y = acc_dd_y/abs(sum(sample_data))*-100
acc_pf_y = np.cumsum(profits[::-1])[::-1]
norm_acc_pf_y = acc_pf_y / max(max(acc_pf_y), abs(min(acc_pf_y))) * 100
# print(acc_pf_y)
ax1.set_title(title)
BLUE = 'C0'
ORANGE = 'C1'
RED = 'C3'
GREEN = 'C2'
BLACK = "#000000"
# [OK]
ax1.plot(count_x, norm_acc_pf_y, GREEN, label="cumu prof at normal exit without SL (<<)") # cumulative profit if trades from the right exit at the end of holding period
# []
ax1.plot(count_x, norm_acc_dd_y, RED, label='cumu loss at DD (>>)') # cumulative loss if trades from the left exit at dd
# []
# cumulative loss if trades from the left exit at stoploss of X value
acc_qq_y = np.array([(i+1)*-sample_data[i] for i,acc_dd in enumerate(norm_acc_dd_y)])
norm_acc_qq_y = acc_qq_y / max(max(acc_qq_y), abs(min(acc_qq_y)))*100
ax1.plot(count_x, norm_acc_qq_y, ORANGE, label='cumu loss at SL set at DD (>>)')
# [OK]
ax1.plot(count_x, count_y, BLUE, label='cumulative trade dist by DD (<<)') # cumulative distribution of trades over worst dd
# [OK]
weights = np.ones_like(sample_data)/float(len(sample_data))*50
ax1.hist(sample_data, color=BLUE, weights=weights, bins=128, label='trade dist by DD') # trades distributed by worst dd
ax1.legend(fontsize="x-small")
ax2.plot(count_x, acc_pf_y, GREEN, label="cumu prof at normal exit without SL (<<)")
ax2.plot(count_x, acc_qq_y, ORANGE, label='cumu loss at SL set at DD (>>)')
net = acc_pf_y - acc_qq_y
ax2.plot(count_x, net, BLACK, linewidth=1.5, label='net')
# plt.plot(count_x, net/acc_qq_y/100, GREY, linewidth=1.5, label='recovery factor')
# [OK]
weights = np.ones_like(sample_data)/float(len(sample_data))*0.01
ax2.hist(sample_data, color=BLUE, weights=weights, bins=128, label='trade dist by DD') # trades distributed by worst dd
ax2.vlines(-0.0015, 0,max(acc_pf_y))
ax2.legend(fontsize='x-small')
recovery = net/acc_qq_y
recovery = np.clip(recovery, -5, 5)
ax3.plot(count_x, recovery)
ax3.vlines(-0.00001*200, 0, max(recovery), color=BLACK)
ax3.yaxis.set_major_locator(MultipleLocator(1))
ax3.grid(True)
plt.show(block=block)
def block():
plt.show() | import matplotlib.pyplot as plt
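# A hedged, self-contained demo of the stop-loss helper above; the synthetic numbers
# below are illustrative only.
if __name__ == '__main__':
    import random
    random.seed(0)
    worst_drawdowns = [-abs(random.gauss(0.002, 0.001)) for _ in range(200)]
    final_profits = [random.gauss(0.001, 0.003) for _ in range(200)]
    plot_for_stoploss(worst_drawdowns, final_profits, title='synthetic stop-loss sweep')
    block()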
commons/fl_init.py
__version__ = '0.1.0'
# general:
__title__ = __file__
__author__ = 'Gumshoe Media Inc.'
__email__ = '<EMAIL>'
# license:
__license_setup_title__ = 'GNU General Public License v3'
__license_setup_classifier__ = 'GNU General Public License v3 (GPLv3)'
examples/chemu-gui/emulator.py
from PyQt4 import QtCore, QtGui
import PyChemu
from display import ChipDisplay, ChipDisplayWidget
from worker import EmulatorWorker
from ctypes import pointer, c_char_p
from enum import Enum
# displays the state of the chip keyboard
class ChipInputWidget(QtGui.QWidget):
class InputButton(QtGui.QFrame):
def __init__(self, key, inputWidget):
QtGui.QFrame.__init__(self, inputWidget)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.__showContextMenu)
self.setAutoFillBackground(True)
self.setFrameStyle(QtGui.QFrame.Plain | QtGui.QFrame.StyledPanel)
self.__pals = tuple((QtGui.QPalette() for i in range(2)))
self.__pals[0].setColor(QtGui.QPalette.Active, QtGui.QPalette.Window, QtCore.Qt.white)
self.setPalette(self.__pals[0])
self.__pals[1].setColor(QtGui.QPalette.Active, QtGui.QPalette.Window, QtCore.Qt.black)
self.__pals[1].setColor(QtGui.QPalette.Active, QtGui.QPalette.WindowText, QtCore.Qt.white)
self.__inputWidget = inputWidget
self.__key = key
self.__keyString = ("%x" % key).upper()
self.__state = PyChemu.ChipKeyState.CHIP_KEYSTATE_RELEASED
self.__cmenu = QtGui.QMenu()
stickAction = self.__cmenu.addAction("Stick")
stickAction.setCheckable(True)
stickAction.triggered.connect(self.__stickAction)
def __showContextMenu(self, point):
self.__cmenu.exec_(self.mapToGlobal(point))
def __stickAction(self, checked):
if checked:
self.__inputWidget.stick(self.__key)
else:
self.__inputWidget.unstick(self.__key)
@property
def state(self):
return self.__state
@state.setter
def state(self, value):
if value != self.__state:
self.__state = value
self.setPalette(self.__pals[int(value)])
self.repaint()
def mousePressEvent(self, e):
if e.button() == QtCore.Qt.LeftButton:
self.__inputWidget.setKeyState(self.__key, PyChemu.ChipKeyState.CHIP_KEYSTATE_PRESSED)
def mouseReleaseEvent(self, e):
if e.button() == QtCore.Qt.LeftButton:
self.__inputWidget.setKeyState(self.__key, PyChemu.ChipKeyState.CHIP_KEYSTATE_RELEASED)
def paintEvent(self, e):
QtGui.QFrame.paintEvent(self, e)
painter = QtGui.QPainter(self)
painter.drawText(self.contentsRect(), QtCore.Qt.AlignCenter,
self.__keyString)
KEYBOARD_LAYOUT = (
PyChemu.ChipKey.CHIP_KEY_1,
PyChemu.ChipKey.CHIP_KEY_2,
PyChemu.ChipKey.CHIP_KEY_3,
PyChemu.ChipKey.CHIP_KEY_C,
PyChemu.ChipKey.CHIP_KEY_4,
PyChemu.ChipKey.CHIP_KEY_5,
PyChemu.ChipKey.CHIP_KEY_6,
PyChemu.ChipKey.CHIP_KEY_D,
PyChemu.ChipKey.CHIP_KEY_7,
PyChemu.ChipKey.CHIP_KEY_8,
PyChemu.ChipKey.CHIP_KEY_9,
PyChemu.ChipKey.CHIP_KEY_E,
PyChemu.ChipKey.CHIP_KEY_A,
PyChemu.ChipKey.CHIP_KEY_0,
PyChemu.ChipKey.CHIP_KEY_B,
PyChemu.ChipKey.CHIP_KEY_F
)
inputChanged = QtCore.pyqtSignal(name="inputChanged")
def __init__(self, emuWin):
QtGui.QWidget.__init__(self, emuWin)
emuWin.installEventFilter(self)
self.setMinimumSize(100, 100)
self.__input = PyChemu.ChipInput()
self.__stuckKeys = set()
self.__emuWin = emuWin
# map of Qt keycodes to chip keycodes
self.__keymap = {
QtCore.Qt.Key_1 : PyChemu.ChipKey.CHIP_KEY_1,
QtCore.Qt.Key_2 : PyChemu.ChipKey.CHIP_KEY_2,
QtCore.Qt.Key_3 : PyChemu.ChipKey.CHIP_KEY_3,
QtCore.Qt.Key_4 : PyChemu.ChipKey.CHIP_KEY_C,
QtCore.Qt.Key_Q : PyChemu.ChipKey.CHIP_KEY_4,
QtCore.Qt.Key_W : PyChemu.ChipKey.CHIP_KEY_5,
QtCore.Qt.Key_E : PyChemu.ChipKey.CHIP_KEY_6,
QtCore.Qt.Key_R : PyChemu.ChipKey.CHIP_KEY_D,
QtCore.Qt.Key_A : PyChemu.ChipKey.CHIP_KEY_7,
QtCore.Qt.Key_S : PyChemu.ChipKey.CHIP_KEY_8,
QtCore.Qt.Key_D : PyChemu.ChipKey.CHIP_KEY_9,
QtCore.Qt.Key_F : PyChemu.ChipKey.CHIP_KEY_E,
QtCore.Qt.Key_Z : PyChemu.ChipKey.CHIP_KEY_A,
QtCore.Qt.Key_X : PyChemu.ChipKey.CHIP_KEY_0,
QtCore.Qt.Key_C : PyChemu.ChipKey.CHIP_KEY_B,
QtCore.Qt.Key_V : PyChemu.ChipKey.CHIP_KEY_F
}
self.__layout = QtGui.QGridLayout()
self.__keyWidgets = [None for i in range(16)]
for i, key in enumerate(self.KEYBOARD_LAYOUT):
widget = ChipInputWidget.InputButton(key, self)
self.__layout.addWidget(widget, i // 4, i % 4)
self.__keyWidgets[key] = widget
self.setLayout(self.__layout)
@property
def input(self):
return self.__input
    # Sets the given key as stuck; a stuck key always reports a keystate
    # of pressed until it is unstuck.
def stick(self, key):
if key not in self.__stuckKeys:
self.__stuckKeys.add(key)
self.setKeyState(key, PyChemu.ChipKeyState.CHIP_KEYSTATE_PRESSED)
def unstick(self, key):
if key in self.__stuckKeys:
self.__stuckKeys.remove(key)
self.setKeyState(key, PyChemu.ChipKeyState.CHIP_KEYSTATE_RELEASED)
def eventFilter(self, obj, event):
# pass keyboard events to this widget's handlers
if obj is self.__emuWin:
if event.type() == QtCore.QEvent.KeyPress:
print("press")
self.keyPressEvent(event)
#return True
elif event.type() == QtCore.QEvent.KeyRelease:
self.keyReleaseEvent(event)
#return True
return False
def __updateInput(self, key, state):
if key in self.__keymap:
chipkey = self.__keymap[key]
if chipkey not in self.__stuckKeys:
self.setKeyState(chipkey, state)
self.inputChanged.emit()
self.repaint()
def setKeyState(self, key, state):
PyChemu.chipin_set(pointer(self.__input), key, state)
self.__keyWidgets[key].state = state
def keyPressEvent(self, e):
if e.isAutoRepeat():
return
self.__updateInput(e.key(), PyChemu.ChipKeyState.CHIP_KEYSTATE_PRESSED)
def keyReleaseEvent(self, e):
if e.isAutoRepeat():
return
self.__updateInput(e.key(), PyChemu.ChipKeyState.CHIP_KEYSTATE_RELEASED)
def paintEvent(self, e):
QtGui.QWidget.paintEvent(self, e)
return
painter = QtGui.QPainter(self)
boxSize = 20
for i, key in enumerate(self.__layout):
x = (i % 4) * boxSize
y = (i // 4) * boxSize
if PyChemu.chipin_keystate(pointer(self.__input), key) == PyChemu.ChipKeyState.CHIP_KEYSTATE_PRESSED:
painter.fillRect(x, y, boxSize, boxSize, QtCore.Qt.SolidPattern)
else:
painter.drawRect(x, y, boxSize, boxSize)
class EmulatorWindow(QtGui.QMainWindow):
def __init__(self, emu, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.__emu = emu
self.__emu.contents.drawHandler = self.getDrawHandlerCallback()
#self.__worker = EmulatorWorker(self)
#self.__worker.updateDisplay.connect(self.__updateDisplay)
self.setWindowTitle("CHIP-8 Emulator")
self.resize(640, 320)
# central widget
self.__display = ChipDisplay(self)
self.__displayWidget = ChipDisplayWidget(self.__display, parent=self)
self.setCentralWidget(self.__displayWidget)
# execution control dock
# self.__exeCtrlDock = QtGui.QDockWidget("Execution Control", self)
# self.__currentInstLabel = QtGui.QLabel()
# self.__testLabel = QtGui.QLabel()
# exeWidget = QtGui.QWidget(self.__exeCtrlDock)
# layout = QtGui.QGridLayout(exeWidget)
# layout.addWidget(QtGui.QLabel("Next Instruction:"), 0, 0)
# layout.addWidget(self.__currentInstLabel, 0, 1)
# layout.addWidget(self.__testLabel, 1, 0)
# self.__exeCtrlDock.setWidget(exeWidget)
# self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.__exeCtrlDock)
#self.__exeCtrlDock.hide()
# emulation state dock
self.__emuStateDock = QtGui.QDockWidget("Emulator State", self)
self.__nextInstLabel = QtGui.QLabel()
self.__pcLabel = QtGui.QLabel()
self.__stLabel = QtGui.QLabel()
self.__dtLabel = QtGui.QLabel()
# self.__regTable = QtGui.QTreeWidget()
# self.__regTable.setHeaderLabels(["Register", "Value"])
# for reg in ["v{0:X}".format(i) for i in range(16)] + ["I"]:
# item = QtGui.QTreeWidgetItem([reg, "0"])
# self.__regTable.addTopLevelItem(item)
stateWidget = QtGui.QWidget(self.__emuStateDock)
layout = QtGui.QGridLayout()
layout.addWidget(QtGui.QLabel("Next Instruction: "), 0, 0)
layout.addWidget(self.__nextInstLabel, 0, 1)
layout.addWidget(QtGui.QLabel("PC:"), 1, 0)
layout.addWidget(self.__pcLabel, 1, 1)
layout.addWidget(QtGui.QLabel("ST:"), 1, 2)
layout.addWidget(self.__stLabel, 1, 3)
layout.addWidget(QtGui.QLabel("DT:"), 1, 4)
layout.addWidget(self.__dtLabel, 1, 5)
# layout.addWidget(QtGui.QLabel("Registers"), 2, 0)
# layout.addWidget(self.__regTable, 3, 0, 1, 6)
stateWidget.setLayout(layout)
self.__emuStateDock.setWidget(stateWidget)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.__emuStateDock)
# keyboard dock
self.__keyboardDock = QtGui.QDockWidget("Keyboard", self)
self.__inputWidget = ChipInputWidget(self)
self.__keyboardDock.setWidget(self.__inputWidget)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.__keyboardDock)
self.__input = PyChemu.ChipInput()
self.__lastKeyPressed = 0
self.__lastCollision = 0
self.__stepTimer = None
self.__sndDelTimer = None
self.__testSprite = PyChemu.ChipSprite()
self.__testSprite.x = 24
self.__testSprite.y = 12
self.__testSprite.rows = 4
for i in range(4):
self.__testSprite.data[i] = 0b00111100
#self.__display.drawSprite(self.__testSprite)
@property
def input(self):
return self.__inputWidget.input
@property
def emu(self):
return self.__emu
@property
def lastDrawHasCollision(self):
return self.__lastCollision
@property
def lastChipKeyPressed(self):
return self.__lastKeyPressed
def getDrawHandlerCallback(self):
def callback(op, sprite):
coll = 0
if op == PyChemu.ChipDrawOp.CHIP_DRAW_CLEAR.value:
self.__display.clear()
elif op == PyChemu.ChipDrawOp.CHIP_DRAW_SPRITE.value:
if self.__display.drawSprite(sprite.contents):
coll = 1
return coll
return PyChemu.DrawHandler(callback)
def getPollKeyHandlerCallback(self):
def callback():
return 0
return PyChemu.PollKeyHandler(callback)
def __updateTestLabel(self):
self.__testLabel.setText("{0:016b}".format(self.__input.value))
def __updateStateDock(self):
dp = self.__emu.contents.dp
nextinst = (self.__emu.contents.memory.array[dp.pc] << 8) | \
self.__emu.contents.memory.array[dp.pc + 1]
self.__nextInstLabel.setText("0x{0:04X}".format(nextinst))
self.__pcLabel.setText("0x{0:03X}".format(dp.pc))
self.__stLabel.setText(str(dp.sndTimer))
self.__dtLabel.setText(str(dp.delTimer))
def __updateDisplay(self, op, sprite):
self.__lastCollision = 0
if op == PyChemu.ChipDrawOp.CHIP_DRAW_CLEAR:
self.__display.clear()
elif op == PyChemu.ChipDrawOp.CHIP_DRAW_SPRITE:
if self.__display.drawSprite(sprite.contents):
self.__lastCollision = 1
self.__worker.displayWaitCondition.wakeAll()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_P:
self.__stepTimer = self.startTimer(10)
self.__sndDelTimer = self.startTimer(16)
# if e.isAutoRepeat():
# return
#
# key = e.key()
#
# if not self.__worker.isRunning() and e.key() == QtCore.Qt.Key_P:
# print("Starting thread")
# self.__worker.start()
#
# if e.key() == QtCore.Qt.Key_T:
# self.__display.drawSprite(self.__testSprite)
# self.__testSprite.x += 1
# self.__display.drawSprite(self.__testSprite)
#
#
# if key in self.__keymap:
# chipkey = self.__keymap[key]
# if self.__worker.awaitingKeyPress:
# self.__lastKeyPressed = chipkey
# self.__worker.pollKeyWaitCondition.wakeAll()
# PyChemu.chipin_set(pointer(self.__input), chipkey,
# PyChemu.ChipKeyState.CHIP_KEYSTATE_PRESSED)
# self.__updateTestLabel()
#print(self.__input)
#print("Key %d is pressed" % chipkey)
def keyReleaseEvent(self, e):
pass
# if e.isAutoRepeat():
# return
# key = e.key()
# if key in self.__keymap:
# chipkey = self.__keymap[key]
# PyChemu.chipin_set(pointer(self.__input), chipkey,
# PyChemu.ChipKeyState.CHIP_KEYSTATE_RELEASED)
# self.__updateTestLabel()
#print("Key %d is released" % chipkey)
#print(self.__input)
def timerEvent(self, e):
id = e.timerId()
if id == self.__stepTimer:
self.__emu.contents.input = self.__inputWidget.input
PyChemu.chipemu_step(self.__emu)
self.__updateStateDock()
elif id == self.__sndDelTimer:
if self.__emu.contents.dp.delTimer > 0:
self.__emu.contents.dp.delTimer -= 1
if self.__emu.contents.dp.sndTimer > 0:
self.__emu.contents.dp.sndTimer -= 1
if __name__ == "__main__":
app = QtGui.QApplication([])
app.setStyleSheet("""
#ChipDisplayWidget {
background-color: #404040;
}
""")
emu = PyChemu.chipemu_create()
# for testing purposes
if PyChemu.chipemu_loadROM(emu, c_char_p(b"invaders.c8")) == 1:
print("Failed to load ROM")
# this program will test the font sprites
# loops from 0 to 15 and draws each font sprite
# testProg = [
# 0x60, 0x00, # li v0, 0
# 0x61, 0x0a, # li v1, 10
# 0x62, 0x0a, # li v2, 10
# 0xf0, 0x29, # font v0
# 0xD1, 0x25, # draw v1, v2, 5
# 0x70, 0x01, # addi v0, 1
# 0x40, 0x10, # snei v0, 16
# 0x60, 0x00, # li v0, 0
# 0x00, 0xe0, # cls
# 0x12, 0x06 # j 0x206
# ]
# testProg = [
# 0x6a, 0x00, #li vA, 0
# 0x6b, 0x0a, #li vB, 10
# 0x6c, 0x0a, #li vC, 10
# 0xaf, 0x00, #la 0xf00
# 0xfa, 0x33, #bcd vA
# 0xf2, 0x65, # rest v2
# 0xf0, 0x29, #font v0
# 0xdb, 0xc5, #draw vB, vC, 5
# 0x7b, 0x0a, #addi vB, 1
# 0xf1, 0x29, #font v1
# 0xdb, 0xc5, #draw vB, vC, 5
# 0x7b, 0x0a, #addi vB, 1
# 0xf2, 0x29, #font v2
# 0xdb, 0xc5, #draw vB, vC, 5
# 0x6b, 0x0a, #li vB, 10
# 0x7a, 0x01, #addi vA, 1
# 0x00, 0xe0, # cls
# 0x12, 0x06 # j 0x206
# ]
#
# for i in range(len(testProg)):
# emu.contents.memory.data[i] = testProg[i]
win = EmulatorWindow(emu)
win.show()
#widget = ChipDisplayWidget()
#widget.resize(640, 320)
#widget.show()
app.exec_()
PyChemu.chipemu_destroy(emu)
project_balls/main/migrations/0003_boardmodel_uploaddate.py
# Generated by Django 3.1.4 on 2021-04-29 11:37
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20210214_0306'),
]
operations = [
migrations.AddField(
model_name='boardmodel',
name='UploadDate',
field=models.DateField(auto_now_add=True, default=datetime.datetime(2021, 4, 29, 11, 37, 9, 685941, tzinfo=utc)),
preserve_default=False,
),
]
python__advanced/04.tuples_and_sets_exercise/03.periodic_table.py
num = int(input())
save = set()
for i in range(num):
chemical = input().split()
for element in chemical:
save.add(element)
for j in save:
print(j)
sympy/physics/mechanics/body.py
from sympy.core.backend import Symbol
from sympy.physics.vector import Point, Vector, ReferenceFrame
from sympy.physics.mechanics import RigidBody, Particle, inertia
__all__ = ["Body"]
# XXX: We use type:ignore because the classes RigidBody and Particle have
# inconsistent parallel axis methods that take different numbers of arguments.
class Body(RigidBody, Particle): # type: ignore
"""
Body is a common representation of either a RigidBody or a Particle SymPy
object depending on what is passed in during initialization. If a mass is
passed in and central_inertia is left as None, the Particle object is
created. Otherwise a RigidBody object will be created.
The attributes that Body possesses will be the same as a Particle instance
or a Rigid Body instance depending on which was created. Additional
attributes are listed below.
Attributes
==========
name : string
The body's name
masscenter : Point
The point which represents the center of mass of the rigid body
frame : ReferenceFrame
The reference frame which the body is fixed in
mass : Sympifyable
The body's mass
inertia : (Dyadic, Point)
The body's inertia around its center of mass. This attribute is specific
to the rigid body form of Body and is left undefined for the Particle
form
loads : iterable
This list contains information on the different loads acting on the
Body. Forces are listed as a (point, vector) tuple and torques are
listed as (reference frame, vector) tuples.
Parameters
==========
name : String
Defines the name of the body. It is used as the base for defining
body specific properties.
masscenter : Point, optional
A point that represents the center of mass of the body or particle.
If no point is given, a point is generated.
mass : Sympifyable, optional
A Sympifyable object which represents the mass of the body. If no
mass is passed, one is generated.
frame : ReferenceFrame, optional
The ReferenceFrame that represents the reference frame of the body.
If no frame is given, a frame is generated.
central_inertia : Dyadic, optional
Central inertia dyadic of the body. If none is passed while creating
RigidBody, a default inertia is generated.
Examples
========
Default behaviour. This results in the creation of a RigidBody object for
which the mass, mass center, frame and inertia attributes are given default
values. ::
>>> from sympy.physics.mechanics import Body
>>> body = Body('name_of_body')
This next example demonstrates the code required to specify all of the
values of the Body object. Note this will also create a RigidBody version of
the Body object. ::
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import ReferenceFrame, Point, inertia
>>> from sympy.physics.mechanics import Body
>>> mass = Symbol('mass')
>>> masscenter = Point('masscenter')
>>> frame = ReferenceFrame('frame')
>>> ixx = Symbol('ixx')
>>> body_inertia = inertia(frame, ixx, 0, 0)
>>> body = Body('name_of_body', masscenter, mass, frame, body_inertia)
The minimal code required to create a Particle version of the Body object
involves simply passing in a name and a mass. ::
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> mass = Symbol('mass')
>>> body = Body('name_of_body', mass=mass)
The Particle version of the Body object can also receive a masscenter point
and a reference frame, just not an inertia.
"""
def __init__(
self, name, masscenter=None, mass=None, frame=None, central_inertia=None
):
self.name = name
self.loads = []
if frame is None:
frame = ReferenceFrame(name + "_frame")
if masscenter is None:
masscenter = Point(name + "_masscenter")
if central_inertia is None and mass is None:
ixx = Symbol(name + "_ixx")
iyy = Symbol(name + "_iyy")
izz = Symbol(name + "_izz")
izx = Symbol(name + "_izx")
ixy = Symbol(name + "_ixy")
iyz = Symbol(name + "_iyz")
_inertia = (inertia(frame, ixx, iyy, izz, ixy, iyz, izx), masscenter)
else:
_inertia = (central_inertia, masscenter)
if mass is None:
_mass = Symbol(name + "_mass")
else:
_mass = mass
masscenter.set_vel(frame, 0)
# If user passes masscenter and mass then a particle is created
# otherwise a rigidbody. As a result a body may or may not have inertia.
if central_inertia is None and mass is not None:
self.frame = frame
self.masscenter = masscenter
Particle.__init__(self, name, masscenter, _mass)
else:
RigidBody.__init__(self, name, masscenter, frame, _mass, _inertia)
def apply_force(self, vec, point=None):
"""
Adds a force to a point (center of mass by default) on the body.
Parameters
==========
vec: Vector
Defines the force vector. Can be any vector w.r.t any frame or
combinations of frames.
point: Point, optional
Defines the point on which the force is applied. Default is the
Body's center of mass.
Example
=======
The first example applies a gravitational force in the x direction of
Body's frame to the body's center of mass. ::
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> body = Body('body')
>>> g = Symbol('g')
>>> body.apply_force(body.mass * g * body.frame.x)
To apply force to any other point than center of mass, pass that point
as well. This example applies a gravitational force to a point a
distance l from the body's center of mass in the y direction. The
force is again applied in the x direction. ::
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> body = Body('body')
>>> g = Symbol('g')
>>> l = Symbol('l')
>>> point = body.masscenter.locatenew('force_point', l *
... body.frame.y)
>>> body.apply_force(body.mass * g * body.frame.x, point)
"""
if not isinstance(point, Point):
if point is None:
point = self.masscenter # masscenter
else:
raise TypeError("A Point must be supplied to apply force to.")
if not isinstance(vec, Vector):
raise TypeError("A Vector must be supplied to apply force.")
self.loads.append((point, vec))
def apply_torque(self, vec):
"""
Adds a torque to the body.
Parameters
==========
vec: Vector
Defines the torque vector. Can be any vector w.r.t any frame or
combinations of frame.
Example
=======
This example adds a simple torque around the body's z axis. ::
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> body = Body('body')
>>> T = Symbol('T')
>>> body.apply_torque(T * body.frame.z)
"""
if not isinstance(vec, Vector):
raise TypeError("A Vector must be supplied to add torque.")
self.loads.append((self.frame, vec))
nmrglue/analysis/segmentation.py
"""
Functions to perform segmentation of NMR spectra.
"""
import numpy as np
import numpy.ma as ma
import scipy.ndimage as ndimage
from .analysisbase import neighbors
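# The find_all_* functions defined below share a common interface: they take
# the spectral data and an intensity threshold and return the location of the
# extremum of each segment, optionally followed by a list of slices (one per
# segment) when find_segs is True.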
# Connected segmenting method:
# The connected segmentation method finds all nodes which are above a given
# threshold and connected to the initial point. For finding all segments
# the scipy.ndimage.label function is used for speed.
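#
# A minimal usage sketch (not part of the library; `data` and `noise` are
# hypothetical names for a spectrum array and an estimated noise level
# prepared elsewhere):
#
#     locations, seg_slices = find_all_connected(data, thres=5 * noise,
#                                                find_segs=True, diag=True)
#     first_segment = data[seg_slices[0]]   # values in the first segment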
def label_connected(data, thres, structure):
"""
Label connected features in data. Returns labeled_array, num_features
"""
return ndimage.label(data > thres, structure)
def find_all_connected(data, thres, find_segs=False, diag=False):
"""
Find all connected segments.
Parameters
----------
data : ndarray
Data to perform segmentation on.
    thres : float
        Threshold; nodes below this value are considered noise.
    find_segs : bool, optional
        True to return a list of slices for the segments.
    diag : bool, optional
        True to include diagonal neighbors in connection.
    Returns
    -------
    locations : list
        List of indices of the local maximum in each segment.
    seg_slices : list, optional
        List of slices which extract a given segment from the data. Only
        returned when find_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_connected(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.maximum_position(data, labels, range(1, num_features +
1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
# nconnected method:
# The nconnected method is identical to the connected method except nodes must
# be below the threshold and local minima are reported. This is useful for
# finding negative peaks by setting thres to the negative of the noise level.
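#
# Usage sketch (same hypothetical `data` and `noise` as above); negative peaks
# are segmented by passing a negative threshold:
#
#     locations, seg_slices = find_all_nconnected(data, thres=-5 * noise,
#                                                 find_segs=True)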
def label_nconnected(data, thres, structure):
"""
Label nconnected features in data. Returns labeled_array, num_features
"""
return ndimage.label(data < thres, structure)
def find_all_nconnected(data, thres, find_segs=False, diag=False):
"""
Find all negatively connected segments in data.
Parameters
----------
data : ndarray
Data to perform segmentation on.
    thres : float
        Threshold; nodes above this value are considered noise.
    find_segs : bool, optional
        True to return a list of slices for the segments.
    diag : bool, optional
        True to include diagonal neighbors in connection.
    Returns
    -------
    locations : list
        List of indices of the local minimum in each segment.
    seg_slices : list, optional
        List of slices which extract a given segment from the data. Only
        returned when find_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_nconnected(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.minimum_position(data, labels, range(1,
num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
# downward segmentation method:
# The downward segmenting method uses the flood fill algorithm to find
# all points connected to an initial node which are above a given threshold
# and to which a path exists in which each step of the path moves lower in
# intensity. This can be thought of as all points accessible by a water drop
# following downward slopes from the initial node.
# Upward segmentation uses the same principle except nodes must be below
# the threshold and an upward path must exist.
def mark_dseg(mdata, map, pt, mark, structure):
"""
Mark downward-connected region on segment map starting at node pt.
Modifies mdata mask and map.
Parameters
----------
mdata : masked ndarray
Masked data array.
map :
Array mapping out segments.
pt : tuple of ints
Index of starting node
mark : int
Integer to mark map with.
"""
if mdata.mask[pt] is True:
return
else:
map[pt] = mark
mdata[pt] = ma.masked
Q = [pt]
while Q:
pt = Q.pop(0)
v = mdata.data[pt]
        # Check all neighbors
for new_pt in neighbors(pt, mdata.shape, structure):
if mdata.mask[new_pt] == False and mdata[new_pt] < v:
Q.append(new_pt)
map[new_pt] = mark
mdata[new_pt] = ma.masked
return
def label_downward_seg(data, labels, seg_slice, seg_index, max_index,
structure):
""" Label a segment which is downward connected """
slabels = labels[seg_slice]
msdata = np.ma.masked_array(data[seg_slice], mask=(slabels != seg_index))
# mark the downward connected segment with the highest peak in the
# selected segment with the segment index.
argmax = np.unravel_index(msdata.argmax(), msdata.shape)
mark_dseg(msdata, slabels, argmax, seg_index, structure)
    # mark any remaining unmasked points as new downward-connected segments
while msdata.mask.all() == False:
argmax = np.unravel_index(msdata.argmax(), msdata.shape)
mark_dseg(msdata, slabels, argmax, max_index, structure)
max_index = max_index + 1
return max_index
def label_downward(data, thres, structure):
"""
    Label downward connected features in data. Returns labeled_array, num_features
"""
# find connected segments
labels, num_features = ndimage.label(data > thres, structure)
seg_slices = ndimage.find_objects(labels)
max_index = int(num_features + 1)
# loop over the segments and perform a downward segment on each
for i, s in enumerate(seg_slices):
max_index = label_downward_seg(data, labels, s, i + 1, max_index,
structure)
return labels, max_index - 1
def find_all_downward(data, thres, find_segs=False, diag=False):
"""
Find all downward connected segments in data
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
Threshold, below this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
locations : list
        List of indices of the local maximum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
        returned when find_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_downward(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.maximum_position(data, labels, range(1,
num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
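# Illustrative sketch (not part of the library) contrasting connected and
# downward segmentation on a 1-D trace with two maxima separated by a saddle
# that stays above the threshold: connected segmentation reports a single
# segment, downward segmentation splits it at the saddle.
#
#   import numpy as np
#   from nmrglue.analysis import segmentation
#
#   trace = np.array([0., 1., 3., 5., 3., 2., 4., 6., 4., 1., 0.])
#   segmentation.find_all_connected(trace, thres=0.5)   # -> [(7,)]
#   segmentation.find_all_downward(trace, thres=0.5)    # -> [(7,), (3,)]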
def mark_useg(mdata, map, pt, mark, structure):
"""
Mark upward-connected region on segment map starting at node pt
Modifies mdata mask and map.
Parameters
----------
mdata : masked ndarray
Masked data array.
map :
Array mapping out segments.
pt : tuple of ints
Index of starting node
mark : int
Integer to mark map with.
"""
if mdata.mask[pt] is True:
return
else:
map[pt] = mark
mdata[pt] = ma.masked
Q = [pt]
while Q:
pt = Q.pop(0)
v = mdata.data[pt]
        # Check all neighbors
for new_pt in neighbors(pt, mdata.shape, structure):
if mdata.mask[new_pt] == False and mdata[new_pt] > v:
Q.append(new_pt)
map[new_pt] = mark
mdata[new_pt] = ma.masked
return
def label_upward_seg(data, labels, seg_slice, seg_index, max_index,
structure):
""" Label a segment which is upward connected """
slabels = labels[seg_slice]
msdata = np.ma.masked_array(data[seg_slice],
mask=(slabels != seg_index))
# mark the upward connected segment with the highest peak in the
# selected segment with the segment index.
argmin = np.unravel_index(msdata.argmin(), msdata.shape)
mark_useg(msdata, slabels, argmin, seg_index, structure)
    # mark any remaining unmasked points as new upward-connected segments
while msdata.mask.all() == False:
argmin = np.unravel_index(msdata.argmin(), msdata.shape)
mark_useg(msdata, slabels, argmin, max_index, structure)
max_index = max_index + 1
return max_index
def label_upward(data, thres, structure):
"""
Label upward connected features in data. Returns labeled_array,
num_features
"""
# find connected segments
labels, num_features = ndimage.label(data < thres, structure)
seg_slices = ndimage.find_objects(labels)
max_index = int(num_features + 1)
    # loop over the segments and perform an upward segmentation on each
for i, s in enumerate(seg_slices):
max_index = label_upward_seg(data, labels, s, i + 1, max_index,
structure)
return labels, max_index - 1
def find_all_upward(data, thres, find_segs=False, diag=False):
"""
Find all upward connected segments in data
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
        Threshold, above this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
locations : list
        List of indices of the local minimum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
        returned when find_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_upward(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.minimum_position(data, labels,
range(1, num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
##########################
# Single point functions #
##########################
def find_downward(data, pt, thres, diag=False):
"""
Find points downward-connected to a point in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, below this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
        Indices of downward-connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] < thres: # check that the initial point is above threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
v = data[pt] # value at current node
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if thres < data[new_pt] < v and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_connected(data, pt, thres, diag=False):
"""
Find points connected to a point in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, below this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
        Indices of connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] < thres: # check that the initial point is above threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if data[new_pt] > thres and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_nconnected(data, pt, thres, diag=False):
"""
Find points connected to pt in data below threshold.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, above this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
        Indices of connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
    if data[pt] > thres: # check that the initial point is below threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if data[new_pt] < thres and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_upward(data, pt, thres, diag=False):
"""
Find points upward-connected to pt in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
        Threshold, above this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
        Indices of upward-connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] > thres: # check that the initial point is below threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
v = data[pt] # value at current node
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if thres > data[new_pt] > v and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
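# Illustrative sketch of the single-point helpers defined above: starting from
# the same point, find_connected floods to every above-threshold neighbor,
# while find_downward only follows strictly decreasing paths.
#
#   import numpy as np
#   from nmrglue.analysis import segmentation
#
#   trace = np.array([0., 2., 4., 2., 3., 1., 0.])
#   segmentation.find_connected(trace, 2, thres=0.5)
#   # -> points 1, 2, 3, 4 and 5 (every above-threshold neighbor is reached)
#   segmentation.find_downward(trace, 2, thres=0.5)
#   # -> points 1, 2 and 3 only (the bump at index 4 is uphill from index 3)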
| """
Functions to perform segmentation of NMR spectrum.
"""
import numpy as np
import numpy.ma as ma
import scipy.ndimage as ndimage
from .analysisbase import neighbors
# Connected segmenting method:
# The connected segmentation method finds all nodes which are above a given
# threshold and connected to the initial point. For finding all segments
# the scipy.ndimage.label function is used for speed.
def label_connected(data, thres, structure):
"""
Label connected features in data. Returns labeled_array, num_features
"""
return ndimage.label(data > thres, structure)
def find_all_connected(data, thres, find_segs=False, diag=False):
"""
Find all connected segments.
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
Threshold, below this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool
True to include diagonal neighbors in connection.
Returns
-------
locations : list
List of indicies of local maximum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
returned when fig_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_connected(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.maximum_position(data, labels, range(1, num_features +
1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
# nconnected method:
# The nconnected method is identical to the connected method except nodes must
# be below the threshold and local minimum are reported. This is useful for
# finding negative peaks by setting thres to the negative of the noise level.
def label_nconnected(data, thres, structure):
"""
Label nconnected features in data. Returns labeled_array, num_features
"""
return ndimage.label(data < thres, structure)
def find_all_nconnected(data, thres, find_segs=False, diag=False):
"""
Find all negatively connected segments in data.
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
Threshold, below this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool
True to include diagonal neighbors in connection.
Returns
-------
locations : list
List of indicies of local maximum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
returned when fig_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_nconnected(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.minimum_position(data, labels, range(1,
num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
# downward segmentation method:
# The downward segmenting method uses the flood fill algorithm to find
# all points connected to an initial node which are above a given threshold
# and to which a path exists in which each step of the path moves lower in
# intensity. This can be though of as all points accessible by a water drop
# following downward slopes from the initial node.
# Upward segmentation uses the same priciple except nodes must be below
# the threshold an upward path must exist.
def mark_dseg(mdata, map, pt, mark, structure):
"""
Mark downward-connected region on segment map starting at node pt.
Modifies mdata mask and map.
Parameters
----------
mdata : masked ndarray
Masked data array.
map :
Array mapping out segments.
pt : tuple of ints
Index of starting node
mark : int
Integer to mark map with.
"""
if mdata.mask[pt] is True:
return
else:
map[pt] = mark
mdata[pt] = ma.masked
Q = [pt]
while Q:
pt = Q.pop(0)
v = mdata.data[pt]
# Check all neightbors
for new_pt in neighbors(pt, mdata.shape, structure):
if mdata.mask[new_pt] == False and mdata[new_pt] < v:
Q.append(new_pt)
map[new_pt] = mark
mdata[new_pt] = ma.masked
return
def label_downward_seg(data, labels, seg_slice, seg_index, max_index,
structure):
""" Label a segment which is downward connected """
slabels = labels[seg_slice]
msdata = np.ma.masked_array(data[seg_slice], mask=(slabels != seg_index))
# mark the downward connected segment with the highest peak in the
# selected segment with the segment index.
argmax = np.unravel_index(msdata.argmax(), msdata.shape)
mark_dseg(msdata, slabels, argmax, seg_index, structure)
# mark any
while msdata.mask.all() == False:
argmax = np.unravel_index(msdata.argmax(), msdata.shape)
mark_dseg(msdata, slabels, argmax, max_index, structure)
max_index = max_index + 1
return max_index
def label_downward(data, thres, structure):
"""
Label connected features in data. Returns labeled_array, num_features
"""
# find connected segments
labels, num_features = ndimage.label(data > thres, structure)
seg_slices = ndimage.find_objects(labels)
max_index = int(num_features + 1)
# loop over the segments and perform a downward segment on each
for i, s in enumerate(seg_slices):
max_index = label_downward_seg(data, labels, s, i + 1, max_index,
structure)
return labels, max_index - 1
def find_all_downward(data, thres, find_segs=False, diag=False):
"""
Find all downward connected segments in data
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
Threshold, below this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
locations : list
List of indicies of local maximum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
returned when fig_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_downward(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.maximum_position(data, labels, range(1,
num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
def mark_useg(mdata, map, pt, mark, structure):
"""
Mark upward-connected region on segment map starting at node pt
Modifies mdata mask and map.
Parameters
----------
mdata : masked ndarray
Masked data array.
map :
Array mapping out segments.
pt : tuple of ints
Index of starting node
mark : int
Integer to mark map with.
"""
if mdata.mask[pt] is True:
return
else:
map[pt] = mark
mdata[pt] = ma.masked
Q = [pt]
while Q:
pt = Q.pop(0)
v = mdata.data[pt]
# Check all neightbors
for new_pt in neighbors(pt, mdata.shape, structure):
if mdata.mask[new_pt] == False and mdata[new_pt] > v:
Q.append(new_pt)
map[new_pt] = mark
mdata[new_pt] = ma.masked
return
def label_upward_seg(data, labels, seg_slice, seg_index, max_index,
structure):
""" Label a segment which is upward connected """
slabels = labels[seg_slice]
msdata = np.ma.masked_array(data[seg_slice],
mask=(slabels != seg_index))
# mark the upward connected segment with the highest peak in the
# selected segment with the segment index.
argmin = np.unravel_index(msdata.argmin(), msdata.shape)
mark_useg(msdata, slabels, argmin, seg_index, structure)
# mark any
while msdata.mask.all() == False:
argmin = np.unravel_index(msdata.argmin(), msdata.shape)
mark_useg(msdata, slabels, argmin, max_index, structure)
max_index = max_index + 1
return max_index
def label_upward(data, thres, structure):
"""
Label upward connected features in data. Returns labeled_array,
num_features
"""
# find connected segments
labels, num_features = ndimage.label(data < thres, structure)
seg_slices = ndimage.find_objects(labels)
max_index = int(num_features + 1)
# loop over the segments and perform a downward segment on each
for i, s in enumerate(seg_slices):
max_index = label_upward_seg(data, labels, s, i + 1, max_index,
structure)
return labels, max_index - 1
def find_all_upward(data, thres, find_segs=False, diag=False):
"""
Find all upward connected segments in data
Parameters
----------
data : ndarray
Data to perform segmentation on.
thres : float
Threshold, below this nodes are considered noise.
find_segs : bool, optional
True to return a list of slices for the segments.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
locations : list
List of indicies of local maximum in each segment.
seg_slices : list, optional
List of slices which extract a given segment from the data. Only
returned when fig_segs is True.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
# determine labeled array of segments
labels, num_features = label_upward(data, thres, structure)
# determine locations of segment maxima
locations = ndimage.minimum_position(data, labels,
range(1, num_features + 1))
# find segment slices if requested and return
if find_segs is True:
seg_slices = ndimage.find_objects(labels)
return locations, seg_slices
else:
return locations
##########################
# Single point functions #
##########################
def find_downward(data, pt, thres, diag=False):
"""
Find points downward-connected to a point in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, below this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
Indicies of downward-connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] < thres: # check that the initial point is above threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
v = data[pt] # value at current node
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if thres < data[new_pt] < v and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_connected(data, pt, thres, diag=False):
"""
Find points connected to a point in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, below this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
Indicies of connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] < thres: # check that the initial point is above threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if data[new_pt] > thres and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_nconnected(data, pt, thres, diag=False):
"""
Find points connected to pt in data below threshold.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, above this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
Indicies of connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] > thres: # check that the initial point is above threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if data[new_pt] < thres and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment
def find_upward(data, pt, thres, diag=False):
"""
Find points upward-connected to pt in data.
Parameters
----------
data : ndarray
Array of data.
pt : tuple of ints
Starting point of peak.
thres : float
Threshold, below this nodes are considered noise.
diag : bool, optional
True to include diagonal neighbors in connection.
Returns
-------
nodes : list
Indicies of upward-connected nodes.
"""
# build structure array for defining feature connections
ndim = data.ndim
if diag:
structure = ndimage.generate_binary_structure(ndim, ndim)
else:
structure = ndimage.generate_binary_structure(ndim, 1)
if isinstance(pt, int):
pt = (pt, )
pt = tuple(pt)
shape = data.shape
if data[pt] > thres: # check that the initial point is below threshold.
return []
Q = [pt] # queue
segment = [pt]
while Q: # loop until Q is empty
pt = Q.pop(0) # remove first element of queue
v = data[pt] # value at current node
for new_pt in neighbors(pt, shape, structure): # check all neightbors
if thres > data[new_pt] > v and new_pt not in segment:
Q.append(new_pt)
segment.append(new_pt)
return segment | en | 0.77954 | Functions to perform segmentation of NMR spectrum. # Connected segmenting method: # The connected segmentation method finds all nodes which are above a given # threshold and connected to the initial point. For finding all segments # the scipy.ndimage.label function is used for speed. Label connected features in data. Returns labeled_array, num_features Find all connected segments. Parameters ---------- data : ndarray Data to perform segmentation on. thres : float Threshold, below this nodes are considered noise. find_segs : bool, optional True to return a list of slices for the segments. diag : bool True to include diagonal neighbors in connection. Returns ------- locations : list List of indicies of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. # build structure array for defining feature connections # determine labeled array of segments # determine locations of segment maxima # find segment slices if requested and return # nconnected method: # The nconnected method is identical to the connected method except nodes must # be below the threshold and local minimum are reported. This is useful for # finding negative peaks by setting thres to the negative of the noise level. Label nconnected features in data. Returns labeled_array, num_features Find all negatively connected segments in data. Parameters ---------- data : ndarray Data to perform segmentation on. thres : float Threshold, below this nodes are considered noise. find_segs : bool, optional True to return a list of slices for the segments. diag : bool True to include diagonal neighbors in connection. Returns ------- locations : list List of indicies of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. # build structure array for defining feature connections # determine labeled array of segments # determine locations of segment maxima # find segment slices if requested and return # downward segmentation method: # The downward segmenting method uses the flood fill algorithm to find # all points connected to an initial node which are above a given threshold # and to which a path exists in which each step of the path moves lower in # intensity. This can be though of as all points accessible by a water drop # following downward slopes from the initial node. # Upward segmentation uses the same priciple except nodes must be below # the threshold an upward path must exist. Mark downward-connected region on segment map starting at node pt. Modifies mdata mask and map. Parameters ---------- mdata : masked ndarray Masked data array. map : Array mapping out segments. pt : tuple of ints Index of starting node mark : int Integer to mark map with. # Check all neightbors Label a segment which is downward connected # mark the downward connected segment with the highest peak in the # selected segment with the segment index. # mark any Label connected features in data. Returns labeled_array, num_features # find connected segments # loop over the segments and perform a downward segment on each Find all downward connected segments in data Parameters ---------- data : ndarray Data to perform segmentation on. thres : float Threshold, below this nodes are considered noise. find_segs : bool, optional True to return a list of slices for the segments. 
diag : bool, optional True to include diagonal neighbors in connection. Returns ------- locations : list List of indicies of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. # build structure array for defining feature connections # determine labeled array of segments # determine locations of segment maxima # find segment slices if requested and return Mark upward-connected region on segment map starting at node pt Modifies mdata mask and map. Parameters ---------- mdata : masked ndarray Masked data array. map : Array mapping out segments. pt : tuple of ints Index of starting node mark : int Integer to mark map with. # Check all neightbors Label a segment which is upward connected # mark the upward connected segment with the highest peak in the # selected segment with the segment index. # mark any Label upward connected features in data. Returns labeled_array, num_features # find connected segments # loop over the segments and perform a downward segment on each Find all upward connected segments in data Parameters ---------- data : ndarray Data to perform segmentation on. thres : float Threshold, below this nodes are considered noise. find_segs : bool, optional True to return a list of slices for the segments. diag : bool, optional True to include diagonal neighbors in connection. Returns ------- locations : list List of indicies of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. # build structure array for defining feature connections # determine labeled array of segments # determine locations of segment maxima # find segment slices if requested and return ########################## # Single point functions # ########################## Find points downward-connected to a point in data. Parameters ---------- data : ndarray Array of data. pt : tuple of ints Starting point of peak. thres : float Threshold, below this nodes are considered noise. diag : bool, optional True to include diagonal neighbors in connection. Returns ------- nodes : list Indicies of downward-connected nodes. # build structure array for defining feature connections # check that the initial point is above threshold. # queue # loop until Q is empty # remove first element of queue # value at current node # check all neightbors Find points connected to a point in data. Parameters ---------- data : ndarray Array of data. pt : tuple of ints Starting point of peak. thres : float Threshold, below this nodes are considered noise. diag : bool, optional True to include diagonal neighbors in connection. Returns ------- nodes : list Indicies of connected nodes. # build structure array for defining feature connections # check that the initial point is above threshold. # queue # loop until Q is empty # remove first element of queue # check all neightbors Find points connected to pt in data below threshold. Parameters ---------- data : ndarray Array of data. pt : tuple of ints Starting point of peak. thres : float Threshold, above this nodes are considered noise. diag : bool, optional True to include diagonal neighbors in connection. Returns ------- nodes : list Indicies of connected nodes. # build structure array for defining feature connections # check that the initial point is above threshold. # queue # loop until Q is empty # remove first element of queue # check all neightbors Find points upward-connected to pt in data. 
Parameters ---------- data : ndarray Array of data. pt : tuple of ints Starting point of peak. thres : float Threshold, below this nodes are considered noise. diag : bool, optional True to include diagonal neighbors in connection. Returns ------- nodes : list Indicies of upward-connected nodes. # build structure array for defining feature connections # check that the initial point is below threshold. # queue # loop until Q is empty # remove first element of queue # value at current node # check all neightbors | 3.467483 | 3 |
scratch.py | thomasgnuttall/cae-invar-mod | 0 | 6630250 | %load_ext autoreload
%autoreload 2
import datetime
import os
import pickle
import skimage.io

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from complex_auto.motives_extractor import *
from complex_auto.motives_extractor.extractor import *
from exploration.pitch import extract_pitch_track
from exploration.img import (
remove_diagonal, convolve_array_tile, binarize, diagonal_gaussian,
hough_transform, hough_transform_new, scharr, sobel,
apply_bin_op, make_symmetric, edges_to_contours)
from exploration.segments import (
extract_segments_new, get_all_segments, break_all_segments, do_patterns_overlap, reduce_duplicates,
remove_short, extend_segments, compare_segments)
from exploration.sequence import (
apply_exclusions, contains_silence, min_gap, too_stable,
convert_seqs_to_timestep, get_stability_mask, add_center_to_mask,
remove_below_length)
from exploration.evaluation import evaluate_quick, load_annotations_new, evaluate_all_tiers, evaluation_report, get_coverage
from exploration.visualisation import plot_all_sequences, plot_pitch
from exploration.io import load_sim_matrix, write_all_sequence_audio, load_yaml
from exploration.pitch import cents_to_pitch, pitch_seq_to_cents, pitch_to_cents
################
## Parameters ##
################
# Output paths of each step in pipeline
out_dir = os.path.join("output", 'hpc')
sim_path = 'output/full_dataset/Koti Janmani.multitrack-vocal.mp3.npy'
### Pitch Extraction
# Sample rate of audio
sr = 44100
# size in frames of cqt window from convolution model
cqt_window = 1988 # was previously set to 1988
# Take sample of data, set to None to use all data
s1 = None # lower bound index (5000 has been used for testing)
s2 = None # higher bound index (9000 has been used for testing)
# pitch track extraction
frameSize = 2048 # For Melodia pitch extraction
hopSize = 128 # For Melodia pitch extraction
gap_interp = 250*0.001 # Interpolate pitch track gaps of <gap_interp> seconds or less [set to None to skip]
smooth = 7 # sigma for gaussian smoothing of pitch track [set to None to skip]
audio_path_vocal = 'audio/Akkarai Sisters at Arkay by Akkarai Sisters/<NAME>/Koti Janmani.multitrack-vocal.mp3'
audio_path = "audio/full_dataset/Akkarai_Sisters_at_Arkay/Akkarai Sisters - Akkarai Sisters at Arkay/1 - 3 - Akkarai Sisters - Koti Janmani.mp3"
# stability identification
stab_hop_secs = 0.2 # window size for stab computations in seconds
min_stability_length_secs = 1.0 # minimum length of region to be considered stable in seconds
freq_var_thresh_stab = 10 # max variation in pitch to be considered stable region
print('Extracting pitch track')
pitch, raw_pitch, timestep, time = extract_pitch_track(audio_path_vocal, frameSize, hopSize, gap_interp, smooth, sr)
print('Computing stability/silence mask')
stable_mask = get_stability_mask(raw_pitch, min_stability_length_secs, stab_hop_secs, freq_var_thresh_stab, timestep)
silence_mask = (raw_pitch == 0).astype(int)
silence_mask = add_center_to_mask(silence_mask)
### Image Processing
# convolutional filter
conv_filter = sobel
# Binarize raw sim array 0/1 below and above this value...
# depends completely on filter passed to convolutional step
# Best...
# scharr, 0.56
# sobel unidrectional, 0.1
# sobel bidirectional, 0.15
bin_thresh = 0.11
# lower bin_thresh for areas surrounding segments
bin_thresh_segment = 0.085
# percentage either size of a segment considered for lower bin thresh
perc_tail = 0.5
# Gaussian filter along diagonals with sigma...
gauss_sigma = None
# After gaussian, re-binarize with this threshold
cont_thresh = 0.15
# morphology params
etc_kernel_size = 10 # For closing
binop_dim = 3 # square dimension of binary opening structure (square matrix of zeros with 1 across the diagonal)
# Hough transform parameters
min_dist_sec = 0 # min dist in seconds between lines
hough_threshold = 25
# Only search for lines between these angles (45 corresponds to main diagonal)
hough_high_angle = 45.01
hough_low_angle = 44.99
# Distance between consecutive diagonals to be joined in seconds
min_diff_trav = 0.5
# Two segments must overlap in both x and y by <dupl_perc_overlap>
# to be considered the same, only the longest is returned
dupl_perc_overlap = 0.75
# Grouping diagonals
min_pattern_length_seconds = 1.5
min_length_cqt = min_pattern_length_seconds*sr/cqt_window
min_in_group = 2 # minimum number of patterns to be included in pattern group
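# Worked example of the frames <-> seconds conversion used just above and
# throughout the pipeline:
# 1.5 s * 44100 (sr) / 1988 (cqt_window) ~= 33.3, so min_length_cqt is ~33 CQT frames.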
# Exclusions
exclusion_functions = [contains_silence]
# Evaluation
annotations_path = 'annotations/koti_janmani.txt'
eval_tol = 0.5 # how much leniency on each side of an annotated pattern before considering it a match (seconds)
partial_perc = 0.75 # how much overlap an annotated and an identified pattern need to share to be considered a partial match
# Output
svara_cent_path = "conf/svara_cents.yaml"
svara_freq_path = "conf/svara_lookup.yaml"
tonic = 195.99
svara_cent = load_yaml(svara_cent_path)
svara_freq = load_yaml(svara_freq_path)
yticks_dict = {k:cents_to_pitch(v, tonic) for k,v in svara_cent.items()}
yticks_dict = {k:v for k,v in yticks_dict.items() if any([x in k for x in ['S', 'R2', 'G2', 'M1', 'P', 'D2', 'N2', 'S']])}
plot_kwargs = {
'yticks_dict':yticks_dict,
'cents':True,
'tonic':195.997718,
'emphasize':['S', 'S^'],
'figsize':(15,4)
}
# limit the number of groups outputted
top_n = 1000
####################
## Load sim array ##
####################
# Get similarity Matrix
print(f'Loading sim matrix from {sim_path}')
X = load_sim_matrix(sim_path)
# Sample for development
if all([s1,s2]):
save_imgs = s2-s1 <= 4000
X_samp = X.copy()[s1:s2,s1:s2]
else:
save_imgs = False
X_samp = X.copy()
sim_filename = os.path.join(out_dir, '1_Koti Janmani_simsave.png') if save_imgs else None
conv_filename = os.path.join(out_dir, '2_Koti Janmani_conv.png') if save_imgs else None
bin_filename = os.path.join(out_dir, '3_Koti Janmani_binary.png') if save_imgs else None
diag_filename = os.path.join(out_dir, '4_Koti Janmani_diag.png') if save_imgs else None
gauss_filename = os.path.join(out_dir, '5_Koti Janmani_gauss.png') if save_imgs else None
cont_filename = os.path.join(out_dir, '6_Koti Janmani_cont.png') if save_imgs else None
binop_filename = os.path.join(out_dir, '7_Koti Janmani_binop.png') if save_imgs else None
hough_filename = os.path.join(out_dir, '8_Koti Janmani_hough.png') if save_imgs else None
ext_filename = os.path.join(out_dir, '9_Koti Janmani_cont_ext.png') if save_imgs else None
if save_imgs:
skimage.io.imsave(sim_filename, X_samp)
##############
## Pipeline ##
##############
print('Convolving similarity matrix')
X_conv = convolve_array_tile(X_samp, cfilter=conv_filter)
if save_imgs:
skimage.io.imsave(conv_filename, X_conv)
print('Binarizing convolved array')
X_bin = binarize(X_conv, bin_thresh, filename=bin_filename)
#X_bin = binarize(X_conv, 0.05, filename=bin_filename)
print('Removing diagonal')
X_diag = remove_diagonal(X_bin)
if save_imgs:
skimage.io.imsave(diag_filename, X_diag)
if gauss_sigma:
print('Applying diagonal gaussian filter')
diagonal_gaussian(X_bin, gauss_sigma, filename=gauss_filename)
print('Binarize gaussian blurred similarity matrix')
binarize(X_gauss, cont_thresh, filename=cont_filename)
else:
X_gauss = X_diag
X_cont = X_gauss
print('Ensuring symmetry between upper and lower triangle in array')
X_sym = make_symmetric(X_cont)
print('Applying Hough Transform to edges')
peaks = hough_transform_new(X_sym, hough_high_angle, hough_low_angle, hough_threshold)
print(f'Extending edges in convolved array along Hough Lines with lower threshold of {bin_thresh_segment}, (previously {bin_thresh})')
X_ext = extend_segments(X_conv, X_sym, peaks, min_diff_trav, cqt_window, sr, bin_thresh_segment, perc_tail)
print('Identifying and isolating regions between edges')
X_fill = edges_to_contours(X_ext, etc_kernel_size)
print('Cleaning isolated non-directional regions using morphological opening')
X_binop = apply_bin_op(X_fill, binop_dim)
print('Ensuring symmetry between upper and lower triangle in array')
X_binop = make_symmetric(X_binop)
if save_imgs:
skimage.io.imsave(binop_filename, X_binop)
## Join segments that are sufficiently close
print('Extracting segments using flood fill and centroid')
# Format - [[(x,y), (x1,y1)],...]
t1 = datetime.datetime.now()
all_segments = extract_segments_new(X_binop)
print(f' {len(all_segments)} found...')
t2 = datetime.datetime.now()
print(f"time taken: {t2-t1}")
def save_object(obj, filename):
with open(filename, 'wb') as outp: # Overwrites any existing file.
pickle.dump(obj, outp, pickle.HIGHEST_PROTOCOL)
save_object(all_segments, 'output/all_segments.pkl')
with open('output/all_segments.pkl', 'rb') as f:
    all_segments = pickle.load(f)
silence_and_stable_mask = np.array([int(any([i,j])) for i,j in zip(silence_mask, stable_mask)])
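# Element-wise OR of the two masks, e.g. (toy values):
# silence_mask = [0, 1, 0], stable_mask = [1, 0, 0] -> silence_and_stable_mask = [1, 1, 0]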
print('Breaking segments with silent/stable regions')
# Format - [[(x,y), (x1,y1)],...]
all_broken_segments = break_all_segments(all_segments, silence_and_stable_mask, cqt_window, sr, timestep)
print(f' {len(all_broken_segments)} broken segments...')
#[(i,((x0,y0), (x1,y1))) for i,((x0,y0), (x1,y1)) in enumerate(all_segments) if x1-x0>10000]
print('Reducing Segments')
all_segments_reduced = remove_short(all_broken_segments, min_length_cqt)
print(f' {len(all_segments_reduced)} segments above minimum length of {min_pattern_length_seconds}s...')
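# A toy sketch (hypothetical frame numbers) of the segment format used here:
# each segment is a pair of (x, y) endpoints in CQT frames, and remove_short is
# assumed to drop segments whose span is below min_length_cqt (~33 frames):
#
# seg_a = ((100, 4100), (120, 4120))   # 20-frame span  -> assumed dropped
# seg_b = ((100, 4100), (300, 4300))   # 200-frame span -> kept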
from exploration.segments import *
all_segs = all_segments_reduced
# sort by shortest -> longest
ordered_segments = sorted(all_segs, key=lambda y: y[1][0]-y[0][0])
types_dict = {i:0 for i in range(1,10)}
matches_dict = {}
all_new_segs = []
# connect segments based on symmetry
for i, ((x0, y0), (x1, y1)) in enumerate(ordered_segments):
    # find segments whose y span equals this segment's x span (its mirror across the diagonal)
this = [j for j,x in enumerate(ordered_segments) if x0==x[0][1] and x1==x[1][1]]
for t in this:
update_dict(matches_dict, i, t)
update_dict(matches_dict, t, i)
# match segment with itself
update_dict(matches_dict, i, i)
# to indicate whether a segment has been grouped
used = [0]*len(ordered_segments)
# For when we create new segments
max_i = len(ordered_segments)-1
import tqdm
for i, ((Qx0, Qy0), (Qx1, Qy1)) in tqdm.tqdm(list(enumerate(ordered_segments))):
for j, [(Rx0, Ry0), (Rx1, Ry1)] in enumerate(ordered_segments):
# horizontal pass
res = compare_segments(i, j, Qx0, Qy0, Qx1, Qy1, Rx0, Ry0, Rx1, Ry1, min_length_cqt, all_new_segs, max_i, matches_dict)
all_new_segs, max_i, matches_dict = res
# vertical pass (swap xs and ys)
res2 = compare_segments(i, j, Qx0, Qy0, Qx1, Qy1, Ry0, Rx0, Ry1, Rx1, min_length_cqt, all_new_segs, max_i, matches_dict)
all_new_segs, max_i, matches_dict = res2
old_and_new_segs = {i:((x0,y0), (x1,y1)) for i,((x0,y0), (x1,y1)) in enumerate(ordered_segments + all_new_segs) if is_good_segment(x0, y0, x1, y1, 0.6, silence_and_stable_mask, cqt_window, timestep, sr)}
# join segments that are sufficiently close to each other (if they are small)
# extend segments to silence
# remove diagonal from returned patterns
#all_segments_reduced = [((x0,y0), (x1,y1)) for ((x0,y0), (x1,y1)) in all_segments_reduced if not ((y0-100 < x0 < y0+100) or (x0-100 < y0 < x0+100))]
#print(f' {len(all_segments_reduced)} segments not along diagonal')
# remove duplicates properly
# within group alignment using dtw
print('Grouping Segments')
all_groups = matches_dict_to_groups(matches_dict)
check_groups_unique(all_groups)
all_groups = [[old_and_new_segs[i] for i in ag if i in old_and_new_segs] for ag in all_groups]
all_groups = [[((x0,x1),(y0,y1)) for ((x0,y0),(x1,y1)) in ag] for ag in all_groups]
all_groups = [sorted([x for y in ag for x in y]) for ag in all_groups]
all_groups = [remove_group_duplicates(g, 0.50) for g in all_groups]
all_groups = [sorted(ag, key=lambda y:y[0]) for ag in all_groups]
# sort groups
all_groups = [sorted(arr, key=lambda y: y[0]) for arr in all_groups]
all_groups = sorted(all_groups, key=lambda y: -len(y))
all_groups = [x for x in all_groups if len(x) > 0]
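# At this point each entry of all_groups should be a list of (start, end) pairs
# in CQT frames, one pair per occurrence of a repeated pattern, e.g.
# (hypothetical values):
#
# all_groups[0]   # -> [(4120, 4300), (7810, 7995), (12050, 12230)]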
def same_group(group1, group2, perc_overlap=dupl_perc_overlap):
for x0,x1 in group1:
for y0,y1 in group2:
overlap = do_patterns_overlap(x0, x1, y0, y1, perc_overlap=perc_overlap)
if overlap:
                return True
    return False
## Remove those that are identical
group_match_dict = {}
for i, ag1 in enumerate(all_groups):
for j, ag2 in enumerate(all_groups):
if same_group(ag1, ag2):
update_dict(group_match_dict, i, j)
update_dict(group_match_dict, j, i)
all_groups_ix = matches_dict_to_groups(group_match_dict)
all_groups_ix = [list(set(x)) for x in all_groups_ix]
all_groups = [[x for i in group for x in all_groups[i]] for group in all_groups_ix]
all_groups = [remove_group_duplicates(g, 0.50) for g in all_groups]
print(f' {len(all_groups)} groups found...')
print('Convert sequences to pitch track timesteps')
starts_seq, lengths_seq = convert_seqs_to_timestep(all_groups, cqt_window, sr, timestep)
print('Applying exclusion functions')
#starts_seq_exc, lengths_seq_exc = apply_exclusions(raw_pitch, starts_seq, lengths_seq, exclusion_functions, min_in_group)
starts_seq_exc, lengths_seq_exc = remove_below_length(starts_seq, lengths_seq, timestep, min_pattern_length_seconds)
starts_seq_exc = [p for p in starts_seq_exc if len(p)>min_in_group]
lengths_seq_exc = [p for p in lengths_seq_exc if len(p)>min_in_group]
print('Extend all segments to stable or silence')
silence_and_stable_mask_2 = np.array([1 if any([i==2,j==2]) else 0 for i,j in zip(silence_mask, stable_mask)])
def extend_to_mask(starts_seq_exc, lengths_seq_exc, mask, toler=0.5):
mask_i = list(range(len(mask)))
starts_seq_ext = []
lengths_seq_ext = []
for i in range(len(starts_seq_exc)):
s_group = starts_seq_exc[i]
l_group = lengths_seq_exc[i]
this_group_s = []
this_group_l = []
for j in range(len(s_group)):
l = l_group[j]
s1 = s_group[j]
s2 = s1 + l
s1_ = s1 - round(l*toler)
s2_ = s2 + round(l*toler)
midpoint = s1 + round(l/2)
s1_mask = list(mask[s1_:s1])
s2_mask = list(mask[s2:s2_])
s1_mask_i = list(mask_i[s1_:s1])
s2_mask_i = list(mask_i[s2:s2_])
if 1 in s1_mask:
ix = len(s1_mask) - s1_mask[::-1].index(1) - 1
s1 = s1_mask_i[ix]
if 1 in s2_mask:
ix = s2_mask.index(1)
s2 = s2_mask_i[ix]
l = s2 - s1
this_group_s.append(s1)
this_group_l.append(l)
starts_seq_ext.append(this_group_s)
lengths_seq_ext.append(this_group_l)
return starts_seq_ext, lengths_seq_ext
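# A tiny worked example of the snapping behaviour above (commented out so it
# does not interfere with the pipeline): each occurrence is extended outwards,
# by at most toler * length on each side, to the nearest mask == 1 sample.
#
# _demo_mask = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
# extend_to_mask([[5]], [[5]], _demo_mask)   # -> ([[3]], [[8]])
# # the occurrence covering [5, 10) grows to [3, 11): both ends snap to the 1s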
starts_seq_ext, lengths_seq_ext = extend_to_mask(starts_seq_exc, lengths_seq_exc, silence_and_stable_mask_2)
starts_sec_ext = [[x*timestep for x in p] for p in starts_seq_ext]
lengths_sec_ext = [[x*timestep for x in l] for l in lengths_seq_ext]
print('Evaluating')
annotations_orig = load_annotations_new(annotations_path)
if s1:
annotations_filt = annotations_orig[(annotations_orig['s1']>=s1*cqt_window/sr) & (annotations_orig['s1']<=s2*cqt_window/sr)]
annotations_filt['s1'] = annotations_filt['s1']-s1*cqt_window/sr
annotations_filt['s2'] = annotations_filt['s2']-s1*cqt_window/sr
else:
annotations_filt = annotations_orig
annotations_filt = annotations_filt[annotations_filt['tier']!='short_motif']
#metrics, annotations_tag = evaluate_all_tiers(annotations_filt, starts_sec_exc, lengths_sec_exc, eval_tol, partial_perc)
print('')
n_patterns = sum([len(x) for x in starts_seq_ext])
coverage = get_coverage(pitch, starts_seq_ext, lengths_seq_ext)
print(f'Number of Patterns: {n_patterns}')
print(f'Number of Groups: {len(starts_sec_ext)}')
print(f'Coverage: {round(coverage,2)}')
#evaluation_report(metrics)
annotations_tagged = evaluate_quick(annotations_filt, starts_sec_ext, lengths_sec_ext, eval_tol, partial_perc)
############
## Output ##
############
print('Writing all sequences')
plot_all_sequences(raw_pitch, time, lengths_seq_ext[:top_n], starts_seq_ext[:top_n], 'output/new_group', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, starts_seq_ext[:top_n], lengths_seq_ext[:top_n], timestep, 'output/new_group')
annotations_tagged.to_csv('output/new_group/annotations.csv', index=False)
# all_recalls =[]
# partial_percs = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
# for p in partial_percs:
# metrics, annotations_tag = evaluate_all_tiers(annotations_filt, starts_sec_exc, lengths_sec_exc, eval_tol, partial_perc=p)
# all_recalls.append(metrics['pasrtial_match_recall_all'])
# plt.figure(figsize=(10,5))
# plt.plot(partial_percs, all_recalls)
# plt.xlabel('Partial precision overlap')
# plt.ylabel('Partial recall')
# plt.grid()
# plt.savefig('images/recall_against_partial_perc.png')
# plt.close('all')
# all_recalls = []
# all_evals = [0.05*i for i in range(int(10/0.05))]
# for e in all_evals:
# these_metrics = evaluate_all_tiers(annotations_orig, starts_sec_exc, lengths_sec_exc, e)
# all_recalls.append(these_metrics['full_match_recall_all'])
# plt.figure(figsize=(10,5))
# plt.plot(all_evals, all_recalls)
# plt.title('Performance with varying evaluation tolerance')
# plt.xlabel('Evaluation tolerance')
# plt.ylabel('Recall for all patterns')
# plt.grid()
# plt.savefig('images/eval_tol_experiment.png')
# plt.close('all')
# Plot annotation on self sim
annotations_orig_filt = annotations_orig[annotations_orig['text']=='ma ga ma pa ma ga ri sa']
X_annotate_single = add_annotations_to_plot(X_canvas, annotations_orig_filt, sr, cqt_window)
X_joined = join_plots(X_orig, X_annotate_single[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/5_self_sim_annotate_single.png', X_joined.astype(np.uint8))
single_group = annotations_orig_filt[['s1','s2']].values
len_single = [[int((y-x)/timestep) for x,y in single_group]]
start_single = [[int(x/timestep) for x,y in single_group]]
print('Writing single sequences')
plot_all_sequences(raw_pitch, time, len_single, start_single, 'output/single_test', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, start_single, len_single, timestep, 'output/single_test')
# Output annotation
#####################################################
## Plotting annotations and Results on Sim Matrix ##
#####################################################
from exploration.visualisation import add_line_to_plot, get_lines, add_annotations_to_plot, add_patterns_to_plot, add_segments_to_plot, join_plots
import matplotlib
X_canvas = X.copy()
X_canvas[:] = 0
samp1 = 5000
samp2 = 9000
# Orig matrix
X_orig = X.copy()[samp1:samp2,samp1:samp2]
# Annotations
X_annotate = add_annotations_to_plot(X_canvas, annotations_orig, sr, cqt_window)
# Found segments from image processing
X_segments = add_segments_to_plot(X_canvas, all_segments)
# Found segments broken from image processing
X_segments_reduced = add_segments_to_plot(X_canvas, all_segments_reduced)
# Patterns from full pipeline
X_patterns = add_patterns_to_plot(X_canvas, starts_sec_exc, lengths_sec_exc, sr, cqt_window)
X_joined = join_plots(X_orig, X_canvas[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/0_self_sim.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_segments[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/1_self_sim_segments.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_segments_reduced[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/1_self_sim_segments_reduced.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_binop[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/2_self_sim_binop.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_annotate[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/3_self_sim_annotate.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_annotate[samp1:samp2,samp1:samp2], X_patterns[samp1:samp2,samp1:samp2])
matplotlib.image.imsave('images/4_annotations_patterns.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_patterns[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/5_self_sim_patterns.png', X_joined.astype(np.uint8))
X_test = np.zeros((50,50))
x0 = 5
y0 = 34
x1 = 15
y1 = 47
X_test[x0,y0] = 1
X_test[x1,y1] = 1
from exploration.segments import line_through_points
get_x, get_y = line_through_points(x0,y0,x1,y1)
line_x = [round(get_x(y)) for y in range(y0,y1)]
line_y = [round(get_y(x)) for x in range(x0,x1)]
line_x = [line_x[i] for i in range(len(line_x)) if line_x[i-1] != line_x[i]]
line_y = [line_y[i] for i in range(len(line_y)) if line_y[i-1] != line_y[i]]
X_test[line_x,line_y] = 1
matplotlib.image.imsave('images/line_through_points.png', X_test)
###########################
## All Patterns Grouping ##
###########################
import itertools
import fastdtw
from scipy.spatial.distance import euclidean
import tqdm
dtw_radius_frac = 45
all_seq_separated = [x for y in starts_seq_exc for x in y]
all_len_separated = [x for y in lengths_seq_exc for x in y]
all_indices = list(range(len(all_seq_separated)))
all_seq_dtw = pd.DataFrame(columns=['i1', 'i2', 'dtw', 'cos', 'cos_recip', 'cos_zero', 'cos_zero_recip', 'dtw_min_length', 'len_seq1_dtw', 'len_seq2_dtw', 'len_cos'])
for i1, i2 in tqdm.tqdm(list(itertools.combinations(all_indices, 2))):
# DTW From pitch track
s1 = all_seq_separated[i1]
s2 = all_seq_separated[i2]
l1 = all_len_separated[i1]
l2 = all_len_separated[i2]
seq1 = pitch[s1:s1+l1]
seq2 = pitch[s2:s2+l2]
min_length = min([len(seq1), len(seq2)])
dtw = fastdtw.fastdtw(seq1, seq2, radius=int(min_length/dtw_radius_frac), dist=euclidean)[0]/min_length
# Cosine from similarity matrix
scqt1 = int(s1*(sr*timestep)/cqt_window)
scqt2 = int(s2*(sr*timestep)/cqt_window)
lcqt1 = int(l1*(sr*timestep)/cqt_window)
lcqt2 = int(l2*(sr*timestep)/cqt_window)
x0 = scqt1
y0 = scqt2
x1 = scqt1 + lcqt1
y1 = scqt2 + lcqt2
length = int(np.hypot(x1-x0, y1-y0))
x, y = np.linspace(x0, x1, length), np.linspace(y0, y1, length)
x = x.astype(int)
y = y.astype(int)
# Extract the values along the line
zi = X[x, y]
# X stores reciprocal of the cosine distance
cos = np.mean(1/zi)
cos_recip = np.mean(zi)
zi[zi<0] = 0
cos_zero = np.mean(1/zi)
cos_zero_recip = np.mean(zi)
row = {
'i1':i1,
'i2':i2,
'dtw':dtw,
'cos_recip':cos_recip,
'cos':cos,
'cos_zero_recip':cos_zero_recip,
'cos_zero':cos_zero,
'dtw_min_length':min_length,
'len_seq1_dtw':len(seq1),
'len_seq2_dtw':len(seq2),
'len_cos':len(zi)
}
all_seq_dtw = all_seq_dtw.append(row, ignore_index=True)
# add self similarity
for i in all_indices:
row = {
'i1':i,
'i2':i,
'dtw':0,
'cos_recip':np.Inf,
'cos':0,
'cos_zero_recip':np.Inf,
'cos_zero':0,
'dtw_min_length':all_len_separated[i],
'len_seq1_dtw':all_len_separated[i],
'len_seq2_dtw':all_len_separated[i],
'len_cos':all_len_separated[i]
}
all_seq_dtw = all_seq_dtw.append(row, ignore_index=True)
all_seq_dtw.to_csv('results_tables/new_model_dtw_all_pairs.csv', index=False)
# Similarity Distribution Plots
plt.hist(all_seq_dtw['dtw'].values, bins=500, color='darkgreen')
plt.title('Distribution of inter-sequence DTW')
plt.xlabel('DTW bin')
plt.ylabel('Population')
plt.savefig('images/dtw_histogram.png')
plt.close('all')
for_plot = all_seq_dtw[all_seq_dtw['cos_zero']!=np.Inf]
plt.hist(for_plot['cos_zero'].values, bins=250, color='darkgreen')
plt.title('Distribution of inter-sequence cosine distance')
plt.xlabel('Cosine distance bin')
plt.ylabel('Population')
plt.savefig('images/cos_histogram.png')
plt.close('all')
# Clustering
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as hcluster
distance = all_seq_dtw\
.pivot(index='i1', columns='i2', values='dtw')\
.fillna(0)
data = distance.values
for i in range(data.shape[0]):
for j in range(i, data.shape[0]):
data[j][i] = data[i][j]
distVec = ssd.squareform(data)
linkage = hcluster.linkage(distVec, method='ward')
clustering = hcluster.cut_tree(linkage, n_clusters=range(len(linkage)))
from scipy.spatial.distance import euclidean
def DaviesBouldin(X, labels):
n_cluster = len(np.bincount(labels))
cluster_k = [X[labels == k] for k in range(n_cluster)]
centroids = [np.mean(k, axis = 0) for k in cluster_k]
variances = [np.mean([euclidean(p, centroids[i]) for p in k]) for i, k in enumerate(cluster_k)]
db = []
for i in range(n_cluster):
for j in range(n_cluster):
if j != i:
db.append((variances[i] + variances[j]) / euclidean(centroids[i], centroids[j]))
return(np.max(db) / n_cluster)
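# Quick sanity check of the scoring function on two tight, well-separated
# clusters (commented out; lower scores are better for this variant):
#
# _X = np.array([[0.], [1.], [10.], [11.]])
# _labels = np.array([0, 0, 1, 1])
# DaviesBouldin(_X, _labels)   # -> 0.05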
def evaluate(distance, clustering_results, k_min, k_max):
X = distance.values
return [DaviesBouldin(X, clustering_results[:,i]) \
for i in range(len(clustering_results))[k_min:k_max]]
k_min = 2
k_max = 100
evaluation = evaluate(distance, clustering, k_min, k_max)
from kneed import KneeLocator
x = list(range(k_min, k_max))
knee = KneeLocator(x, evaluation, S=0.4, curve="convex", direction="decreasing").knee
plt.figure(figsize=(12,5))
plt.plot(x, evaluation,color='darkgreen')
plt.xticks(np.arange(min(x), max(x)+1, 2.0),size=8)
plt.axvline(knee, linestyle='--', color='darkred', linewidth=0.7)
plt.title('Davies Bouldin Score for n clusters')
plt.xlabel('Number of Clusters, n')
plt.ylabel('DBS')
plt.grid()
plt.savefig('images/DaviesBouldin.png')
plt.close('all')
## Best model
n = 24
cluster_seqs = {}
cluster_lens = {}
for ix,c in enumerate(clustering[:,n]):
if c in cluster_seqs:
cluster_seqs[c].append(all_seq_separated[ix])
cluster_lens[c].append(all_len_separated[ix])
else:
cluster_seqs[c] = [all_seq_separated[ix]]
cluster_lens[c] = [all_len_separated[ix]]
cluster_seqs = [v for k,v in cluster_seqs.items()]
cluster_lens = [v for k,v in cluster_lens.items()]
plot_all_sequences(raw_pitch, time, cluster_lens[:top_n], cluster_seqs[:top_n], 'output/clustering', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, cluster_seqs[:top_n], cluster_lens[:top_n], timestep, 'output/clustering')
############################
# Plot individual sequence #
############################
from exploration.visualisation import plot_subsequence_w_stability
sp = 5300
l = 1000
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/stab_check.png', plot_kwargs=plot_kwargs)
sp = x_start_ts
l = x_end_ts - x_start_ts
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/seqx_stab.png', plot_kwargs=plot_kwargs)
sp = y_start_ts
l = y_end_ts - y_start_ts
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/seqy_stab.png', plot_kwargs=plot_kwargs)
############
# Database #
############
from exploration.utils import sql
from credentials import settings
import psycopg2
def insertResults(records, params):
try:
connection = psycopg2.connect(**settings)
cursor = connection.cursor()
# Update single record now
sql_insert_query = """
INSERT INTO results
(patternnumber, recordingid, elementnumber, durationelements, starttimeseconds, durationseconds, patterngroup, rankingroup)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.executemany(sql_insert_query, records)
connection.commit()
count = cursor.rowcount
print(count, "Record Updated successfully ")
except (Exception, psycopg2.Error) as error:
print("Error in update operation", error)
finally:
# closing database connection.
if connection:
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def insertSimilarity(records, params):
try:
connection = psycopg2.connect(**settings)
cursor = connection.cursor()
# Update single record now
sql_insert_query = """
INSERT INTO similarity
(patternnumberone, patternnumbertwo, similarityname, similarity)
VALUES(%s, %s, %s, %s)"""
cursor.executemany(sql_insert_query, records)
connection.commit()
count = cursor.rowcount
print(count, "Record Updated successfully ")
except (Exception, psycopg2.Error) as error:
print("Error in update operation", error)
finally:
# closing database connection.
if connection:
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
recording_id = 'brovabarama'
records = []
pattern_num = 0
pattern_num_lookup = {}
for i, seq in enumerate(starts_seq_cut):
for j, s in enumerate(seq):
length = lengths_seq_cut[i][j]
length_secs = round(length*timestep,2)
start_time_secs = round(s*timestep,2)
records.append((pattern_num, recording_id, s, length, start_time_secs, length_secs, i, j))
pattern_num_lookup[pattern_num] = (i,j)
pattern_num += 1
insertTable(records, settings)
import itertools
similarities = []
for s1, s2 in itertools.combinations(pattern_num_lookup.keys(), 2):
for n in ['cosine', 'dtw', 'eucliedean']:
similarities.append((s1, s2, n, np.random.random()))
# train model more
# - parameters
# Tune frequency bands
# for this music, perhaps a standard fourier transform would work better?
# what is fmin
# how many octaves
# frequency distribution across all tracks can inform parameters
# - check graphs
# - no further test performance increase after ~1250 epochs
# link features to annotations from Lara for phrase onset detection
# load features and annotations
from complex_auto.util import load_pyc_bz
import textgrid
import pandas as pd
import math
def load_annotations(path):
"""
Load text grid annotations from <path>
return pandas df
"""
tg = textgrid.TextGrid.fromFile(path)
df = pd.DataFrame(columns=['tier','s1', 's2', 'text'])
for tier in tg:
name = tier.name
intervals = tier.intervals
for i in intervals:
d = {
'tier':name,
's1': i.minTime,
's2': i.maxTime,
'text': i.mark
}
df = df.append(d, ignore_index=True)
return df
def transform_features(features):
amp_arr = features[0].detach().numpy()
phase_arr = features[1].detach().numpy()
nbins = amp_arr.shape[1]
amp_cols = [f'amp_{i}' for i in range(nbins)]
phase_cols = [f'phase_{i}' for i in range(nbins)]
amp_df = pd.DataFrame(amp_arr, columns=amp_cols)
phase_df = pd.DataFrame(phase_arr, columns=phase_cols)
df = pd.concat([amp_df, phase_df], axis=1)
df['window_num'] = df.index
return df
def second_to_window(onset, sr, hop_size):
onset_el = onset*sr
window_num = math.floor(onset_el/hop_size)
return window_num
features_paths = [
'output/hpc/Koti Janmani.multitrack-vocal.mp3_repres.pyc.bz',
'output/hpc/Shankari Shankuru.multitrack-vocal.mp3_repres.pyc.bz',
'output/hpc/Sharanu Janakana.multitrack-vocal.mp3_repres.pyc.bz'
]
annotations_paths = [
'../carnatic-motifs/Akkarai_Sisters_-_Koti_Janmani_multitrack-vocal_-_ritigowla.TextGrid',
'../carnatic-motifs/Akkarai_Sisters_-_Shankari_Shankuru_multitrack-vocal_-_saveri.TextGrid',
'../carnatic-motifs/Salem_Gayatri_Venkatesan_-_Sharanu_Janakana_multitrack-vocal_-_bilahari_copy.TextGrid'
]
all_features = pd.DataFrame()
for i,(fp, ap) in enumerate(zip(features_paths, annotations_paths)):
# array of [amplitude, phase]
features_raw = load_pyc_bz(fp)
features = transform_features(features_raw)
annotations = load_annotations(ap)
hop_size = cqt_window # 1984
annotations['window_num'] = annotations['s1'].apply(lambda y: second_to_window(y, sr, hop_size))
features['is_onset'] = features['window_num'].isin(annotations['window_num'])
features['is_test'] = i==2
all_features = all_features.append(features, ignore_index=True)
# Classification
import lightgbm as lgb
from scipy.stats import randint as sp_randint
from sklearn.model_selection import (GridSearchCV, GroupKFold, KFold,
RandomizedSearchCV, TimeSeriesSplit,
cross_val_score, train_test_split)
from sklearn.metrics import recall_score, precision_score, f1_score, roc_auc_score
def random_float_inrange(N,a=0.005,b=0.1):
return[((b - a) * np.random.random_sample()) + a for _ in range(N)]
#df_train, df_test = train_test_split(all_features, test_size=0.4, random_state=42)
df_train = all_features[all_features['is_test']==False]
df_test = all_features[all_features['is_test']==True]
# resample
# Resample to account for huge sparsity
pos_frame = df_train[df_train['is_onset']==1]
neg_frame = df_train[df_train['is_onset']!=1]
while sum(df_train['is_onset'])/len(df_train) < 0.3:
print(sum(df_train['is_onset'])/len(df_train))
random_rec = pos_frame.sample(1000)
df_train = df_train.append(random_rec, ignore_index=True)
# shuffle frame
df_train = df_train.iloc[np.random.permutation(len(df_train))].reset_index(drop=True)
feat_names = [c for c in df_train if c not in ['is_onset', 'window_num', 'is_test']]
X_train = df_train[feat_names].values
y_train = df_train['is_onset'].values
X_test = df_test[feat_names].values
y_test = df_test['is_onset'].values
param_dist = {'reg_sqrt':[True],
'learning_rate':[0.001,0.01,0.1, 0.5],
'max_depth':[2,4,8,12],
'min_data_in_leaf':[1,5,10],
'num_leaves':[5,10,15,20,25],
'n_estimators':[100,200,300,400],
'colsample_bytree':[0.6, 0.75, 0.9]}
# Final features from gridsearch
final_params = {
'colsample_bytree': 0.6463615939999198,
'learning_rate': 0.1280212488889668,
'max_depth': 40,
'min_data_in_leaf': 27,
'n_estimators': 982,
'num_leaves': 46,
'reg_sqrt': True
}
lgb_model = lgb.LGBMClassifier(**final_params)
# Gridsearch
lgb_model = lgb.LGBMClassifier()
lgb_model = RandomizedSearchCV(lgb_model, param_distributions=param_dist,
n_iter=1000, cv=3, n_jobs=-1,
scoring='recall', random_state=42)
lgb_model.fit(X_train, y_train)
y_pred = lgb_model.predict(X_test)
for scorer in recall_score, precision_score, f1_score, roc_auc_score:
print(f'{scorer.__name__}: {scorer(y_test, y_pred)}')
importances = list(sorted(zip(feat_names, lgb_model.feature_importances_), key=lambda y: -y[1]))
importances[:10]
# black out similarity grid based on
# consonant onset
# silence
# stability
# link db to ladylane
sql("""
SELECT
results.patternnumber,
results.patterngroup,
results.rankingroup,
results.starttimeseconds,
results.durationseconds
FROM results
WHERE results.recordingid = 'brovabarama'
AND results.patterngroup = 1
""")
sql("""
SELECT
patternnumberone,
patternnumbertwo,
similarity,
similarityname
FROM similarity
WHERE similarityname = 'cosine'
AND (patternnumberone = 4 OR patternnumbertwo = 4)
ORDER BY similarity
""")
insertSimilarity(similarities, settings)
#######################
# Output subsequences #
#######################
from exploration.visualisation import plot_all_sequences, plot_pitch
from exploration.io import write_all_sequence_audio
plot_kwargs = {
'yticks_dict':{},
'cents':True,
'tonic':195.997718,
'emphasize':{},#['S', 'S^'],
'figsize':(15,4)
}
starts_seq_cut = [[a,c] for a,b,c,d in patterns_seq]
lengths_seq_cut = [[max([b-a, d-c])]*2 for a,b,c,d in patterns_seq]
plot_all_sequences(pitch, time, lengths_seq_cut, starts_seq_cut, out_dir, clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, starts_seq_cut, lengths_seq_cut, timestep, out_dir)
# x Exclusion mask apply
# - Output patterns and audio with plots
# - Store in database
# - recording_id, seq_num, duration_seq, seq_sec, duration_sec, group number, group rank
# - Quick get next pattern
| %load_ext autoreload
%autoreload 2
import datetime
import os
import pickle
import skimage.io
from complex_auto.motives_extractor import *
from complex_auto.motives_extractor.extractor import *
from exploration.pitch import extract_pitch_track
from exploration.img import (
remove_diagonal, convolve_array_tile, binarize, diagonal_gaussian,
hough_transform, hough_transform_new, scharr, sobel,
apply_bin_op, make_symmetric, edges_to_contours)
from exploration.segments import (
extract_segments_new, get_all_segments, break_all_segments, do_patterns_overlap, reduce_duplicates,
remove_short, extend_segments, compare_segments)
from exploration.sequence import (
apply_exclusions, contains_silence, min_gap, too_stable,
convert_seqs_to_timestep, get_stability_mask, add_center_to_mask,
remove_below_length)
from exploration.evaluation import evaluate_quick, load_annotations_new, evaluate_all_tiers, evaluation_report, get_coverage
from exploration.visualisation import plot_all_sequences, plot_pitch
from exploration.io import load_sim_matrix, write_all_sequence_audio, load_yaml
from exploration.pitch import cents_to_pitch, pitch_seq_to_cents, pitch_to_cents
################
## Parameters ##
################
# Output paths of each step in pipeline
out_dir = os.path.join("output", 'hpc')
sim_path = 'output/full_dataset/Koti Janmani.multitrack-vocal.mp3.npy'
### Pitch Extraction
# Sample rate of audio
sr = 44100
# size in frames of cqt window from convolution model
cqt_window = 1988 # was previously set to 1988
# Take sample of data, set to None to use all data
s1 = None # lower bound index (5000 has been used for testing)
s2 = None # higher bound index (9000 has been used for testing)
# pitch track extraction
frameSize = 2048 # For Melodia pitch extraction
hopSize = 128 # For Melodia pitch extraction
gap_interp = 250*0.001 # Interpolate pitch-track gaps of <gap_interp> seconds or less [set to None to skip]
smooth = 7 # sigma for gaussian smoothing of pitch track [set to None to skip]
audio_path_vocal = 'audio/Akkarai Sisters at Arkay by Akkarai Sisters/<NAME>/Koti Janmani.multitrack-vocal.mp3'
audio_path = "audio/full_dataset/Akkarai_Sisters_at_Arkay/Akkarai Sisters - Akkarai Sisters at Arkay/1 - 3 - Akkarai Sisters - Koti Janmani.mp3"
# stability identification
stab_hop_secs = 0.2 # window size for stab computations in seconds
min_stability_length_secs = 1.0 # minimum length of region to be considered stable, in seconds
freq_var_thresh_stab = 10 # max variation in pitch to be considered stable region
print('Extracting pitch track')
pitch, raw_pitch, timestep, time = extract_pitch_track(audio_path_vocal, frameSize, hopSize, gap_interp, smooth, sr)
print('Computing stability/silence mask')
stable_mask = get_stability_mask(raw_pitch, min_stability_length_secs, stab_hop_secs, freq_var_thresh_stab, timestep)
silence_mask = (raw_pitch == 0).astype(int)
silence_mask = add_center_to_mask(silence_mask)
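# Both masks are defined per pitch-track timestep: silence_mask marks frames with no detected
# pitch and stable_mask marks held (low-variation) notes; they are combined later to break
# segments and to extend patterns to their nearest silent/stable boundary.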
### Image Processing
# convolutional filter
conv_filter = sobel
# Binarize raw sim array 0/1 below and above this value...
# depends completely on filter passed to convolutional step
# Best...
# scharr, 0.56
# sobel unidirectional, 0.1
# sobel bidirectional, 0.15
bin_thresh = 0.11
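# The filter/threshold pairs above could be kept next to the filter choice so the two stay in
# sync, e.g. (illustrative only, not part of the original pipeline):
# bin_thresh_by_filter = {scharr: 0.56, sobel: 0.11}
# bin_thresh = bin_thresh_by_filter[conv_filter]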
# lower bin_thresh for areas surrounding segments
bin_thresh_segment = 0.085
# percentage either side of a segment considered for the lower bin thresh
perc_tail = 0.5
# Gaussian filter along diagonals with sigma...
gauss_sigma = None
# After gaussian, re-binarize with this threshold
cont_thresh = 0.15
# morphology params
etc_kernel_size = 10 # For closing
binop_dim = 3 # square dimension of binary opening structure (square matrix of zeros with 1 across the diagonal)
# Hough transform parameters
min_dist_sec = 0 # min dist in seconds between lines
hough_threshold = 25
# Only search for lines between these angles (45 corresponds to main diagonal)
hough_high_angle = 45.01
hough_low_angle = 44.99
# Distance between consecutive diagonals to be joined in seconds
min_diff_trav = 0.5
# Two segments must overlap in both x and y by <dupl_perc_overlap>
# to be considered the same, only the longest is returned
dupl_perc_overlap = 0.75
# Grouping diagonals
min_pattern_length_seconds = 1.5
min_length_cqt = min_pattern_length_seconds*sr/cqt_window
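# e.g. with sr=44100 and cqt_window=1988, 1.5 s corresponds to ~1.5*44100/1988 ≈ 33 CQT frames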
min_in_group = 2 # minimum number of patterns to be included in pattern group
# Exclusions
exclusion_functions = [contains_silence]
# Evaluation
annotations_path = 'annotations/koti_janmani.txt'
eval_tol = 0.5 # leniency on each side of an annotated pattern before considering it a match (seconds)
partial_perc = 0.75 # how much overlap an annotated and an identified pattern need to be considered a partial match
# Output
svara_cent_path = "conf/svara_cents.yaml"
svara_freq_path = "conf/svara_lookup.yaml"
tonic = 195.99
svara_cent = load_yaml(svara_cent_path)
svara_freq = load_yaml(svara_freq_path)
yticks_dict = {k:cents_to_pitch(v, tonic) for k,v in svara_cent.items()}
yticks_dict = {k:v for k,v in yticks_dict.items() if any([x in k for x in ['S', 'R2', 'G2', 'M1', 'P', 'D2', 'N2', 'S']])}
plot_kwargs = {
'yticks_dict':yticks_dict,
'cents':True,
'tonic':195.997718,
'emphasize':['S', 'S^'],
'figsize':(15,4)
}
# limit the number of groups outputted
top_n = 1000
####################
## Load sim array ##
####################
# Get similarity Matrix
print(f'Loading sim matrix from {sim_path}')
X = load_sim_matrix(sim_path)
# Sample for development
if all([s1,s2]):
save_imgs = s2-s1 <= 4000
X_samp = X.copy()[s1:s2,s1:s2]
else:
save_imgs = False
X_samp = X.copy()
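# Intermediate images of each pipeline step are only written when working on a small sample (<= 4000 frames)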
sim_filename = os.path.join(out_dir, '1_Koti Janmani_simsave.png') if save_imgs else None
conv_filename = os.path.join(out_dir, '2_Koti Janmani_conv.png') if save_imgs else None
bin_filename = os.path.join(out_dir, '3_Koti Janmani_binary.png') if save_imgs else None
diag_filename = os.path.join(out_dir, '4_Koti Janmani_diag.png') if save_imgs else None
gauss_filename = os.path.join(out_dir, '5_Koti Janmani_gauss.png') if save_imgs else None
cont_filename = os.path.join(out_dir, '6_Koti Janmani_cont.png') if save_imgs else None
binop_filename = os.path.join(out_dir, '7_Koti Janmani_binop.png') if save_imgs else None
hough_filename = os.path.join(out_dir, '8_Koti Janmani_hough.png') if save_imgs else None
ext_filename = os.path.join(out_dir, '9_Koti Janmani_cont_ext.png') if save_imgs else None
if save_imgs:
skimage.io.imsave(sim_filename, X_samp)
##############
## Pipeline ##
##############
print('Convolving similarity matrix')
X_conv = convolve_array_tile(X_samp, cfilter=conv_filter)
if save_imgs:
skimage.io.imsave(conv_filename, X_conv)
print('Binarizing convolved array')
X_bin = binarize(X_conv, bin_thresh, filename=bin_filename)
#X_bin = binarize(X_conv, 0.05, filename=bin_filename)
print('Removing diagonal')
X_diag = remove_diagonal(X_bin)
if save_imgs:
skimage.io.imsave(diag_filename, X_diag)
if gauss_sigma:
    print('Applying diagonal gaussian filter')
    # keep the filtered and re-binarized arrays for the steps below
    X_gauss = diagonal_gaussian(X_bin, gauss_sigma, filename=gauss_filename)
    print('Binarizing gaussian-blurred similarity matrix')
    X_cont = binarize(X_gauss, cont_thresh, filename=cont_filename)
else:
    X_gauss = X_diag
    X_cont = X_gauss
print('Ensuring symmetry between upper and lower triangle in array')
X_sym = make_symmetric(X_cont)
print('Applying Hough Transform to edges')
peaks = hough_transform_new(X_sym, hough_high_angle, hough_low_angle, hough_threshold)
print(f'Extending edges in convolved array along Hough Lines with lower threshold of {bin_thresh_segment}, (previously {bin_thresh})')
X_ext = extend_segments(X_conv, X_sym, peaks, min_diff_trav, cqt_window, sr, bin_thresh_segment, perc_tail)
print('Identifying and isolating regions between edges')
X_fill = edges_to_contours(X_ext, etc_kernel_size)
print('Cleaning isolated non-directional regions using morphological opening')
X_binop = apply_bin_op(X_fill, binop_dim)
print('Ensuring symmetry between upper and lower triangle in array')
X_binop = make_symmetric(X_binop)
if save_imgs:
skimage.io.imsave(binop_filename, X_binop)
## Join segments that are sufficiently close
print('Extracting segments using flood fill and centroid')
# Format - [[(x,y), (x1,y1)],...]
t1 = datetime.datetime.now()
all_segments = extract_segments_new(X_binop)
print(f' {len(all_segments)} found...')
t2 = datetime.datetime.now()
print(f"time taken: {t2-t1}")
def save_object(obj, filename):
with open(filename, 'wb') as outp: # Overwrites any existing file.
pickle.dump(obj, outp, pickle.HIGHEST_PROTOCOL)
save_object(all_segments, 'output/all_segments.pkl')
import pickle
file = open('output/all_segments.pkl','rb')
all_segments = pickle.load(file)
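# Reload the cached segments so the expensive extraction step above can be skipped on re-runs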
silence_and_stable_mask = np.array([int(any([i,j])) for i,j in zip(silence_mask, stable_mask)])
print('Breaking segments with silent/stable regions')
# Format - [[(x,y), (x1,y1)],...]
all_broken_segments = break_all_segments(all_segments, silence_and_stable_mask, cqt_window, sr, timestep)
print(f' {len(all_broken_segments)} broken segments...')
#[(i,((x0,y0), (x1,y1))) for i,((x0,y0), (x1,y1)) in enumerate(all_segments) if x1-x0>10000]
print('Reducing Segments')
all_segments_reduced = remove_short(all_broken_segments, min_length_cqt)
print(f' {len(all_segments_reduced)} segments above minimum length of {min_pattern_length_seconds}s...')
from exploration.segments import *
all_segs = all_segments_reduced
# sort by shortest -> longest
ordered_segments = sorted(all_segs, key=lambda y: y[1][0]-y[0][0])
types_dict = {i:0 for i in range(1,10)}
matches_dict = {}
all_new_segs = []
# connect segments based on symmetry
for i, ((x0, y0), (x1, y1)) in enumerate(ordered_segments):
# x0==y0 and x1==y1
this = [j for j,x in enumerate(ordered_segments) if x0==x[0][1] and x1==x[1][1]]
for t in this:
update_dict(matches_dict, i, t)
update_dict(matches_dict, t, i)
# match segment with itself
update_dict(matches_dict, i, i)
# to indicate whether a segment has been grouped
used = [0]*len(ordered_segments)
# For when we create new segments
max_i = len(ordered_segments)-1
import tqdm
for i, ((Qx0, Qy0), (Qx1, Qy1)) in tqdm.tqdm(list(enumerate(ordered_segments))):
for j, [(Rx0, Ry0), (Rx1, Ry1)] in enumerate(ordered_segments):
# horizontal pass
res = compare_segments(i, j, Qx0, Qy0, Qx1, Qy1, Rx0, Ry0, Rx1, Ry1, min_length_cqt, all_new_segs, max_i, matches_dict)
all_new_segs, max_i, matches_dict = res
# vertical pass (swap xs and ys)
res2 = compare_segments(i, j, Qx0, Qy0, Qx1, Qy1, Ry0, Rx0, Ry1, Rx1, min_length_cqt, all_new_segs, max_i, matches_dict)
all_new_segs, max_i, matches_dict = res2
old_and_new_segs = {i:((x0,y0), (x1,y1)) for i,((x0,y0), (x1,y1)) in enumerate(ordered_segments + all_new_segs) if is_good_segment(x0, y0, x1, y1, 0.6, silence_and_stable_mask, cqt_window, timestep, sr)}
# join segments that are sufficiently close to each other (if they are small)
# extend segments to silence
# remove diagonal from returned patterns
#all_segments_reduced = [((x0,y0), (x1,y1)) for ((x0,y0), (x1,y1)) in all_segments_reduced if not ((y0-100 < x0 < y0+100) or (x0-100 < y0 < x0+100))]
#print(f' {len(all_segments_reduced)} segments not along diagonal')
# remove duplicates properly
# within group alignment using dtw
print('Grouping Segments')
all_groups = matches_dict_to_groups(matches_dict)
check_groups_unique(all_groups)
all_groups = [[old_and_new_segs[i] for i in ag if i in old_and_new_segs] for ag in all_groups]
all_groups = [[((x0,x1),(y0,y1)) for ((x0,y0),(x1,y1)) in ag] for ag in all_groups]
all_groups = [sorted([x for y in ag for x in y]) for ag in all_groups]
all_groups = [remove_group_duplicates(g, 0.50) for g in all_groups]
all_groups = [sorted(ag, key=lambda y:y[0]) for ag in all_groups]
# sort groups
all_groups = [sorted(arr, key=lambda y: y[0]) for arr in all_groups]
all_groups = sorted(all_groups, key=lambda y: -len(y))
all_groups = [x for x in all_groups if len(x) > 0]
def same_group(group1, group2, perc_overlap=dupl_perc_overlap):
    for x0, x1 in group1:
        for y0, y1 in group2:
            overlap = do_patterns_overlap(x0, x1, y0, y1, perc_overlap=perc_overlap)
            if overlap:
                return True
    return False
## Remove those that are identical
group_match_dict = {}
for i, ag1 in enumerate(all_groups):
for j, ag2 in enumerate(all_groups):
if same_group(ag1, ag2):
update_dict(group_match_dict, i, j)
update_dict(group_match_dict, j, i)
all_groups_ix = matches_dict_to_groups(group_match_dict)
all_groups_ix = [list(set(x)) for x in all_groups_ix]
all_groups = [[x for i in group for x in all_groups[i]] for group in all_groups_ix]
all_groups = [remove_group_duplicates(g, 0.50) for g in all_groups]
print(f' {len(all_groups)} groups found...')
print('Convert sequences to pitch track timesteps')
starts_seq, lengths_seq = convert_seqs_to_timestep(all_groups, cqt_window, sr, timestep)
print('Applying exclusion functions')
#starts_seq_exc, lengths_seq_exc = apply_exclusions(raw_pitch, starts_seq, lengths_seq, exclusion_functions, min_in_group)
starts_seq_exc, lengths_seq_exc = remove_below_length(starts_seq, lengths_seq, timestep, min_pattern_length_seconds)
starts_seq_exc = [p for p in starts_seq_exc if len(p)>min_in_group]
lengths_seq_exc = [p for p in lengths_seq_exc if len(p)>min_in_group]
print('Extend all segments to stable or silence')
silence_and_stable_mask_2 = np.array([1 if any([i==2,j==2]) else 0 for i,j in zip(silence_mask, stable_mask)])
def extend_to_mask(starts_seq_exc, lengths_seq_exc, mask, toler=0.5):
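    """
    Extend every pattern occurrence outwards to the nearest marked frame in <mask>
    (silence or stable note), searching at most <toler> * pattern length on either side.
    Returns the adjusted start positions and lengths, in pitch-track timesteps.
    """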
mask_i = list(range(len(mask)))
starts_seq_ext = []
lengths_seq_ext = []
for i in range(len(starts_seq_exc)):
s_group = starts_seq_exc[i]
l_group = lengths_seq_exc[i]
this_group_s = []
this_group_l = []
for j in range(len(s_group)):
l = l_group[j]
s1 = s_group[j]
s2 = s1 + l
s1_ = s1 - round(l*toler)
s2_ = s2 + round(l*toler)
midpoint = s1 + round(l/2)
s1_mask = list(mask[s1_:s1])
s2_mask = list(mask[s2:s2_])
s1_mask_i = list(mask_i[s1_:s1])
s2_mask_i = list(mask_i[s2:s2_])
if 1 in s1_mask:
ix = len(s1_mask) - s1_mask[::-1].index(1) - 1
s1 = s1_mask_i[ix]
if 1 in s2_mask:
ix = s2_mask.index(1)
s2 = s2_mask_i[ix]
l = s2 - s1
this_group_s.append(s1)
this_group_l.append(l)
starts_seq_ext.append(this_group_s)
lengths_seq_ext.append(this_group_l)
return starts_seq_ext, lengths_seq_ext
starts_seq_ext, lengths_seq_ext = extend_to_mask(starts_seq_exc, lengths_seq_exc, silence_and_stable_mask_2)
starts_sec_ext = [[x*timestep for x in p] for p in starts_seq_ext]
lengths_sec_ext = [[x*timestep for x in l] for l in lengths_seq_ext]
print('Evaluating')
annotations_orig = load_annotations_new(annotations_path)
if s1:
annotations_filt = annotations_orig[(annotations_orig['s1']>=s1*cqt_window/sr) & (annotations_orig['s1']<=s2*cqt_window/sr)]
annotations_filt['s1'] = annotations_filt['s1']-s1*cqt_window/sr
annotations_filt['s2'] = annotations_filt['s2']-s1*cqt_window/sr
else:
annotations_filt = annotations_orig
annotations_filt = annotations_filt[annotations_filt['tier']!='short_motif']
#metrics, annotations_tag = evaluate_all_tiers(annotations_filt, starts_sec_exc, lengths_sec_exc, eval_tol, partial_perc)
print('')
n_patterns = sum([len(x) for x in starts_seq_ext])
coverage = get_coverage(pitch, starts_seq_ext, lengths_seq_ext)
print(f'Number of Patterns: {n_patterns}')
print(f'Number of Groups: {len(starts_sec_ext)}')
print(f'Coverage: {round(coverage,2)}')
#evaluation_report(metrics)
annotations_tagged = evaluate_quick(annotations_filt, starts_sec_ext, lengths_sec_ext, eval_tol, partial_perc)
############
## Output ##
############
print('Writing all sequences')
plot_all_sequences(raw_pitch, time, lengths_seq_ext[:top_n], starts_seq_ext[:top_n], 'output/new_group', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, starts_seq_ext[:top_n], lengths_seq_ext[:top_n], timestep, 'output/new_group')
annotations_tagged.to_csv('output/new_group/annotations.csv', index=False)
# all_recalls =[]
# partial_percs = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
# for p in partial_percs:
# metrics, annotations_tag = evaluate_all_tiers(annotations_filt, starts_sec_exc, lengths_sec_exc, eval_tol, partial_perc=p)
# all_recalls.append(metrics['partial_match_recall_all'])
# plt.figure(figsize=(10,5))
# plt.plot(partial_percs, all_recalls)
# plt.xlabel('Partial precision overlap')
# plt.ylabel('Partial recall')
# plt.grid()
# plt.savefig('images/recall_against_partial_perc.png')
# plt.close('all')
# all_recalls = []
# all_evals = [0.05*i for i in range(int(10/0.05))]
# for e in all_evals:
# these_metrics = evaluate_all_tiers(annotations_orig, starts_sec_exc, lengths_sec_exc, e)
# all_recalls.append(these_metrics['full_match_recall_all'])
# plt.figure(figsize=(10,5))
# plt.plot(all_evals, all_recalls)
# plt.title('Performance with varying evaluation tolerance')
# plt.xlabel('Evaluation tolerance')
# plt.ylabel('Recall for all patterns')
# plt.grid()
# plt.savefig('images/eval_tol_experiment.png')
# plt.close('all')
# Plot annotation on self sim
annotations_orig_filt = annotations_orig[annotations_orig['text']=='ma ga ma pa ma ga ri sa']
X_annotate_single = add_annotations_to_plot(X_canvas, annotations_orig_filt, sr, cqt_window)
X_joined = join_plots(X_orig, X_annotate_single[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/5_self_sim_annotate_single.png', X_joined.astype(np.uint8))
single_group = annotations_orig_filt[['s1','s2']].values
len_single = [[int((y-x)/timestep) for x,y in single_group]]
start_single = [[int(x/timestep) for x,y in single_group]]
print('Writing single sequences')
plot_all_sequences(raw_pitch, time, len_single, start_single, 'output/single_test', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, start_single, len_single, timestep, 'output/single_test')
# Output annotation
#####################################################
## Plotting annotations and Results on Sim Matrix ##
#####################################################
from exploration.visualisation import add_line_to_plot, get_lines, add_annotations_to_plot, add_patterns_to_plot, add_segments_to_plot, join_plots
import matplotlib
X_canvas = X.copy()
X_canvas[:] = 0
samp1 = 5000
samp2 = 9000
# Orig matrix
X_orig = X.copy()[samp1:samp2,samp1:samp2]
# Annotations
X_annotate = add_annotations_to_plot(X_canvas, annotations_orig, sr, cqt_window)
# Found segments from image processing
X_segments = add_segments_to_plot(X_canvas, all_segments)
# Found segments broken from image processing
X_segments_reduced = add_segments_to_plot(X_canvas, all_segments_reduced)
# Patterns from full pipeline
X_patterns = add_patterns_to_plot(X_canvas, starts_sec_exc, lengths_sec_exc, sr, cqt_window)
X_joined = join_plots(X_orig, X_canvas[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/0_self_sim.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_segments[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/1_self_sim_segments.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_segments_reduced[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/1_self_sim_segments_reduced.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_binop[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/2_self_sim_binop.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_annotate[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/3_self_sim_annotate.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_annotate[samp1:samp2,samp1:samp2], X_patterns[samp1:samp2,samp1:samp2])
matplotlib.image.imsave('images/4_annotations_patterns.png', X_joined.astype(np.uint8))
X_joined = join_plots(X_orig, X_patterns[samp1:samp2,samp1:samp2], False)
matplotlib.image.imsave('images/5_self_sim_patterns.png', X_joined.astype(np.uint8))
X_test = np.zeros((50,50))
x0 = 5
y0 = 34
x1 = 15
y1 = 47
X_test[x0,y0] = 1
X_test[x1,y1] = 1
from exploration.segments import line_through_points
get_x, get_y = line_through_points(x0,y0,x1,y1)
# sample the line in both parametrisations and keep the unique (x, y) cells it passes through
line_points = sorted({(round(get_x(y)), y) for y in range(y0, y1)} | {(x, round(get_y(x))) for x in range(x0, x1)})
line_x = [p[0] for p in line_points]
line_y = [p[1] for p in line_points]
X_test[line_x, line_y] = 1
matplotlib.image.imsave('images/line_through_points.png', X_test)
###########################
## All Patterns Grouping ##
###########################
import itertools
import fastdtw
from scipy.spatial.distance import euclidean
import tqdm
dtw_radius_frac = 45
all_seq_separated = [x for y in starts_seq_exc for x in y]
all_len_separated = [x for y in lengths_seq_exc for x in y]
all_indices = list(range(len(all_seq_separated)))
all_seq_dtw = pd.DataFrame(columns=['i1', 'i2', 'dtw', 'cos', 'cos_recip', 'cos_zero', 'cos_zero_recip', 'dtw_min_length', 'len_seq1_dtw', 'len_seq2_dtw', 'len_cos'])
for i1, i2 in tqdm.tqdm(list(itertools.combinations(all_indices, 2))):
# DTW From pitch track
s1 = all_seq_separated[i1]
s2 = all_seq_separated[i2]
l1 = all_len_separated[i1]
l2 = all_len_separated[i2]
seq1 = pitch[s1:s1+l1]
seq2 = pitch[s2:s2+l2]
min_length = min([len(seq1), len(seq2)])
dtw = fastdtw.fastdtw(seq1, seq2, radius=int(min_length/dtw_radius_frac), dist=euclidean)[0]/min_length
# Cosine from similarity matrix
scqt1 = int(s1*(sr*timestep)/cqt_window)
scqt2 = int(s2*(sr*timestep)/cqt_window)
lcqt1 = int(l1*(sr*timestep)/cqt_window)
lcqt2 = int(l2*(sr*timestep)/cqt_window)
x0 = scqt1
y0 = scqt2
x1 = scqt1 + lcqt1
y1 = scqt2 + lcqt2
length = int(np.hypot(x1-x0, y1-y0))
x, y = np.linspace(x0, x1, length), np.linspace(y0, y1, length)
x = x.astype(int)
y = y.astype(int)
# Extract the values along the line
zi = X[x, y]
# X stores reciprocal of the cosine distance
cos = np.mean(1/zi)
cos_recip = np.mean(zi)
zi[zi<0] = 0
cos_zero = np.mean(1/zi)
cos_zero_recip = np.mean(zi)
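# Note: zi values of 0 make 1/zi infinite here; those pairs are excluded later when plotting cos_zero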
row = {
'i1':i1,
'i2':i2,
'dtw':dtw,
'cos_recip':cos_recip,
'cos':cos,
'cos_zero_recip':cos_zero_recip,
'cos_zero':cos_zero,
'dtw_min_length':min_length,
'len_seq1_dtw':len(seq1),
'len_seq2_dtw':len(seq2),
'len_cos':len(zi)
}
all_seq_dtw = all_seq_dtw.append(row, ignore_index=True)
# add self similarity
for i in all_indices:
row = {
'i1':i,
'i2':i,
'dtw':0,
'cos_recip':np.Inf,
'cos':0,
'cos_zero_recip':np.Inf,
'cos_zero':0,
'dtw_min_length':all_len_separated[i],
'len_seq1_dtw':all_len_separated[i],
'len_seq2_dtw':all_len_separated[i],
'len_cos':all_len_separated[i]
}
all_seq_dtw = all_seq_dtw.append(row, ignore_index=True)
all_seq_dtw.to_csv('results_tables/new_model_dtw_all_pairs.csv', index=False)
# Similarity Distribution Plots
plt.hist(all_seq_dtw['dtw'].values, bins=500, color='darkgreen')
plt.title('Distribution of inter-sequence DTW')
plt.xlabel('DTW bin')
plt.ylabel('Population')
plt.savefig('images/dtw_histogram.png')
plt.close('all')
for_plot = all_seq_dtw[all_seq_dtw['cos_zero']!=np.Inf]
plt.hist(for_plot['cos_zero'].values, bins=250, color='darkgreen')
plt.title('Distribution of inter-sequence cosine distance')
plt.xlabel('Cosine distance bin')
plt.ylabel('Population')
plt.savefig('images/cos_histogram.png')
plt.close('all')
# Clustering
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as hcluster
distance = all_seq_dtw\
.pivot(index='i1', columns='i2', values='dtw')\
.fillna(0)
data = distance.values
for i in range(data.shape[0]):
for j in range(i, data.shape[0]):
data[j][i] = data[i][j]
distVec = ssd.squareform(data)
linkage = hcluster.linkage(distVec, method='ward')
clustering = hcluster.cut_tree(linkage, n_clusters=range(len(linkage)))
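# Each column of `clustering` holds the labels for one cut of the dendrogram (one value of n_clusters)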
from scipy.spatial.distance import euclidean
def DaviesBouldin(X, labels):
n_cluster = len(np.bincount(labels))
cluster_k = [X[labels == k] for k in range(n_cluster)]
centroids = [np.mean(k, axis = 0) for k in cluster_k]
variances = [np.mean([euclidean(p, centroids[i]) for p in k]) for i, k in enumerate(cluster_k)]
db = []
for i in range(n_cluster):
for j in range(n_cluster):
if j != i:
db.append((variances[i] + variances[j]) / euclidean(centroids[i], centroids[j]))
return(np.max(db) / n_cluster)
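# Note: this is a lightweight variant of the Davies-Bouldin index; sklearn.metrics.davies_bouldin_score
# implements the standard definition and could be substituted.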
def evaluate(distance, clustering_results, k_min, k_max):
    X = distance.values
    return [DaviesBouldin(X, clustering_results[:, i])
            for i in range(len(clustering_results))[k_min:k_max]]
k_min = 2
k_max = 100
evaluation = evaluate(distance, clustering, k_min, k_max)
from kneed import KneeLocator
x = list(range(k_min, k_max))
knee = KneeLocator(x, evaluation, S=0.4, curve="convex", direction="decreasing").knee
plt.figure(figsize=(12,5))
plt.plot(x, evaluation,color='darkgreen')
plt.xticks(np.arange(min(x), max(x)+1, 2.0),size=8)
plt.axvline(knee, linestyle='--', color='darkred', linewidth=0.7)
plt.title('Davies Bouldin Score for n clusters')
plt.xlabel('Number of Clusters, n')
plt.ylabel('DBS')
plt.grid()
plt.savefig('images/DaviesBouldin.png')
plt.close('all')
## Best model
n = 24
cluster_seqs = {}
cluster_lens = {}
for ix,c in enumerate(clustering[:,n]):
if c in cluster_seqs:
cluster_seqs[c].append(all_seq_separated[ix])
cluster_lens[c].append(all_len_separated[ix])
else:
cluster_seqs[c] = [all_seq_separated[ix]]
cluster_lens[c] = [all_len_separated[ix]]
cluster_seqs = [v for k,v in cluster_seqs.items()]
cluster_lens = [v for k,v in cluster_lens.items()]
plot_all_sequences(raw_pitch, time, cluster_lens[:top_n], cluster_seqs[:top_n], 'output/clustering', clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, cluster_seqs[:top_n], cluster_lens[:top_n], timestep, 'output/clustering')
############################
# Plot individual sequence #
############################
from exploration.visualisation import plot_subsequence_w_stability
sp = 5300
l = 1000
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/stab_check.png', plot_kwargs=plot_kwargs)
sp = x_start_ts
l = x_end_ts - x_start_ts
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/seqx_stab.png', plot_kwargs=plot_kwargs)
sp = y_start_ts
l = y_end_ts - y_start_ts
plot_subsequence_w_stability(sp, l, raw_pitch, time, stable_mask, timestep, path='images/seqy_stab.png', plot_kwargs=plot_kwargs)
############
# Database #
############
from exploration.utils import sql
from credentials import settings
import psycopg2
def insertResults(records, params):
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(**settings)
        cursor = connection.cursor()
        # Bulk insert of pattern occurrence records
        sql_insert_query = """
        INSERT INTO results
        (patternnumber, recordingid, elementnumber, durationelements, starttimeseconds, durationseconds, patterngroup, rankingroup)
        VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"""
        cursor.executemany(sql_insert_query, records)
        connection.commit()
        count = cursor.rowcount
        print(count, "record(s) inserted successfully")
    except (Exception, psycopg2.Error) as error:
        print("Error in insert operation", error)
    finally:
        # close the cursor and database connection if they were opened
        if cursor:
            cursor.close()
        if connection:
            connection.close()
            print("PostgreSQL connection is closed")
def insertSimilarity(records, params):
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(**settings)
        cursor = connection.cursor()
        # Bulk insert of pairwise similarity records
        sql_insert_query = """
        INSERT INTO similarity
        (patternnumberone, patternnumbertwo, similarityname, similarity)
        VALUES(%s, %s, %s, %s)"""
        cursor.executemany(sql_insert_query, records)
        connection.commit()
        count = cursor.rowcount
        print(count, "record(s) inserted successfully")
    except (Exception, psycopg2.Error) as error:
        print("Error in insert operation", error)
    finally:
        # close the cursor and database connection if they were opened
        if cursor:
            cursor.close()
        if connection:
            connection.close()
            print("PostgreSQL connection is closed")
recording_id = 'brovabarama'
records = []
pattern_num = 0
pattern_num_lookup = {}
for i, seq in enumerate(starts_seq_cut):
for j, s in enumerate(seq):
length = lengths_seq_cut[i][j]
length_secs = round(length*timestep,2)
start_time_secs = round(s*timestep,2)
records.append((pattern_num, recording_id, s, length, start_time_secs, length_secs, i, j))
pattern_num_lookup[pattern_num] = (i,j)
pattern_num += 1
insertResults(records, settings)
import itertools
similarities = []
for s1, s2 in itertools.combinations(pattern_num_lookup.keys(), 2):
for n in ['cosine', 'dtw', 'euclidean']:
similarities.append((s1, s2, n, np.random.random()))
# train model more
# - parameters
# Tune frequency bands
# for this music, perhaps a standard fourier transform would work better?
# what is fmin
# how many octaves
# frequency distribution across all tracks can inform parameters
# - check graphs
# - no further test performance increase after ~1250 epochs
# link features to annotations from Lara for phrase onset detection
# load features and annotations
from complex_auto.util import load_pyc_bz
import textgrid
import pandas as pd
import math
def load_annotations(path):
"""
Load text grid annotations from <path>
return pandas df
"""
tg = textgrid.TextGrid.fromFile(path)
df = pd.DataFrame(columns=['tier','s1', 's2', 'text'])
for tier in tg:
name = tier.name
intervals = tier.intervals
for i in intervals:
d = {
'tier':name,
's1': i.minTime,
's2': i.maxTime,
'text': i.mark
}
df = df.append(d, ignore_index=True)
return df
def transform_features(features):
amp_arr = features[0].detach().numpy()
phase_arr = features[1].detach().numpy()
nbins = amp_arr.shape[1]
amp_cols = [f'amp_{i}' for i in range(nbins)]
phase_cols = [f'phase_{i}' for i in range(nbins)]
amp_df = pd.DataFrame(amp_arr, columns=amp_cols)
phase_df = pd.DataFrame(phase_arr, columns=phase_cols)
df = pd.concat([amp_df, phase_df], axis=1)
df['window_num'] = df.index
return df
def second_to_window(onset, sr, hop_size):
onset_el = onset*sr
window_num = math.floor(onset_el/hop_size)
return window_num
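# e.g. second_to_window(1.0, sr=44100, hop_size=1988) -> 22, since 44100/1988 ≈ 22.2 is floored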
features_paths = [
'output/hpc/Koti Janmani.multitrack-vocal.mp3_repres.pyc.bz',
'output/hpc/Shankari Shankuru.multitrack-vocal.mp3_repres.pyc.bz',
'output/hpc/Sharanu Janakana.multitrack-vocal.mp3_repres.pyc.bz'
]
annotations_paths = [
'../carnatic-motifs/Akkarai_Sisters_-_Koti_Janmani_multitrack-vocal_-_ritigowla.TextGrid',
'../carnatic-motifs/Akkarai_Sisters_-_Shankari_Shankuru_multitrack-vocal_-_saveri.TextGrid',
'../carnatic-motifs/Salem_Gayatri_Venkatesan_-_Sharanu_Janakana_multitrack-vocal_-_bilahari_copy.TextGrid'
]
all_features = pd.DataFrame()
for i,(fp, ap) in enumerate(zip(features_paths, annotations_paths)):
# array of [amplitude, phase]
features_raw = load_pyc_bz(fp)
features = transform_features(features_raw)
annotations = load_annotations(ap)
hop_size = cqt_window # hop size between analysis windows; set equal to the CQT window size defined above
annotations['window_num'] = annotations['s1'].apply(lambda y: second_to_window(y, sr, hop_size))
features['is_onset'] = features['window_num'].isin(annotations['window_num'])
features['is_test'] = i==2
all_features = all_features.append(features, ignore_index=True)
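# all_features now holds one row per CQT frame: amplitude/phase features, an is_onset label
# derived from the annotations, and an is_test flag marking the held-out recording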
# Classification
import lightgbm as lgb
from scipy.stats import randint as sp_randint
from sklearn.model_selection import (GridSearchCV, GroupKFold, KFold,
RandomizedSearchCV, TimeSeriesSplit,
cross_val_score, train_test_split)
from sklearn.metrics import recall_score, precision_score, f1_score, roc_auc_score
def random_float_inrange(N,a=0.005,b=0.1):
return[((b - a) * np.random.random_sample()) + a for _ in range(N)]
#df_train, df_test = train_test_split(all_features, test_size=0.4, random_state=42)
df_train = all_features[all_features['is_test']==False]
df_test = all_features[all_features['is_test']==True]
# resample
# Resample to account for huge sparsity
pos_frame = df_train[df_train['is_onset']==1]
neg_frame = df_train[df_train['is_onset']!=1]
while sum(df_train['is_onset'])/len(df_train) < 0.3:
print(sum(df_train['is_onset'])/len(df_train))
random_rec = pos_frame.sample(1000)
df_train = df_train.append(random_rec, ignore_index=True)
# shuffle frame
df_train = df_train.iloc[np.random.permutation(len(df_train))].reset_index(drop=True)
feat_names = [c for c in df_train if c not in ['is_onset', 'window_num', 'is_test']]
X_train = df_train[feat_names].values
y_train = df_train['is_onset'].values
X_test = df_test[feat_names].values
y_test = df_test['is_onset'].values
param_dist = {'reg_sqrt':[True],
'learning_rate':[0.001,0.01,0.1, 0.5],
'max_depth':[2,4,8,12],
'min_data_in_leaf':[1,5,10],
'num_leaves':[5,10,15,20,25],
'n_estimators':[100,200,300,400],
'colsample_bytree':[0.6, 0.75, 0.9]}
# Final features from gridsearch
final_params = {
'colsample_bytree': 0.6463615939999198,
'learning_rate': 0.1280212488889668,
'max_depth': 40,
'min_data_in_leaf': 27,
'n_estimators': 982,
'num_leaves': 46,
'reg_sqrt': True
}
lgb_model = lgb.LGBMClassifier(**final_params)
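# NOTE: the randomized search below re-creates lgb_model and overwrites the model built from
# final_params; run either the fixed-parameter fit or the search, not both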
# Gridsearch
lgb_model = lgb.LGBMClassifier()
lgb_model = RandomizedSearchCV(lgb_model, param_distributions=param_dist,
n_iter=1000, cv=3, n_jobs=-1,
scoring='recall', random_state=42)
lgb_model.fit(X_train, y_train)
y_pred = lgb_model.predict(X_test)
for scorer in recall_score, precision_score, f1_score, roc_auc_score:
print(f'{scorer.__name__}: {scorer(y_test, y_pred)}')
importances = list(sorted(zip(feat_names, lgb_model.feature_importances_), key=lambda y: -y[1]))
importances[:10]
# black out similarity grid based on
# consonant onset
# silence
# stability
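# A minimal sketch of the blackout idea above, reusing the masks computed earlier
# (illustrative only; index conversion mirrors the pitch-timestep -> CQT-frame mapping used above):
# mask_cqt_ix = [int(i * (sr * timestep) / cqt_window) for i, m in enumerate(silence_and_stable_mask) if m == 1]
# X_blackout = X.copy()
# X_blackout[mask_cqt_ix, :] = 0
# X_blackout[:, mask_cqt_ix] = 0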
# link db to ladylane
sql("""
SELECT
results.patternnumber,
results.patterngroup,
results.rankingroup,
results.starttimeseconds,
results.durationseconds
FROM results
WHERE results.recordingid = 'brovabarama'
AND results.patterngroup = 1
""")
sql("""
SELECT
patternnumberone,
patternnumbertwo,
similarity,
similarityname
FROM similarity
WHERE similarityname = 'cosine'
AND (patternnumberone = 4 OR patternnumbertwo = 4)
ORDER BY similarity
""")
insertSimilarity(similarities, settings)
#######################
# Output subsequences #
#######################
from exploration.visualisation import plot_all_sequences, plot_pitch
from exploration.io import write_all_sequence_audio
plot_kwargs = {
'yticks_dict':{},
'cents':True,
'tonic':195.997718,
'emphasize':{},#['S', 'S^'],
'figsize':(15,4)
}
starts_seq_cut = [[a,c] for a,b,c,d in patterns_seq]
lengths_seq_cut = [[max([b-a, d-c])]*2 for a,b,c,d in patterns_seq]
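# patterns_seq entries are assumed to be (start1, end1, start2, end2) index tuples; each pair is
# plotted/written with a common length of max(end1-start1, end2-start2)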
plot_all_sequences(pitch, time, lengths_seq_cut, starts_seq_cut, out_dir, clear_dir=True, plot_kwargs=plot_kwargs)
write_all_sequence_audio(audio_path, starts_seq_cut, lengths_seq_cut, timestep, out_dir)
# x Exclusion mask apply
# - Output patterns and audio with plots
# - Store in database
# - recording_id, seq_num, duration_seq, seq_sec, duration_sec, group number, group rank
# - Quick get next pattern
| en | 0.718829 | | 1.880508 | 2 |