code | apis | extract_api |
---|---|---|
import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
class Encoder_6(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_6, self).__init__(name='Encoder_6', **kwargs)
self.dim_neck_3 = hparams.dim_neck_3
self.freq_3 = hparams.freq_3
self.dim_f0 = hparams.dim_f0
self.dim_enc_3 = hparams.dim_enc_3
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_1'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc_3
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_1',
)
self.interp = InterpLnr(hparams)
def call(self, x, attention_mask, training=True):
x = self.before_dense_1(x)
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
x = self.interp(
x,
tf.tile([tf.shape(x)[1]], [tf.shape(x)[0]]),
training=training,
)
x = self.encoder_dense_1(x)
return x
class Encoder_7(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_7, self).__init__(name='Encoder_7', **kwargs)
self.config = config
self.dim_neck = hparams.dim_neck
self.dim_neck_3 = hparams.dim_neck_3
self.dim_freq = hparams.dim_freq
self.dim_enc = hparams.dim_enc
self.dim_enc_3 = hparams.dim_enc_3
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc, dtype=tf.float32, name='before_dense_1'
)
self.before_dense_2 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_2'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
config_2 = copy.deepcopy(config)
config_2.hidden_size = self.dim_enc_3
self.layer_2 = [
TFFastSpeechLayer(config_2, name='layer_._{}'.format(i))
for i in range(config_2.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck, dtype=tf.float32, name='encoder_dense_1'
)
self.encoder_dense_2 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_2',
)
self.interp = InterpLnr(hparams)
def call(self, x_f0, attention_mask, training=True):
x = x_f0[:, :, : self.dim_freq]
f0 = x_f0[:, :, self.dim_freq:]
x = self.before_dense_1(x)
f0 = self.before_dense_2(f0)
seq_length = tf.shape(x_f0)[1]
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
f0 = self.layer_2[no]([f0, attention_mask], training=training)[0]
x_f0 = tf.concat((x, f0), axis=2)
x_f0 = self.interp(
x_f0,
tf.tile([tf.shape(x_f0)[1]], [tf.shape(x)[0]]),
training=training,
)
x = x_f0[:, :, : self.dim_enc]
f0 = x_f0[:, :, self.dim_enc:]
x = self.encoder_dense_1(x)
f0 = self.encoder_dense_2(f0)
return x, f0
class Encoder_t(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_t, self).__init__(name='Encoder_t', **kwargs)
self.dim_neck_2 = hparams.dim_neck_2
self.freq_2 = hparams.freq_2
self.dim_freq = hparams.dim_freq
self.dim_enc_2 = hparams.dim_enc_2
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
config = copy.deepcopy(config)
config.num_hidden_layers = 1
config.hidden_size = self.dim_enc_2
self.config = config
self.before_dense = tf.keras.layers.Dense(
units=self.dim_enc_2, dtype=tf.float32, name='before_dense_1'
)
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.encoder_dense = tf.keras.layers.Dense(
units=self.dim_neck_2, dtype=tf.float32, name='encoder_dense'
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.encoder_dense(f)
class Decoder_3(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_3, self).__init__(name='Decoder_3', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_freq,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Decoder_4(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_4, self).__init__(name='Decoder_4', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_f0,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Model(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model, self).__init__(name='speechsplit', **kwargs)
self.encoder_1 = Encoder_7(
config.encoder_self_attention_params, hparams
)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_3(config.decoder_self_attention_params, hparams)
self.freq = hparams.freq
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_f0, x_org, c_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_x, codes_f0 = self.encoder_1(
x_f0, attention_mask, training=training
)
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_1 = codes_x
code_exp_3 = codes_f0
code_exp_2 = codes_2
c_trg = tf.tile(tf.expand_dims(c_trg, 1), (1, tf.shape(x_f0)[1], 1))
encoder_outputs = tf.concat(
(code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1
)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_x, codes_f0, codes_2, encoder_outputs, mel_outputs
class Model_F0(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model_F0, self).__init__(name='speechsplit_f0', **kwargs)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.encoder_3 = Encoder_6(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_4(config.decoder_self_attention_params, hparams)
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_org, f0_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_2 = codes_2
codes_3 = self.encoder_3(f0_trg, attention_mask, training=training)
code_exp_3 = codes_3
self.o = [code_exp_2, code_exp_3]
encoder_outputs = tf.concat((code_exp_2, code_exp_3), axis=-1)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_2, codes_3, encoder_outputs, mel_outputs
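
# Hedged usage sketch (an addition, not part of the upstream module): it only
# illustrates how Model.call and Model_F0.call derive their attention mask from
# per-utterance mel lengths; building the full models additionally needs the
# package's `config` and `hparams` objects, which are not shown here.
if __name__ == '__main__':
    mel_lengths = tf.constant([3, 5])  # lengths of two example utterances
    max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
    attention_mask = tf.sequence_mask(
        lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
    )
    # each row is 1.0 up to its utterance length and 0.0 afterwards
    print(attention_mask)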
|
[
"tensorflow.shape",
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.keras.layers.Dense",
"copy.deepcopy",
"tensorflow.expand_dims",
"tensorflow.sequence_mask"
] |
[((639, 728), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_enc_3', 'dtype': 'tf.float32', 'name': '"""before_dense_1"""'}), "(units=self.dim_enc_3, dtype=tf.float32, name=\n 'before_dense_1')\n", (660, 728), True, 'import tensorflow as tf\n'), ((766, 787), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (779, 787), False, 'import copy\n'), ((1025, 1116), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_neck_3', 'dtype': 'tf.float32', 'name': '"""encoder_dense_1"""'}), "(units=self.dim_neck_3, dtype=tf.float32, name=\n 'encoder_dense_1')\n", (1046, 1116), True, 'import tensorflow as tf\n'), ((2061, 2148), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_enc', 'dtype': 'tf.float32', 'name': '"""before_dense_1"""'}), "(units=self.dim_enc, dtype=tf.float32, name=\n 'before_dense_1')\n", (2082, 2148), True, 'import tensorflow as tf\n'), ((2196, 2285), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_enc_3', 'dtype': 'tf.float32', 'name': '"""before_dense_2"""'}), "(units=self.dim_enc_3, dtype=tf.float32, name=\n 'before_dense_2')\n", (2217, 2285), True, 'import tensorflow as tf\n'), ((2323, 2344), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (2336, 2344), False, 'import copy\n'), ((2568, 2589), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (2581, 2589), False, 'import copy\n'), ((2827, 2916), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_neck', 'dtype': 'tf.float32', 'name': '"""encoder_dense_1"""'}), "(units=self.dim_neck, dtype=tf.float32, name=\n 'encoder_dense_1')\n", (2848, 2916), True, 'import tensorflow as tf\n'), ((2965, 3056), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_neck_3', 'dtype': 'tf.float32', 'name': '"""encoder_dense_2"""'}), "(units=self.dim_neck_3, dtype=tf.float32, name=\n 'encoder_dense_2')\n", (2986, 3056), True, 'import tensorflow as tf\n'), ((4499, 4520), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (4512, 4520), False, 'import copy\n'), ((4660, 4749), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_enc_2', 'dtype': 'tf.float32', 'name': '"""before_dense_1"""'}), "(units=self.dim_enc_2, dtype=tf.float32, name=\n 'before_dense_1')\n", (4681, 4749), True, 'import tensorflow as tf\n'), ((4864, 4953), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.dim_neck_2', 'dtype': 'tf.float32', 'name': '"""encoder_dense"""'}), "(units=self.dim_neck_2, dtype=tf.float32, name=\n 'encoder_dense')\n", (4885, 4953), True, 'import tensorflow as tf\n'), ((5488, 5581), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'config.hidden_size', 'dtype': 'tf.float32', 'name': '"""before_dense_1"""'}), "(units=config.hidden_size, dtype=tf.float32, name=\n 'before_dense_1')\n", (5509, 5581), True, 'import tensorflow as tf\n'), ((5657, 5756), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'hparams.dim_freq', 'dtype': 'tf.float32', 'name': '"""self.linear_projection"""'}), "(units=hparams.dim_freq, dtype=tf.float32, name=\n 'self.linear_projection')\n", (5678, 5756), True, 'import tensorflow as tf\n'), ((6321, 6414), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'config.hidden_size', 'dtype': 'tf.float32', 'name': '"""before_dense_1"""'}), "(units=config.hidden_size, 
dtype=tf.float32, name=\n 'before_dense_1')\n", (6342, 6414), True, 'import tensorflow as tf\n'), ((6490, 6587), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'hparams.dim_f0', 'dtype': 'tf.float32', 'name': '"""self.linear_projection"""'}), "(units=hparams.dim_f0, dtype=tf.float32, name=\n 'self.linear_projection')\n", (6511, 6587), True, 'import tensorflow as tf\n'), ((7570, 7644), 'tensorflow.sequence_mask', 'tf.sequence_mask', ([], {'lengths': 'mel_lengths', 'maxlen': 'max_length', 'dtype': 'tf.float32'}), '(lengths=mel_lengths, maxlen=max_length, dtype=tf.float32)\n', (7586, 7644), True, 'import tensorflow as tf\n'), ((8088, 8151), 'tensorflow.concat', 'tf.concat', (['(code_exp_1, code_exp_2, code_exp_3, c_trg)'], {'axis': '(-1)'}), '((code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1)\n', (8097, 8151), True, 'import tensorflow as tf\n'), ((9031, 9105), 'tensorflow.sequence_mask', 'tf.sequence_mask', ([], {'lengths': 'mel_lengths', 'maxlen': 'max_length', 'dtype': 'tf.float32'}), '(lengths=mel_lengths, maxlen=max_length, dtype=tf.float32)\n', (9047, 9105), True, 'import tensorflow as tf\n'), ((9453, 9497), 'tensorflow.concat', 'tf.concat', (['(code_exp_2, code_exp_3)'], {'axis': '(-1)'}), '((code_exp_2, code_exp_3), axis=-1)\n', (9462, 9497), True, 'import tensorflow as tf\n'), ((3373, 3387), 'tensorflow.shape', 'tf.shape', (['x_f0'], {}), '(x_f0)\n', (3381, 3387), True, 'import tensorflow as tf\n'), ((3618, 3644), 'tensorflow.concat', 'tf.concat', (['(x, f0)'], {'axis': '(2)'}), '((x, f0), axis=2)\n', (3627, 3644), True, 'import tensorflow as tf\n'), ((5082, 5093), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (5090, 5093), True, 'import tensorflow as tf\n'), ((5910, 5921), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (5918, 5921), True, 'import tensorflow as tf\n'), ((6741, 6752), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (6749, 6752), True, 'import tensorflow as tf\n'), ((7507, 7533), 'tensorflow.reduce_max', 'tf.reduce_max', (['mel_lengths'], {}), '(mel_lengths)\n', (7520, 7533), True, 'import tensorflow as tf\n'), ((8008, 8032), 'tensorflow.expand_dims', 'tf.expand_dims', (['c_trg', '(1)'], {}), '(c_trg, 1)\n', (8022, 8032), True, 'import tensorflow as tf\n'), ((8968, 8994), 'tensorflow.reduce_max', 'tf.reduce_max', (['mel_lengths'], {}), '(mel_lengths)\n', (8981, 8994), True, 'import tensorflow as tf\n'), ((8038, 8052), 'tensorflow.shape', 'tf.shape', (['x_f0'], {}), '(x_f0)\n', (8046, 8052), True, 'import tensorflow as tf\n'), ((1493, 1504), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1501, 1504), True, 'import tensorflow as tf\n'), ((1511, 1522), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1519, 1522), True, 'import tensorflow as tf\n'), ((3724, 3738), 'tensorflow.shape', 'tf.shape', (['x_f0'], {}), '(x_f0)\n', (3732, 3738), True, 'import tensorflow as tf\n'), ((3745, 3756), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3753, 3756), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
#
# $Id$
#
"""
Note: this is targeted for python 2.x.
To run it under python 3.x you need to use 2to3 tool first:
$ 2to3 -w test/test_memory_leaks.py
"""
import os
import gc
import sys
import unittest
import psutil
from test_psutil import reap_children, skipUnless, skipIf, \
POSIX, LINUX, WINDOWS, OSX, BSD
LOOPS = 1000
TOLERANCE = 4096
class TestProcessObjectLeaks(unittest.TestCase):
"""Test leaks of Process class methods and properties"""
def setUp(self):
gc.collect()
def tearDown(self):
reap_children()
def execute(self, method, *args, **kwarks):
# step 1
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_name(self):
self.execute('name')
def test_cmdline(self):
self.execute('cmdline')
def test_ppid(self):
self.execute('ppid')
def test_uid(self):
self.execute('uid')
def test_gid(self):
self.execute('gid')
@skipIf(POSIX)
def test_username(self):
self.execute('username')
def test_create_time(self):
self.execute('create_time')
def test_get_num_threads(self):
self.execute('get_num_threads')
def test_get_threads(self):
self.execute('get_threads')
def test_get_cpu_times(self):
self.execute('get_cpu_times')
def test_get_memory_info(self):
self.execute('get_memory_info')
def test_is_running(self):
self.execute('is_running')
@skipUnless(WINDOWS)
def test_resume(self):
self.execute('resume')
@skipUnless(WINDOWS)
def test_getcwd(self):
self.execute('getcwd')
@skipUnless(WINDOWS)
def test_get_open_files(self):
self.execute('get_open_files')
@skipUnless(WINDOWS)
def test_get_connections(self):
self.execute('get_connections')
class TestModuleFunctionsLeaks(unittest.TestCase):
"""Test leaks of psutil module functions."""
def setUp(self):
gc.collect()
def execute(self, function, *args, **kwarks):
# step 1
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_get_pid_list(self):
self.execute('get_pid_list')
@skipIf(POSIX)
def test_pid_exists(self):
self.execute('pid_exists', os.getpid())
def test_process_iter(self):
self.execute('process_iter')
def test_used_phymem(self):
self.execute('used_phymem')
def test_avail_phymem(self):
self.execute('avail_phymem')
def test_total_virtmem(self):
self.execute('total_virtmem')
def test_used_virtmem(self):
self.execute('used_virtmem')
def test_avail_virtmem(self):
self.execute('avail_virtmem')
def test_cpu_times(self):
self.execute('cpu_times')
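
# Hedged sketch (an addition, not part of the original test file): the same
# grow-twice-and-compare-RSS idea written against the modern psutil API
# (memory_info() rather than the old get_memory_info()) and Python 3's range();
# it reuses the module-level LOOPS and TOLERANCE constants defined above.
def rss_appears_to_leak(callable_, loops=LOOPS):
    proc = psutil.Process(os.getpid())
    for _ in range(loops):
        callable_()
    gc.collect()
    rss1 = proc.memory_info().rss
    for _ in range(loops):
        callable_()
    gc.collect()
    rss2 = proc.memory_info().rss
    return (rss2 - rss1) > TOLERANCE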
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(TestProcessObjectLeaks))
test_suite.addTest(unittest.makeSuite(TestModuleFunctionsLeaks))
unittest.TextTestRunner(verbosity=2).run(test_suite)
if __name__ == '__main__':
test_main()
|
[
"unittest.TestSuite",
"unittest.makeSuite",
"test_psutil.skipUnless",
"test_psutil.skipIf",
"gc.collect",
"os.getpid",
"test_psutil.reap_children",
"unittest.TextTestRunner"
] |
[((1864, 1877), 'test_psutil.skipIf', 'skipIf', (['POSIX'], {}), '(POSIX)\n', (1870, 1877), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((2382, 2401), 'test_psutil.skipUnless', 'skipUnless', (['WINDOWS'], {}), '(WINDOWS)\n', (2392, 2401), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((2466, 2485), 'test_psutil.skipUnless', 'skipUnless', (['WINDOWS'], {}), '(WINDOWS)\n', (2476, 2485), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((2550, 2569), 'test_psutil.skipUnless', 'skipUnless', (['WINDOWS'], {}), '(WINDOWS)\n', (2560, 2569), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((2650, 2669), 'test_psutil.skipUnless', 'skipUnless', (['WINDOWS'], {}), '(WINDOWS)\n', (2660, 2669), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((3885, 3898), 'test_psutil.skipIf', 'skipIf', (['POSIX'], {}), '(POSIX)\n', (3891, 3898), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((4507, 4527), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (4525, 4527), False, 'import unittest\n'), ((532, 544), 'gc.collect', 'gc.collect', ([], {}), '()\n', (542, 544), False, 'import gc\n'), ((578, 593), 'test_psutil.reap_children', 'reap_children', ([], {}), '()\n', (591, 593), False, 'from test_psutil import reap_children, skipUnless, skipIf, POSIX, LINUX, WINDOWS, OSX, BSD\n'), ((948, 960), 'gc.collect', 'gc.collect', ([], {}), '()\n', (958, 960), False, 'import gc\n'), ((1331, 1343), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1341, 1343), False, 'import gc\n'), ((2878, 2890), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2888, 2890), False, 'import gc\n'), ((3211, 3223), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3221, 3223), False, 'import gc\n'), ((3558, 3570), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3568, 3570), False, 'import gc\n'), ((4551, 4593), 'unittest.makeSuite', 'unittest.makeSuite', (['TestProcessObjectLeaks'], {}), '(TestProcessObjectLeaks)\n', (4569, 4593), False, 'import unittest\n'), ((4618, 4662), 'unittest.makeSuite', 'unittest.makeSuite', (['TestModuleFunctionsLeaks'], {}), '(TestModuleFunctionsLeaks)\n', (4636, 4662), False, 'import unittest\n'), ((687, 698), 'os.getpid', 'os.getpid', ([], {}), '()\n', (696, 698), False, 'import os\n'), ((1070, 1081), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1079, 1081), False, 'import os\n'), ((3965, 3976), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3974, 3976), False, 'import os\n'), ((4668, 4704), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4691, 4704), False, 'import unittest\n'), ((991, 1002), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1000, 1002), False, 'import os\n'), ((1374, 1385), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1383, 1385), False, 'import os\n'), ((3254, 3265), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3263, 3265), False, 'import os\n'), ((3601, 3612), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3610, 3612), False, 'import os\n')]
|
import csv
import networkx as nx
class RouteFinder():
    def __init__(self):
        # build the route graph from the node-pair CSV on construction
        self.reset_graph()
def reset_graph(self):
G = nx.Graph()
with open('data/node_pairs.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
# add edges
G.add_edge(row[0],row[1])
self.G = G
def remove_node(self,nodes):
self.G.remove_nodes_from(nodes)
def optimal_route(self,source,target):
return nx.shortest_path(self.G, source, target)
def optimal_entry_route(self,target):
exits = ['Exit_4','Exit_3','Exit_2','Exit_1']
optimal_route = []
shortest_path_length = 0
        for exit in exits:
            try:
                curr_path = nx.shortest_path(self.G, exit, target)
                curr_length = len(curr_path)
                if shortest_path_length == 0 or curr_length < shortest_path_length:
                    optimal_route = curr_path
                    shortest_path_length = curr_length
            except (nx.NetworkXNoPath, nx.NodeNotFound):
                # this exit is unreachable or missing; try the next one
                pass
        if shortest_path_length == 0:
            return 'No paths found'
        return optimal_route
def optimal_exit_route(self,source):
exits = ['Exit_1','Exit_2','Exit_3','Exit_4']
optimal_route = []
shortest_path_length = 0
        for exit in exits:
            try:
                curr_path = nx.shortest_path(self.G, source, exit)
                curr_length = len(curr_path)
                if shortest_path_length == 0 or curr_length < shortest_path_length:
                    optimal_route = curr_path
                    shortest_path_length = curr_length
            except (nx.NetworkXNoPath, nx.NodeNotFound):
                # this exit is unreachable or missing; try the next one
                pass
        if shortest_path_length == 0:
            return 'No paths found'
        return optimal_route
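
# Hedged usage sketch (an addition): exercises the same nearest-exit idea on a
# small in-memory graph, since the 'data/node_pairs.csv' file read above is not
# available here; the non-exit node names are made up for illustration.
if __name__ == '__main__':
    demo = nx.Graph()
    demo.add_edges_from([('Exit_1', 'A'), ('A', 'B'), ('B', 'C'), ('C', 'Exit_2')])
    # mirror optimal_exit_route(): shortest path from room 'B' to any reachable exit
    routes = [nx.shortest_path(demo, 'B', e) for e in ('Exit_1', 'Exit_2') if e in demo]
    print(min(routes, key=len))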
|
[
"networkx.shortest_path",
"networkx.Graph",
"csv.reader"
] |
[((132, 142), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (140, 142), True, 'import networkx as nx\n'), ((417, 427), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (425, 427), True, 'import networkx as nx\n'), ((796, 836), 'networkx.shortest_path', 'nx.shortest_path', (['self.G', 'source', 'target'], {}), '(self.G, source, target)\n', (812, 836), True, 'import networkx as nx\n'), ((219, 253), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (229, 253), False, 'import csv\n'), ((504, 538), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (514, 538), False, 'import csv\n'), ((1069, 1107), 'networkx.shortest_path', 'nx.shortest_path', (['self.G', 'exit', 'target'], {}), '(self.G, exit, target)\n', (1085, 1107), True, 'import networkx as nx\n'), ((1723, 1761), 'networkx.shortest_path', 'nx.shortest_path', (['self.G', 'source', 'exit'], {}), '(self.G, source, exit)\n', (1739, 1761), True, 'import networkx as nx\n')]
|
#!/usr/bin/env python
# <NAME>, 2013 (zougloub)
"""
reStructuredText support (experimental)
Example::
def configure(conf):
conf.load('rst')
if not conf.env.RST2HTML:
conf.fatal('The program rst2html is required')
def build(bld):
bld(
features = 'rst',
type = 'rst2html', # rst2html, rst2pdf, ...
source = 'index.rst', # mandatory, the source
deps = 'image.png', # to give additional non-trivial dependencies
)
By default the tool looks for a set of programs in PATH.
The tools are defined in `rst_progs`.
To configure with a special program use::
$ RST2HTML=/path/to/rst2html waf configure
This tool is experimental; don't hesitate to contribute to it.
"""
import re
from waflib import Errors
from waflib import Logs
from waflib import Node
from waflib import Task
from waflib import Utils
from waflib.TaskGen import before_method
from waflib.TaskGen import feature
rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split()
def parse_rst_node(task, node, nodes, names, seen, dirs=None):
# TODO add extensibility, to handle custom rst include tags...
if dirs is None:
dirs = (node.parent, node.get_bld().parent)
if node in seen:
return
seen.append(node)
code = node.read()
re_rst = re.compile(
r"^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$", re.M
)
for match in re_rst.finditer(code):
ipath = match.group("file")
itype = match.group("type")
Logs.debug("rst: visiting %s: %s", itype, ipath)
found = False
for d in dirs:
Logs.debug("rst: looking for %s in %s", ipath, d.abspath())
found = d.find_node(ipath)
if found:
Logs.debug("rst: found %s as %s", ipath, found.abspath())
nodes.append((itype, found))
if itype == "include":
parse_rst_node(task, found, nodes, names, seen)
break
if not found:
names.append((itype, ipath))
class docutils(Task.Task):
"""
Compile a rst file.
"""
def scan(self):
"""
A recursive regex-based scanner that finds rst dependencies.
"""
nodes = []
names = []
seen = []
node = self.inputs[0]
if not node:
return (nodes, names)
parse_rst_node(self, node, nodes, names, seen)
Logs.debug("rst: %r: found the following file deps: %r", self, nodes)
if names:
Logs.warn("rst: %r: could not find the following file deps: %r", self, names)
return ([v for (t, v) in nodes], [v for (t, v) in names])
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError(f"{msg!r} command exit status {retcode!r}")
def run(self):
"""
Runs the rst compilation using docutils
"""
raise NotImplementedError()
class rst2html(docutils):
color = "BLUE"
def __init__(self, *args, **kw):
docutils.__init__(self, *args, **kw)
self.command = self.generator.env.RST2HTML
self.attributes = ["stylesheet"]
def scan(self):
nodes, names = docutils.scan(self)
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
ssnode = self.generator.to_nodes(stylesheet)[0]
nodes.append(ssnode)
Logs.debug("rst: adding dep to %s %s", attribute, stylesheet)
return nodes, names
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.command + [src, dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
stylesheet = self.generator.to_nodes(stylesheet)[0]
cmd += ["--%s" % attribute, stylesheet.path_from(cwdn)]
return self.exec_command(cmd, cwd=cwdn.abspath())
class rst2s5(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2S5
self.attributes = ["stylesheet"]
class rst2latex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2LATEX
self.attributes = ["stylesheet"]
class rst2xetex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2XETEX
self.attributes = ["stylesheet"]
class rst2pdf(docutils):
color = "BLUE"
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.generator.env.RST2PDF + [src, "-o", dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
return self.exec_command(cmd, cwd=cwdn.abspath())
@feature("rst")
@before_method("process_source")
def apply_rst(self):
"""
Create :py:class:`rst` or other rst-related task objects
"""
if self.target:
if isinstance(self.target, Node.Node):
tgt = self.target
elif isinstance(self.target, str):
tgt = self.path.get_bld().make_node(self.target)
else:
self.bld.fatal(
f"rst: Don't know how to build target name {self.target} which is not a string or Node for {self}"
)
else:
tgt = None
tsk_type = getattr(self, "type", None)
src = self.to_nodes(self.source)
assert len(src) == 1
src = src[0]
if tsk_type is not None and tgt is None:
if tsk_type.startswith("rst2"):
ext = tsk_type[4:]
else:
self.bld.fatal("rst: Could not detect the output file extension for %s" % self)
tgt = src.change_ext(".%s" % ext)
elif tsk_type is None and tgt is not None:
out = tgt.name
ext = out[out.rfind(".") + 1 :]
self.type = "rst2" + ext
elif tsk_type is not None and tgt is not None:
# the user knows what he wants
pass
else:
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self)
deps_lst = []
if getattr(self, "deps", None):
deps = self.to_list(self.deps)
for filename in deps:
n = self.path.find_resource(filename)
if not n:
self.bld.fatal(f"Could not find {filename!r} for {self!r}")
if not n in deps_lst:
deps_lst.append(n)
try:
task = self.create_task(self.type, src, tgt)
except KeyError:
self.bld.fatal(f"rst: Task of type {self.type} not implemented (created by {self})")
task.env = self.env
# add the manual dependencies
if deps_lst:
try:
lst = self.bld.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
self.bld.node_deps[task.uid()] = deps_lst
inst_to = getattr(self, "install_path", None)
if inst_to:
self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:])
self.source = []
def configure(self):
"""
Try to find the rst programs.
Do not raise any error if they are not found.
You'll have to use additional code in configure() to die
if programs were not found.
"""
for p in rst_progs:
self.find_program(p, mandatory=False)
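
# Hedged illustration (an addition, not part of the tool): what the
# include/image scanner regex in parse_rst_node() extracts, shown on a tiny
# inline snippet; it uses only the `re` module already imported above.
if __name__ == "__main__":
    sample = ".. include:: other.rst\n.. |logo| image:: logo.png\n"
    demo_re = re.compile(
        r"^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$", re.M
    )
    for match in demo_re.finditer(sample):
        # prints: include -> other.rst, then: image -> logo.png
        print(match.group("type"), "->", match.group("file"))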
|
[
"waflib.Errors.WafError",
"re.compile",
"waflib.TaskGen.feature",
"waflib.Logs.debug",
"waflib.TaskGen.before_method",
"waflib.Logs.warn"
] |
[((5482, 5496), 'waflib.TaskGen.feature', 'feature', (['"""rst"""'], {}), "('rst')\n", (5489, 5496), False, 'from waflib.TaskGen import feature\n'), ((5498, 5529), 'waflib.TaskGen.before_method', 'before_method', (['"""process_source"""'], {}), "('process_source')\n", (5511, 5529), False, 'from waflib.TaskGen import before_method\n'), ((1309, 1418), 're.compile', 're.compile', (['"""^\\\\s*.. ((?P<subst>\\\\|\\\\S+\\\\|) )?(?P<type>include|image|figure):: (?P<file>.*)$"""', 're.M'], {}), "(\n '^\\\\s*.. ((?P<subst>\\\\|\\\\S+\\\\|) )?(?P<type>include|image|figure):: (?P<file>.*)$'\n , re.M)\n", (1319, 1418), False, 'import re\n'), ((1540, 1588), 'waflib.Logs.debug', 'Logs.debug', (['"""rst: visiting %s: %s"""', 'itype', 'ipath'], {}), "('rst: visiting %s: %s', itype, ipath)\n", (1550, 1588), False, 'from waflib import Logs\n'), ((2470, 2539), 'waflib.Logs.debug', 'Logs.debug', (['"""rst: %r: found the following file deps: %r"""', 'self', 'nodes'], {}), "('rst: %r: found the following file deps: %r', self, nodes)\n", (2480, 2539), False, 'from waflib import Logs\n'), ((2570, 2647), 'waflib.Logs.warn', 'Logs.warn', (['"""rst: %r: could not find the following file deps: %r"""', 'self', 'names'], {}), "('rst: %r: could not find the following file deps: %r', self, names)\n", (2579, 2647), False, 'from waflib import Logs\n'), ((3054, 3113), 'waflib.Errors.WafError', 'Errors.WafError', (['f"""{msg!r} command exit status {retcode!r}"""'], {}), "(f'{msg!r} command exit status {retcode!r}')\n", (3069, 3113), False, 'from waflib import Errors\n'), ((3793, 3854), 'waflib.Logs.debug', 'Logs.debug', (['"""rst: adding dep to %s %s"""', 'attribute', 'stylesheet'], {}), "('rst: adding dep to %s %s', attribute, stylesheet)\n", (3803, 3854), False, 'from waflib import Logs\n')]
|
# from utils import Sample_main
import gradcam_main
import numpy as np
import tensorflow as tf
import argparse
import os
tf.logging.set_verbosity(tf.logging.ERROR) # disable to see tensorflow warnings
def cam(in_path='sample.bmp', out_path='sample.png'):
gradcam_main.cam_vis(in_path, out_path)
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_image', default='sample.bmp', type=str, help='Full name of the input image -- default set to sample.bmp')
parser.add_argument('-o', '--output_image', default='sample.png', type=str, help='Full name of output image (should be .png) -- default set to '
'input_image.png')
args = parser.parse_args()
if args.input_image != 'sample.bmp' and args.output_image == 'sample.png':
out_name = args.input_image
out_name = out_name.replace('bmp', 'png')
else:
out_name = args.output_image
out_name = out_name.replace('bmp', 'png')
cam(args.input_image, out_name)
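
# Hedged sketch (an addition): the effective output-name rule that main()
# implements, isolated as a pure function so it can be checked without
# gradcam_main or TensorFlow installed.
def derive_output_name(input_image, output_image='sample.png'):
    # with the default output name, reuse the input name with a .png extension
    if output_image == 'sample.png':
        return input_image.replace('bmp', 'png')
    return output_image.replace('bmp', 'png')
# e.g. derive_output_name('lesion.bmp') -> 'lesion.png'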
# In case referenced by other modules
if __name__ == '__main__':
main()
|
[
"gradcam_main.cam_vis",
"tensorflow.logging.set_verbosity",
"argparse.ArgumentParser"
] |
[((122, 164), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (146, 164), True, 'import tensorflow as tf\n'), ((266, 305), 'gradcam_main.cam_vis', 'gradcam_main.cam_vis', (['in_path', 'out_path'], {}), '(in_path, out_path)\n', (286, 305), False, 'import gradcam_main\n'), ((333, 358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (356, 358), False, 'import argparse\n')]
|
'''
Part of the dibase.rpi.gpio.test package.
GPIO pin id support classes' platform tests.
Underlying GPIO pin ids are those used by the Linux gpiolib and used
to identify a device's GPIO pins in the Linux sys filesystem GPIO
sub-tree.
Developed by <NAME> / Dibase Limited.
Copyright (c) 2012 Dibase Limited
License: dual: GPL or BSD.
'''
import unittest
import sys
if __name__ == '__main__':
# Add path to directory containing the dibase package directory
sys.path.insert(0, './../../../..')
from dibase.rpi.gpio import pinid
class PinIdRPiPlatformTestCases(unittest.TestCase):
def test_0000_get_rpi_major_revision_index_returns_zero_or_positive_int(self):
returned_rev_index = pinid.PinId._get_rpi_major_revision_index()
self.assertIsNotNone(returned_rev_index)
self.assertIsInstance(returned_rev_index,int)
self.assertTrue(returned_rev_index>=0)
def test_0020_PinId_value_of_p1_sda_0_or_2(self):
rev_index = pinid.PinId._get_rpi_major_revision_index()
p1_sda_gpio_id = pinid.PinId.p1_sda()
self.assertTrue((rev_index==0 and p1_sda_gpio_id==0) or p1_sda_gpio_id==2)
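
# Hedged sketch (an addition): the two PinId helpers exercised above, combined
# into one readable summary; like the tests, it assumes it runs on a Raspberry
# Pi where the board revision can be read.
def describe_p1_sda():
    rev_index = pinid.PinId._get_rpi_major_revision_index()
    return 'P1 SDA is GPIO %d on board revision index %d' % (pinid.PinId.p1_sda(), rev_index)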
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"sys.path.insert",
"dibase.rpi.gpio.pinid.PinId.p1_sda",
"dibase.rpi.gpio.pinid.PinId._get_rpi_major_revision_index"
] |
[((515, 550), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./../../../.."""'], {}), "(0, './../../../..')\n", (530, 550), False, 'import sys\n'), ((1230, 1245), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1243, 1245), False, 'import unittest\n'), ((757, 800), 'dibase.rpi.gpio.pinid.PinId._get_rpi_major_revision_index', 'pinid.PinId._get_rpi_major_revision_index', ([], {}), '()\n', (798, 800), False, 'from dibase.rpi.gpio import pinid\n'), ((1024, 1067), 'dibase.rpi.gpio.pinid.PinId._get_rpi_major_revision_index', 'pinid.PinId._get_rpi_major_revision_index', ([], {}), '()\n', (1065, 1067), False, 'from dibase.rpi.gpio import pinid\n'), ((1092, 1112), 'dibase.rpi.gpio.pinid.PinId.p1_sda', 'pinid.PinId.p1_sda', ([], {}), '()\n', (1110, 1112), False, 'from dibase.rpi.gpio import pinid\n')]
|
"""
Analysis code for plotting vertical flux transport and/or a gif of temperature,
velocity and KE from the merged output of a Dedalus Rayleigh-Bérnard code.
Author: <NAME>
"""
# ====================
# IMPORTS
# ====================
import numpy as np
import h5py
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pathlib
import os
import shutil
import time
import imageio
from dedalus import public as de
from dedalus.tools import post
# ====================
# CLA PARSING
# ====================
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", help="Folder where the processing data is stored", required=True
)
parser.add_argument(
"-t", "--heatmap", help="Plot a gif of the temperature heatmap", action="store_true"
)
parser.add_argument(
"-f", "--flux", help="Plot the average flux contributions", action="store_true"
)
parser.add_argument(
"-k", "--KE", help="Plot the kinetic energy only", action="store_true"
)
args = parser.parse_args()
direc = os.path.normpath(args.input) + "/"
with h5py.File(direc + "run_params/run_params_s1.h5", "r") as f:
a = int(np.array(f["tasks"]["a"]))
y = de.Fourier("y", 256, interval=(0, a), dealias=3 / 2)
z = de.Chebyshev("z", 64, interval=(0, 1), dealias=3 / 2)
y = np.array(y.grid(1))
z = np.array(z.grid(1))
# ====================
# Plot Fluxes
# ====================
if args.flux:
avg_t_start = float(input("Start average at: "))
avg_t_stop = float(input("End average at: "))
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
L_cond_arr = np.array(file["tasks"]["L_cond"])[:, 0]
L_conv_arr = np.array(file["tasks"]["L_conv"])[:, 0]
KE = np.array(file["tasks"]["KE"])[:, 0]
snap_t = np.array(file["scales"]["sim_time"])
if (
(avg_t_start <= snap_t[0])
or (avg_t_start >= snap_t[-1])
or (avg_t_stop <= snap_t[0])
or (avg_t_stop >= snap_t[-1])
):
print(
"Average time period out of simulation range: {} -> {}".format(
snap_t[0], snap_t[-1]
)
)
pass
ASI = np.abs(snap_t - avg_t_start).argmin()
if np.isnan(avg_t_stop):
AEI = -1
else:
AEI = np.abs(snap_t - avg_t_stop).argmin()
avg_t_range = snap_t[AEI] - snap_t[ASI]
print("Averaging between {} and {}".format(snap_t[ASI], snap_t[AEI]))
mean_L_cond = np.mean(np.array(L_cond_arr[ASI:AEI]), axis=0)
mean_L_conv = np.mean(np.array(L_conv_arr[ASI:AEI]), axis=0)
mean_L_tot = mean_L_cond + mean_L_conv
del_L = np.max(np.abs(1.0 - mean_L_tot))
print("max del_L = {}".format(del_L))
fig = plt.figure(figsize=(6, 6))
KE_ax = fig.add_subplot(311)
KE_ax.plot(snap_t, KE, "k", label="Kinetic Energy")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.axvspan(
snap_t[ASI], snap_t[AEI], color="r", alpha=0.5, label="Flux averaging"
)
L_ax = fig.add_subplot(212)
L_ax.plot(z, mean_L_cond, "r", linestyle="-", label=r"$L_{cond}$")
L_ax.plot(z, mean_L_conv, "g", linestyle="-", label=r"$L_{conv}$")
L_ax.plot(z, mean_L_tot, "k", ls="-", label=r"$L_{total}$")
L_ax.set_xlabel("z")
L_ax.set_ylabel("L")
L_ax.legend()
plt.savefig(direc + "fluxes.png")
plt.show()
plt.close()
# ====================
# Plot heatmap
# ====================
if args.heatmap:
filenames = []
os.makedirs(direc + "figure", exist_ok=True)
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
KE = np.array(file["tasks"]["KE"])[:, 0]
with h5py.File(direc + "snapshots/snapshots_s1.h5", "r") as file:
T = np.array(file["tasks"]["T"])
v = np.array(file["tasks"]["v"])
w = np.array(file["tasks"]["w"])
snap_t = np.array(file["scales"]["sim_time"])
snap_iter = np.array(file["scales"]["iteration"])
yy, zz = np.meshgrid(y, z)
maxT = np.max(T)
maxV = np.max(v)
maxW = np.max(w)
n_iter = len(T[:, 0:, 0])
start_time = time.time()
print("Plotting {} graphs".format(n_iter))
try:
for i in range(0, int(n_iter)):
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
T_ax = fig.add_subplot(gs[0:2, 0])
v_ax = fig.add_subplot(gs[0, 1])
w_ax = fig.add_subplot(gs[1, 1])
KE_ax = fig.add_subplot(gs[2, :])
if (i % 50 == 0) and (i != 0):
sec_per_frame = (time.time() - start_time) / i
eta = sec_per_frame * (n_iter - i)
print(
"image {}/{} at {:.3f}ips \t| ETA in {}m {}s".format(
i, n_iter, sec_per_frame, int(eta // 60), int(eta % 60)
)
)
fig.suptitle(
"Iteration: {}\n".format(snap_iter[i])
+ r"Sim Time: {:.2f} $\tau_\kappa$".format(snap_t[i])
)
c1 = v_ax.contourf(
yy,
zz,
np.transpose(v[i, :, :]),
levels=np.linspace(np.min(v), maxV),
cmap="coolwarm",
)
c1_bar = fig.colorbar(c1, ax=v_ax)
c1_bar.set_label("v", rotation=0)
v_ax.set_ylabel("z")
v_ax.set_xlabel("y")
v_ax.invert_xaxis()
c2 = w_ax.contourf(
yy,
zz,
np.transpose(w[i, :, :]),
levels=np.linspace(np.min(w), maxW),
cmap="coolwarm",
)
c2_bar = fig.colorbar(c2, ax=w_ax)
c2_bar.set_label("w", rotation=0)
w_ax.set_ylabel("z")
w_ax.set_xlabel("y")
w_ax.invert_xaxis()
c3 = T_ax.contourf(
yy,
zz,
np.transpose(T[i, :, :]),
levels=np.linspace(0, maxT),
cmap="coolwarm",
)
c3_bar = fig.colorbar(c3, ax=T_ax)
c3_bar.set_label("T", rotation=0)
T_ax.set_ylabel("z")
T_ax.set_xlabel("y")
T_ax.invert_xaxis()
KE_ax.plot(snap_t[:i], KE[:i], "k")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.set_ylim([0, 1.1 * np.max(KE)])
KE_ax.set_xlim([0, np.max(snap_t)])
plt.tight_layout()
plt.savefig(direc + "figure/fig_{:03d}.png".format(i))
filenames.append(direc + "figure/fig_{:03d}.png".format(i))
plt.close()
plt.clf()
except KeyboardInterrupt:
print("ending loop")
print("completed in {:.2f} sec".format(time.time() - start_time))
print("Creating gif...")
with imageio.get_writer(direc + "info.gif", mode="I") as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
print("Removing raw image files...")
shutil.rmtree(direc + "figure")
if args.KE:
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as f:
KE = np.array(f["tasks"]["KE"])[:, 0]
snap_t = np.array(f["scales"]["sim_time"])
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.plot(snap_t, KE, "k")
ax.set_xlabel(r"time [$\tau_\kappa$]")
ax.set_ylabel("KE")
plt.show()
plt.close()
print("done.")
|
[
"numpy.array",
"imageio.get_writer",
"argparse.ArgumentParser",
"numpy.max",
"os.path.normpath",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.min",
"numpy.meshgrid",
"dedalus.public.Fourier",
"numpy.abs",
"matplotlib.pyplot.savefig",
"h5py.File",
"numpy.isnan",
"imageio.imread",
"numpy.transpose",
"time.time",
"dedalus.public.Chebyshev",
"matplotlib.pyplot.show",
"os.makedirs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"shutil.rmtree"
] |
[((553, 578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (576, 578), False, 'import argparse\n'), ((1187, 1239), 'dedalus.public.Fourier', 'de.Fourier', (['"""y"""', '(256)'], {'interval': '(0, a)', 'dealias': '(3 / 2)'}), "('y', 256, interval=(0, a), dealias=3 / 2)\n", (1197, 1239), True, 'from dedalus import public as de\n'), ((1244, 1297), 'dedalus.public.Chebyshev', 'de.Chebyshev', (['"""z"""', '(64)'], {'interval': '(0, 1)', 'dealias': '(3 / 2)'}), "('z', 64, interval=(0, 1), dealias=3 / 2)\n", (1256, 1297), True, 'from dedalus import public as de\n'), ((1042, 1070), 'os.path.normpath', 'os.path.normpath', (['args.input'], {}), '(args.input)\n', (1058, 1070), False, 'import os\n'), ((1083, 1136), 'h5py.File', 'h5py.File', (["(direc + 'run_params/run_params_s1.h5')", '"""r"""'], {}), "(direc + 'run_params/run_params_s1.h5', 'r')\n", (1092, 1136), False, 'import h5py\n'), ((2205, 2225), 'numpy.isnan', 'np.isnan', (['avg_t_stop'], {}), '(avg_t_stop)\n', (2213, 2225), True, 'import numpy as np\n'), ((2696, 2722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (2706, 2722), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3333), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(direc + 'fluxes.png')"], {}), "(direc + 'fluxes.png')\n", (3311, 3333), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3346, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3364), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3362, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3513), 'os.makedirs', 'os.makedirs', (["(direc + 'figure')"], {'exist_ok': '(True)'}), "(direc + 'figure', exist_ok=True)\n", (3480, 3513), False, 'import os\n'), ((3951, 3968), 'numpy.meshgrid', 'np.meshgrid', (['y', 'z'], {}), '(y, z)\n', (3962, 3968), True, 'import numpy as np\n'), ((3981, 3990), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (3987, 3990), True, 'import numpy as np\n'), ((4002, 4011), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (4008, 4011), True, 'import numpy as np\n'), ((4023, 4032), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (4029, 4032), True, 'import numpy as np\n'), ((4081, 4092), 'time.time', 'time.time', ([], {}), '()\n', (4090, 4092), False, 'import time\n'), ((7078, 7109), 'shutil.rmtree', 'shutil.rmtree', (["(direc + 'figure')"], {}), "(direc + 'figure')\n", (7091, 7109), False, 'import shutil\n'), ((7295, 7321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (7305, 7321), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7460, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7467, 7478), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7476, 7478), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1180), 'numpy.array', 'np.array', (["f['tasks']['a']"], {}), "(f['tasks']['a'])\n", (1163, 1180), True, 'import numpy as np\n'), ((1534, 1583), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (1543, 1583), False, 'import h5py\n'), ((1781, 1817), 'numpy.array', 'np.array', (["file['scales']['sim_time']"], {}), "(file['scales']['sim_time'])\n", (1789, 1817), True, 'import numpy as np\n'), ((2450, 2479), 'numpy.array', 'np.array', (['L_cond_arr[ASI:AEI]'], {}), '(L_cond_arr[ASI:AEI])\n', (2458, 2479), True, 'import numpy as np\n'), 
((2515, 2544), 'numpy.array', 'np.array', (['L_conv_arr[ASI:AEI]'], {}), '(L_conv_arr[ASI:AEI])\n', (2523, 2544), True, 'import numpy as np\n'), ((2617, 2641), 'numpy.abs', 'np.abs', (['(1.0 - mean_L_tot)'], {}), '(1.0 - mean_L_tot)\n', (2623, 2641), True, 'import numpy as np\n'), ((3524, 3573), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (3533, 3573), False, 'import h5py\n'), ((3641, 3692), 'h5py.File', 'h5py.File', (["(direc + 'snapshots/snapshots_s1.h5')", '"""r"""'], {}), "(direc + 'snapshots/snapshots_s1.h5', 'r')\n", (3650, 3692), False, 'import h5py\n'), ((3714, 3742), 'numpy.array', 'np.array', (["file['tasks']['T']"], {}), "(file['tasks']['T'])\n", (3722, 3742), True, 'import numpy as np\n'), ((3755, 3783), 'numpy.array', 'np.array', (["file['tasks']['v']"], {}), "(file['tasks']['v'])\n", (3763, 3783), True, 'import numpy as np\n'), ((3796, 3824), 'numpy.array', 'np.array', (["file['tasks']['w']"], {}), "(file['tasks']['w'])\n", (3804, 3824), True, 'import numpy as np\n'), ((3842, 3878), 'numpy.array', 'np.array', (["file['scales']['sim_time']"], {}), "(file['scales']['sim_time'])\n", (3850, 3878), True, 'import numpy as np\n'), ((3899, 3936), 'numpy.array', 'np.array', (["file['scales']['iteration']"], {}), "(file['scales']['iteration'])\n", (3907, 3936), True, 'import numpy as np\n'), ((6854, 6902), 'imageio.get_writer', 'imageio.get_writer', (["(direc + 'info.gif')"], {'mode': '"""I"""'}), "(direc + 'info.gif', mode='I')\n", (6872, 6902), False, 'import imageio\n'), ((7132, 7181), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (7141, 7181), False, 'import h5py\n'), ((7251, 7284), 'numpy.array', 'np.array', (["f['scales']['sim_time']"], {}), "(f['scales']['sim_time'])\n", (7259, 7284), True, 'import numpy as np\n'), ((1614, 1647), 'numpy.array', 'np.array', (["file['tasks']['L_cond']"], {}), "(file['tasks']['L_cond'])\n", (1622, 1647), True, 'import numpy as np\n'), ((1675, 1708), 'numpy.array', 'np.array', (["file['tasks']['L_conv']"], {}), "(file['tasks']['L_conv'])\n", (1683, 1708), True, 'import numpy as np\n'), ((1728, 1757), 'numpy.array', 'np.array', (["file['tasks']['KE']"], {}), "(file['tasks']['KE'])\n", (1736, 1757), True, 'import numpy as np\n'), ((2160, 2188), 'numpy.abs', 'np.abs', (['(snap_t - avg_t_start)'], {}), '(snap_t - avg_t_start)\n', (2166, 2188), True, 'import numpy as np\n'), ((3596, 3625), 'numpy.array', 'np.array', (["file['tasks']['KE']"], {}), "(file['tasks']['KE'])\n", (3604, 3625), True, 'import numpy as np\n'), ((4208, 4234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (4218, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4252, 4299), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(2)', 'nrows': '(3)', 'figure': 'fig'}), '(ncols=2, nrows=3, figure=fig)\n', (4269, 4299), True, 'import matplotlib.gridspec as gridspec\n'), ((6481, 6499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6497, 6499), True, 'import matplotlib.pyplot as plt\n'), ((6651, 6662), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6660, 6662), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6684), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6682, 6684), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6993), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', 
(6983, 6993), False, 'import imageio\n'), ((7201, 7227), 'numpy.array', 'np.array', (["f['tasks']['KE']"], {}), "(f['tasks']['KE'])\n", (7209, 7227), True, 'import numpy as np\n'), ((2268, 2295), 'numpy.abs', 'np.abs', (['(snap_t - avg_t_stop)'], {}), '(snap_t - avg_t_stop)\n', (2274, 2295), True, 'import numpy as np\n'), ((5111, 5135), 'numpy.transpose', 'np.transpose', (['v[i, :, :]'], {}), '(v[i, :, :])\n', (5123, 5135), True, 'import numpy as np\n'), ((5517, 5541), 'numpy.transpose', 'np.transpose', (['w[i, :, :]'], {}), '(w[i, :, :])\n', (5529, 5541), True, 'import numpy as np\n'), ((5923, 5947), 'numpy.transpose', 'np.transpose', (['T[i, :, :]'], {}), '(T[i, :, :])\n', (5935, 5947), True, 'import numpy as np\n'), ((6788, 6799), 'time.time', 'time.time', ([], {}), '()\n', (6797, 6799), False, 'import time\n'), ((5972, 5992), 'numpy.linspace', 'np.linspace', (['(0)', 'maxT'], {}), '(0, maxT)\n', (5983, 5992), True, 'import numpy as np\n'), ((6451, 6465), 'numpy.max', 'np.max', (['snap_t'], {}), '(snap_t)\n', (6457, 6465), True, 'import numpy as np\n'), ((4559, 4570), 'time.time', 'time.time', ([], {}), '()\n', (4568, 4570), False, 'import time\n'), ((5172, 5181), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (5178, 5181), True, 'import numpy as np\n'), ((5578, 5587), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (5584, 5587), True, 'import numpy as np\n'), ((6407, 6417), 'numpy.max', 'np.max', (['KE'], {}), '(KE)\n', (6413, 6417), True, 'import numpy as np\n')]
|
from plumbum import local
from benchbuild import project
from benchbuild.utils import compiler, download, run, wrapping
from benchbuild.utils.cmd import make, mkdir
@download.with_git("https://github.com/bsc-pm/bots", limit=5)
class BOTSGroup(project.Project):
"""
Barcelona OpenMP Task Suite.
Barcelona OpenMP Task Suite is a collection of applications that allow
to test OpenMP tasking implementations and compare its behaviour under
certain circumstances: task tiedness, throttle and cut-offs mechanisms,
single/multiple task generators, etc.
Alignment: Aligns sequences of proteins.
FFT: Computes a Fast Fourier Transformation.
Floorplan: Computes the optimal placement of cells in a floorplan.
Health: Simulates a country health system.
NQueens: Finds solutions of the N Queens problem.
Sort: Uses a mixture of sorting algorithms to sort a vector.
SparseLU: Computes the LU factorization of a sparse matrix.
Strassen: Computes a matrix multiply with Strassen's method.
"""
DOMAIN = 'bots'
GROUP = 'bots'
VERSION = 'HEAD'
path_dict = {
"alignment": "serial/alignment",
"fft": "serial/fft",
"fib": "serial/fib",
"floorplan": "serial/floorplan",
"health": "serial/health",
"knapsack": "serial/knapsack",
"nqueens": "serial/nqueens",
"sort": "serial/sort",
"sparselu": "serial/sparselu",
"strassen": "serial/strassen",
"uts": "serial/uts"
}
input_dict = {
"alignment": ["prot.100.aa", "prot.20.aa"],
"floorplan": ["input.15", "input.20", "input.5"],
"health": ["large.input", "medium.input", "small.input", "test.input"],
"knapsack": [
"knapsack-012.input", "knapsack-016.input", "knapsack-020.input",
"knapsack-024.input", "knapsack-032.input", "knapsack-036.input",
"knapsack-040.input", "knapsack-044.input", "knapsack-048.input",
"knapsack-064.input", "knapsack-096.input", "knapsack-128.input"
],
"uts": [
"huge.input", "large.input", "medium.input", "small.input",
"test.input", "tiny.input"
]
}
SRC_FILE = "bots.git"
def compile(self):
self.download()
makefile_config = local.path(self.src_file) / "config" / "make.config"
clang = compiler.cc(self)
with open(makefile_config, 'w') as config:
lines = [
"LABEL=benchbuild",
"ENABLE_OMPSS=",
"OMPSSC=",
"OMPC=",
"CC={cc}",
"OMPSSLINK=",
"OMPLINK={cc} -fopenmp",
"CLINK={cc}",
"OPT_FLAGS=",
"CC_FLAGS=",
"OMPC_FLAGS=",
"OMPSSC_FLAGS=",
"OMPC_FINAL_FLAGS=",
"OMPSSC_FINAL_FLAG=",
"CLINK_FLAGS=",
"OMPLINK_FLAGS=",
"OMPSSLINK_FLAGS=",
]
lines = [l.format(cc=clang) + "\n" for l in lines]
config.writelines(lines)
mkdir(local.path(self.src_file) / "bin")
with local.cwd(self.src_file):
run.run(make["-C", self.path_dict[self.name]])
def run_tests(self, runner):
binary_name = "{name}.benchbuild.serial".format(name=self.name)
binary_path = local.path(self.src_file) / "bin" / binary_name
exp = wrapping.wrap(binary_path, self)
if self.name in self.input_dict:
for test_input in self.input_dict[self.name]:
input_file = local.path(
self.src_file) / "inputs" / self.name / test_input
runner(exp["-f", input_file])
else:
runner(exp)
class Alignment(BOTSGroup):
NAME = 'alignment'
class FFT(BOTSGroup):
NAME = 'fft'
class Fib(BOTSGroup):
NAME = 'fib'
class FloorPlan(BOTSGroup):
NAME = 'floorplan'
class Health(BOTSGroup):
NAME = 'health'
class Knapsack(BOTSGroup):
NAME = 'knapsack'
class NQueens(BOTSGroup):
NAME = 'nqueens'
class Sort(BOTSGroup):
NAME = 'sort'
class SparseLU(BOTSGroup):
NAME = 'sparselu'
class Strassen(BOTSGroup):
NAME = 'strassen'
class UTS(BOTSGroup):
NAME = 'uts'
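
# Hedged sketch (an addition): how a benchmark name resolves to its serial
# build directory and input files via the class-level tables above; it touches
# only the dictionaries defined on BOTSGroup, not the benchbuild machinery.
def demo_lookup(name='knapsack'):
    build_dir = BOTSGroup.path_dict[name]              # e.g. 'serial/knapsack'
    inputs = BOTSGroup.input_dict.get(name, [])     # [] for benchmarks with no input files
    return build_dir, inputs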
|
[
"benchbuild.utils.compiler.cc",
"benchbuild.utils.wrapping.wrap",
"benchbuild.utils.download.with_git",
"plumbum.local.path",
"plumbum.local.cwd",
"benchbuild.utils.run.run"
] |
[((169, 229), 'benchbuild.utils.download.with_git', 'download.with_git', (['"""https://github.com/bsc-pm/bots"""'], {'limit': '(5)'}), "('https://github.com/bsc-pm/bots', limit=5)\n", (186, 229), False, 'from benchbuild.utils import compiler, download, run, wrapping\n'), ((2385, 2402), 'benchbuild.utils.compiler.cc', 'compiler.cc', (['self'], {}), '(self)\n', (2396, 2402), False, 'from benchbuild.utils import compiler, download, run, wrapping\n'), ((3477, 3509), 'benchbuild.utils.wrapping.wrap', 'wrapping.wrap', (['binary_path', 'self'], {}), '(binary_path, self)\n', (3490, 3509), False, 'from benchbuild.utils import compiler, download, run, wrapping\n'), ((3202, 3226), 'plumbum.local.cwd', 'local.cwd', (['self.src_file'], {}), '(self.src_file)\n', (3211, 3226), False, 'from plumbum import local\n'), ((3240, 3286), 'benchbuild.utils.run.run', 'run.run', (["make['-C', self.path_dict[self.name]]"], {}), "(make['-C', self.path_dict[self.name]])\n", (3247, 3286), False, 'from benchbuild.utils import compiler, download, run, wrapping\n'), ((2316, 2341), 'plumbum.local.path', 'local.path', (['self.src_file'], {}), '(self.src_file)\n', (2326, 2341), False, 'from plumbum import local\n'), ((3154, 3179), 'plumbum.local.path', 'local.path', (['self.src_file'], {}), '(self.src_file)\n', (3164, 3179), False, 'from plumbum import local\n'), ((3415, 3440), 'plumbum.local.path', 'local.path', (['self.src_file'], {}), '(self.src_file)\n', (3425, 3440), False, 'from plumbum import local\n'), ((3639, 3664), 'plumbum.local.path', 'local.path', (['self.src_file'], {}), '(self.src_file)\n', (3649, 3664), False, 'from plumbum import local\n')]
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests covering attention used by the DIN model.
"""
import tensorflow as tf
import unittest
import pytest
import numpy as np
import sys
from pathlib import Path
# Add common module to path
common_path = Path(Path(__file__).absolute().parent.parent.parent)
sys.path.append(str(common_path))
from common.utils import din_attention
from din.din_model import DIN
seed = 3
tf.set_random_seed(seed)
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestDINFCN(unittest.TestCase):
"""Testing att layer"""
@classmethod
def setUpClass(cls):
cls.model_dtype = tf.float32
cls.ATTENTION_SIZE = 1
def test_att_results(self):
# test attention layer output
query_value = np.ones([4, 2], np.float32)
query_value = query_value * 0.8
query_inp = tf.placeholder(shape=[4, 2], dtype='float32')
facts_value = np.ones([4, 8, 2], np.float32)
facts_value = facts_value * 0.5
facts_inp = tf.placeholder(shape=[4, 8, 2], dtype='float32')
mask_value = np.ones([4, 8], np.float32)
mask_value = mask_value * 0.2
mask_inp = tf.placeholder(shape=[4, 8], dtype='float32')
out = din_attention(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(out, feed_dict={query_inp: query_value, facts_inp: facts_value, mask_inp: mask_value})
y0 = np.float32(0.5)
y1 = np.float32(0.5)
self.assertAlmostEqual(output[0, 0, 0], y0, delta = 0.01)
        self.assertAlmostEqual(output[0, 0, 1], y1, delta = 0.01)
def test_fcn_results(self):
# test fcn results
inputs_value = np.ones([2, 6, 2], np.float32)
inp = tf.placeholder(shape=[2, 6, 2], dtype='float32')
y_hat = DIN.build_fcn_net(self, inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
y = sess.run(y_hat, feed_dict={inp: inputs_value})
y0 = np.float32(0.5225718)
y1 = np.float32(0.47742826)
self.assertAlmostEqual(y[0, 0, 0], y0, delta = 0.01)
self.assertAlmostEqual(y[0, 0, 1], y1, delta = 0.01)
|
[
"pytest.mark.ipus",
"numpy.ones",
"numpy.float32",
"tensorflow.compat.v1.Session",
"pathlib.Path",
"tensorflow.placeholder",
"din.din_model.DIN.build_fcn_net",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"common.utils.din_attention"
] |
[((977, 1001), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (995, 1001), True, 'import tensorflow as tf\n'), ((1028, 1047), 'pytest.mark.ipus', 'pytest.mark.ipus', (['(1)'], {}), '(1)\n', (1044, 1047), False, 'import pytest\n'), ((1319, 1346), 'numpy.ones', 'np.ones', (['[4, 2]', 'np.float32'], {}), '([4, 2], np.float32)\n', (1326, 1346), True, 'import numpy as np\n'), ((1407, 1452), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 2]', 'dtype': '"""float32"""'}), "(shape=[4, 2], dtype='float32')\n", (1421, 1452), True, 'import tensorflow as tf\n'), ((1476, 1506), 'numpy.ones', 'np.ones', (['[4, 8, 2]', 'np.float32'], {}), '([4, 8, 2], np.float32)\n', (1483, 1506), True, 'import numpy as np\n'), ((1567, 1615), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 8, 2]', 'dtype': '"""float32"""'}), "(shape=[4, 8, 2], dtype='float32')\n", (1581, 1615), True, 'import tensorflow as tf\n'), ((1638, 1665), 'numpy.ones', 'np.ones', (['[4, 8]', 'np.float32'], {}), '([4, 8], np.float32)\n', (1645, 1665), True, 'import numpy as np\n'), ((1723, 1768), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 8]', 'dtype': '"""float32"""'}), "(shape=[4, 8], dtype='float32')\n", (1737, 1768), True, 'import tensorflow as tf\n'), ((1784, 1850), 'common.utils.din_attention', 'din_attention', (['query_inp', 'facts_inp', 'self.ATTENTION_SIZE', 'mask_inp'], {}), '(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)\n', (1797, 1850), False, 'from common.utils import din_attention\n'), ((2082, 2097), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (2092, 2097), True, 'import numpy as np\n'), ((2111, 2126), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (2121, 2126), True, 'import numpy as np\n'), ((2344, 2374), 'numpy.ones', 'np.ones', (['[2, 6, 2]', 'np.float32'], {}), '([2, 6, 2], np.float32)\n', (2351, 2374), True, 'import numpy as np\n'), ((2389, 2437), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[2, 6, 2]', 'dtype': '"""float32"""'}), "(shape=[2, 6, 2], dtype='float32')\n", (2403, 2437), True, 'import tensorflow as tf\n'), ((2454, 2482), 'din.din_model.DIN.build_fcn_net', 'DIN.build_fcn_net', (['self', 'inp'], {}), '(self, inp)\n', (2471, 2482), False, 'from din.din_model import DIN\n'), ((2660, 2681), 'numpy.float32', 'np.float32', (['(0.5225718)'], {}), '(0.5225718)\n', (2670, 2681), True, 'import numpy as np\n'), ((2695, 2717), 'numpy.float32', 'np.float32', (['(0.47742826)'], {}), '(0.47742826)\n', (2705, 2717), True, 'import numpy as np\n'), ((1864, 1886), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1884, 1886), True, 'import tensorflow as tf\n'), ((2496, 2518), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2516, 2518), True, 'import tensorflow as tf\n'), ((1917, 1950), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1948, 1950), True, 'import tensorflow as tf\n'), ((2549, 2582), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2580, 2582), True, 'import tensorflow as tf\n'), ((816, 830), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (820, 830), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
"""
Determines the reflectance based on r and mua.
"""
import math
import helpers.analyticalvalues as av
def reflectance(mua, r):
"""
mua: the absorption coefficient used.
r: the radial distance used.
"""
values = av.analyticalValues(r, mua)
# the value of the reflectance is determined
return (values.z0 * (values.ueff + values.rho1 ** -1) * math.exp( -values.ueff * values.rho1)
/ (values.rho1 ** 2) + (values.z0 + 2 * values.zb) * (values.ueff + values.rho2 ** -1)
* math.exp( -values.ueff * values.rho2) / (values.rho2 ** 2)) / 4 / math.pi
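# Hedged usage sketch (illustrative numbers only): `analyticalValues` supplies the optical
# quantities (z0, zb, ueff, rho1, rho2) used above, so a call reduces to e.g.
#
#     R = reflectance(mua=0.1, r=1.0)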
|
[
"math.exp",
"helpers.analyticalvalues.analyticalValues"
] |
[((266, 293), 'helpers.analyticalvalues.analyticalValues', 'av.analyticalValues', (['r', 'mua'], {}), '(r, mua)\n', (285, 293), True, 'import helpers.analyticalvalues as av\n'), ((408, 444), 'math.exp', 'math.exp', (['(-values.ueff * values.rho1)'], {}), '(-values.ueff * values.rho1)\n', (416, 444), False, 'import math\n'), ((561, 597), 'math.exp', 'math.exp', (['(-values.ueff * values.rho2)'], {}), '(-values.ueff * values.rho2)\n', (569, 597), False, 'import math\n')]
|
from contextlib import contextmanager
from functools import partial
from inspect import Parameter
from random import choice, randint, uniform
import string
from typing import Any
from i2 import Sig
from numbers import Number
from sys import platform
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from strand import run_process
from streamlitfront.run_app import run_app
from time import sleep
import dill
import pickle
STREAMLIT_APP_URL = 'http://localhost:8501'
@contextmanager
def dispatch_funcs_with_selenium(funcs, headless=False):
"""
Dispatches the functions in a streamlit application and build a selenium object
representing the root of the DOM for the application.
"""
serialize_funcs = False
try:
pickle.dumps(funcs)
    except Exception:
serialize_funcs = True
_funcs = dill.dumps(funcs) if serialize_funcs else funcs
with run_process(func=run_app, func_kwargs={'funcs': _funcs}, is_ready=3) as proc:
options = ChromeOptions()
# options.add_argument('--no-sandbox')
options.add_argument('--window-size=1920,1080')
if headless:
options.add_argument('--headless')
# options.add_argument('--disable-gpu')
# options.add_argument('--allow-running-insecure-content')
dom = Chrome(service=Service(ChromeDriverManager().install()), options=options)
dom.get(STREAMLIT_APP_URL)
try:
yield dom
finally:
dom.close()
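# Hedged usage sketch (not part of the original suite): `add_numbers` is a hypothetical
# function, used only to show how this context manager chains with the helpers defined
# below (select_func, send_input, compute_output):
#
#     def add_numbers(a: int, b: int) -> int:
#         return a + b
#
#     with dispatch_funcs_with_selenium([add_numbers], headless=True) as root:
#         select_func(0, root)                  # pick the function in the radio group
#         send_input(3, 0, root)                # first argument widget
#         send_input(4, 1, root)                # second argument widget
#         assert compute_output(add_numbers, root) == 7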
def give_a_chance_to_render_element(func):
"""
Gives a chance to the application to render the element by trying up to three times
with 1 second of interval to find it before raising an error.
"""
    # @wraps(func)
def wrapper(*args, **kwargs):
def _try_to_find_element(intent_nb):
try:
return func(*args, **kwargs)
except NoSuchElementException:
if intent_nb < 3:
sleep(1)
return _try_to_find_element(intent_nb + 1)
raise
return _try_to_find_element(1)
return wrapper
@give_a_chance_to_render_element
def find_element_by_css_selector(css_selector, root):
return root.find_element(By.CSS_SELECTOR, css_selector)
def select_func(idx, root):
radio_button = find_element_by_css_selector(
f".block-container .stRadio div[role='radiogroup'] label:nth-child({idx + 1})",
root,
)
radio_button.click()
sleep(0.5)
def send_input(input_, idx, root):
def get_input_type():
if isinstance(input_, Number):
return 'number'
if isinstance(input_, str):
return 'text'
input_type = get_input_type()
input_el = find_element_by_css_selector(
f".main .element-container:nth-child({idx + 2}) input[type='{input_type}']",
root,
)
input_el.click()
select_all_first_key = Keys.COMMAND if platform == 'darwin' else Keys.CONTROL
input_el.send_keys(select_all_first_key, 'a')
input_el.send_keys(str(input_))
def compute_output(func, root):
def get_output(previous_output=None, intent_nb=1):
output_el = find_element_by_css_selector(output_css_selector, root)
if output_el.find_elements(By.TAG_NAME, 'code'):
output_el = find_element_by_css_selector('code', output_el)
output = output_el.text
return_annot = Sig(func).return_annotation
if return_annot not in (Parameter.empty, Any):
output = return_annot(output)
if previous_output is not None and output == previous_output and intent_nb < 3:
sleep(1)
return get_output(previous_output, intent_nb + 1)
return output
def get_previous_output():
if root.find_elements(By.CSS_SELECTOR, output_css_selector):
return get_output()
nb_args = len(Sig(func))
output_css_selector = f'.element-container:nth-child({nb_args + 3}) .stMarkdown p'
previous_output = get_previous_output()
submit_button = find_element_by_css_selector(
f'.element-container:nth-child({nb_args + 2}) button', root
)
submit_button.click()
return get_output(previous_output)
|
[
"selenium.webdriver.ChromeOptions",
"i2.Sig",
"webdriver_manager.chrome.ChromeDriverManager",
"pickle.dumps",
"time.sleep",
"strand.run_process",
"dill.dumps"
] |
[((2734, 2744), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (2739, 2744), False, 'from time import sleep\n'), ((1014, 1033), 'pickle.dumps', 'pickle.dumps', (['funcs'], {}), '(funcs)\n', (1026, 1033), False, 'import pickle\n'), ((1090, 1107), 'dill.dumps', 'dill.dumps', (['funcs'], {}), '(funcs)\n', (1100, 1107), False, 'import dill\n'), ((1147, 1215), 'strand.run_process', 'run_process', ([], {'func': 'run_app', 'func_kwargs': "{'funcs': _funcs}", 'is_ready': '(3)'}), "(func=run_app, func_kwargs={'funcs': _funcs}, is_ready=3)\n", (1158, 1215), False, 'from strand import run_process\n'), ((1243, 1258), 'selenium.webdriver.ChromeOptions', 'ChromeOptions', ([], {}), '()\n', (1256, 1258), False, 'from selenium.webdriver import Chrome, ChromeOptions\n'), ((4130, 4139), 'i2.Sig', 'Sig', (['func'], {}), '(func)\n', (4133, 4139), False, 'from i2 import Sig\n'), ((3660, 3669), 'i2.Sig', 'Sig', (['func'], {}), '(func)\n', (3663, 3669), False, 'from i2 import Sig\n'), ((3885, 3893), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3890, 3893), False, 'from time import sleep\n'), ((2215, 2223), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2220, 2223), False, 'from time import sleep\n'), ((1582, 1603), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (1601, 1603), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')]
|
import requests, json, bs4, urllib.parse, math
from . import Course, Platform
class Edx(Platform):
name = 'edX'
def _urls(self):
res = requests.get(make_url())
count = json.loads(res.text)['objects']['count']
num_pages = math.ceil(count / 20)
urls = [make_url(page=page) for page in range(1, num_pages + 1)]
return urls
def _parse(self, url):
res = requests.get(url)
courses = []
results = res.json()['objects']['results']
for result in results:
title = result['title']
if result['full_description']:
description = html_to_text(result['full_description'])
else:
description = result['short_description']
snippet = ''
if result['short_description'] and result['short_description'] != '.':
snippet = result['short_description']
url = result['marketing_url']
tags = [subject_uuids.get(uuid) for uuid in result['subject_uuids']]
partners = [result.get('org')]
course = Course(title, partners, self.name,
description, tags, url, snippet=snippet)
courses.append(course)
return courses
subject_uuids = {'d8244ef2-45fb-4be3-a9d7-a6749cee3b19': 'Architecture',
'2cc66121-0c07-407b-96c4-99305359a36f': 'Art & Culture',
'9d5b5edb-254a-4d54-b430-776f1f00eaf0': 'Biology & Life Sciences',
'409d43f7-ff36-4834-9c28-252132347d87': 'Business & Management',
'c5ec1f86-4e59-4273-8e22-ceec2b8d10a2': 'Chemistry',
'605bb663-a342-4cf3-b5a5-fee2f33f1642': 'Communication',
'e52e2134-a4e4-4fcb-805f-cbef40812580': 'Computer Science',
'a168a80a-4b6c-4d92-9f1d-4c235206feaf': 'Data Analysis & Statistics',
'34173fb0-fe3d-4715-b4e0-02a9426a873c': 'Design',
'bab458d9-19b3-476e-864f-8abd1d1aab44': 'Economics & Finance',
'8ac7a3da-a60b-4565-b361-384baaa49279': 'Education & Teacher Training',
'337dfb23-571e-49d7-9c8e-385120dea6f3': 'Electronics',
'07406bfc-76c4-46cc-a5bf-2deace7995a6': 'Energy & Earth Sciences',
'0d7bb9ed-4492-419a-bb44-415adafd9406': 'Engineering',
'8aaac548-1930-4614-aeb4-a089dae7ae26': 'Environmental Studies',
'8a552a20-963e-475c-9b0d-4c5efe22d015': 'Ethics',
'caa4db79-f325-41ca-8e09-d5bb6e148240': 'Food & Nutrition',
'51a13a1c-7fc8-42a6-9e96-6636d10056e2': 'Health & Safety',
                 'c8579e1c-99f2-4a95-988c-3542909f055e': 'History',
'00e5d5e0-ce45-4114-84a1-50a5be706da5': 'Humanities',
'32768203-e738-4627-8b04-78b0ed2b44cb': 'Language',
'4925b67d-01c4-4287-a8d1-a3e0066113b8': 'Law',
'74b6ed2a-3ba0-49be-adc9-53f7256a12e1': 'Literature',
'a669e004-cbc0-4b68-8882-234c12e1cce4': 'Math',
'a5db73b2-05b4-4284-beef-c7876ec1499b': 'Medicine',
'f520dcc1-f5b7-42fe-a757-8acfb1e9e79d': 'Music',
'830f46dc-624e-46f4-9df0-e2bc6b346956': 'Philosophy & Ethics',
'88eb7ca7-2296-457d-8aac-e5f7503a9333': 'Physics',
'f830cfeb-bb7e-46ed-859d-e2a9f136499f': 'Science',
'eefb009b-0a02-49e9-b1b1-249982b6ce86': 'Social Sciences'}
def make_url(page=1):
params = {'selected_facets[]': 'transcript_languages_exact:English',
'partner': 'edx',
'content_type[]': 'courserun',
'page': page,
'page_size': 20}
return 'https://www.edx.org/api/v1/catalog/search?' + urllib.parse.urlencode(params)
def html_to_text(html):
soup = bs4.BeautifulSoup(html, 'lxml')
return soup.text
|
[
"bs4.BeautifulSoup",
"json.loads",
"math.ceil",
"requests.get"
] |
[((3918, 3949), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (3935, 3949), False, 'import requests, json, bs4, urllib.parse, math\n'), ((269, 290), 'math.ceil', 'math.ceil', (['(count / 20)'], {}), '(count / 20)\n', (278, 290), False, 'import requests, json, bs4, urllib.parse, math\n'), ((436, 453), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (448, 453), False, 'import requests, json, bs4, urllib.parse, math\n'), ((208, 228), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (218, 228), False, 'import requests, json, bs4, urllib.parse, math\n')]
|
# Generated by Django 2.2.5 on 2019-09-09 21:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('relations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Datetime on which the object was created.', verbose_name='created at ')),
('modified', models.DateTimeField(auto_now=True, help_text='Datetime on which the object was last modified.', verbose_name='modified at ')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('is_active', models.BooleanField(default=True, help_text='Are you currently actively doing it?', verbose_name='Is active')),
('last_time', models.DateField(blank=True, null=True, verbose_name='Last time done')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
]
|
[
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((456, 549), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (472, 549), False, 'from django.db import migrations, models\n'), ((576, 703), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'help_text': '"""Datetime on which the object was created."""', 'verbose_name': '"""created at """'}), "(auto_now_add=True, help_text=\n 'Datetime on which the object was created.', verbose_name='created at ')\n", (596, 703), False, 'from django.db import migrations, models\n'), ((730, 865), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'help_text': '"""Datetime on which the object was last modified."""', 'verbose_name': '"""modified at """'}), "(auto_now=True, help_text=\n 'Datetime on which the object was last modified.', verbose_name=\n 'modified at ')\n", (750, 865), False, 'from django.db import migrations, models\n'), ((883, 914), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (899, 914), False, 'from django.db import migrations, models\n'), ((949, 967), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (965, 967), False, 'from django.db import migrations, models\n'), ((1000, 1114), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Are you currently actively doing it?"""', 'verbose_name': '"""Is active"""'}), "(default=True, help_text=\n 'Are you currently actively doing it?', verbose_name='Is active')\n", (1019, 1114), False, 'from django.db import migrations, models\n'), ((1142, 1212), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Last time done"""'}), "(blank=True, null=True, verbose_name='Last time done')\n", (1158, 1212), False, 'from django.db import migrations, models\n'), ((1241, 1337), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1258, 1337), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
import pytest
"""
Test 268. Missing Number
"""
@pytest.fixture(scope="session")
def init_variables_268():
from src.leetcode_268_missing_number import Solution
solution = Solution()
def _init_variables_268():
return solution
yield _init_variables_268
class TestClass268:
def test_solution_0(self, init_variables_268):
assert init_variables_268().missingNumber([3, 0, 1]) == 2
def test_solution_1(self, init_variables_268):
assert init_variables_268().missingNumber([0, 1]) == 2
def test_solution_2(self, init_variables_268):
assert init_variables_268().missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1]) == 8
def test_solution_3(self, init_variables_268):
assert init_variables_268().missingNumber([0]) == 1
|
[
"pytest.fixture",
"src.leetcode_268_missing_number.Solution"
] |
[((74, 105), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (88, 105), False, 'import pytest\n'), ((205, 215), 'src.leetcode_268_missing_number.Solution', 'Solution', ([], {}), '()\n', (213, 215), False, 'from src.leetcode_268_missing_number import Solution\n')]
|
import unittest
import os
import json
from functions.db.connector import *
from functions.db.models import *
from functions.authentication import *
sample_search = {
"search_groups": [
{
"search_terms": ["blockchain", "distributed ledger"],
"match": "OR"
},
{
"search_terms": ["energy", "infrastructure", "smart meter"],
"match": "OR"
}
],
"match": "AND"
}
db_dict = {"db_name": "hallo", "api_key": "test"}
class TestConnector(unittest.TestCase):
def setUp(self):
name = "test_review"
self.review = add_review(name)
self.sample_query = new_query(self.review, sample_search)
with open('test_results.json', 'r') as file:
self.results = json.load(file)
save_results(self.results['records'], self.review, self.sample_query)
def test_add_review(self):
name = "test_review"
new_review = add_review(name)
review = get_review_by_id(new_review._id)
review.delete()
self.assertEqual(review._id, new_review._id)
def test_save_results(self):
query = new_query(self.review, sample_search)
jsonpath = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "..", "test_results.json"))
with open(jsonpath, 'r') as file:
results = json.load(file)
save_results(results['records'], self.review, query)
results_from_db = get_persisted_results(query).get('results')
self.assertEqual(len(results_from_db), len(results['records']))
def test_pagination(self):
page1 = get_persisted_results(self.sample_query, 1, 10).get('results')
self.assertTrue(len(page1) == 10)
page2 = get_persisted_results(self.sample_query, 2, 10).get('results')
self.assertTrue(len(page2) == 10)
self.assertNotEqual(page1, page2)
def test_get_list_of_dois_for_review(self):
dois = get_dois_for_review(self.review)
for record in self.results.get('records'):
self.assertTrue(record.get('doi') in dois)
def test_update_score(self):
user = User(name="test user")
doi = self.results.get('records')[0].get('doi')
result = get_result_by_doi(self.review, doi)
self.assertEqual(len(result.scores), 0)
evaluation = {
"user": "testmann",
"score": 2,
"comment": "test_comment"
}
update_score(self.review, result, evaluation)
self.assertEqual(result.scores[0].score, 2)
evaluation = {
"user": "testmann",
"score": 5,
"comment": "joiefjlke"
}
update_score(self.review, result, evaluation)
self.assertEqual(result.scores[0].score, 5)
self.assertEqual(len(result.scores), 1)
user.delete()
def test_delete_results_for_review(self):
num_results = len(get_dois_for_review(self.review))
self.assertGreater(num_results, 0)
delete_results_for_review(self.review)
num_results = len(get_dois_for_review(self.review))
        self.assertEqual(num_results, 0)
def tearDown(self):
delete_results_for_review(self.review)
self.review.delete()
class TestUserDB(unittest.TestCase):
# TODO rewrite test cases
def setUp(self):
username = "philosapiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
# databases = DatabaseInfo()
# databases.name = "SPRINGER_API"
# databases.api_key = "5150230aac7a227ve33693f99b5697aa"
# self.user = add_user(username, name, surname, email, password)
def test_add_user(self):
username = "philosapfiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
db_name = "SPRINGER_API"
api_key = "5150230aac7a227ve33693f99b5697aa"
# databases312 = DatabaseInfo.from_document(sample_databases)
# print(databases312)
new_user = add_user(username, name, surname, email, password)
# update_databases(new_user, db_dict)
# user = get_user_by_id(new_user.name)
def test_get_user_by_username(self):
user = get_user_by_username("philosapiens")
print(user.email)
def test_update_user(self):
user = get_user_by_username("philosapiens")
print(user.email)
update_user(user, user.name, "btesfd", "<EMAIL>", user.password)
user = get_user_by_username("philosapiens")
print(user.email)
def test_get_all_users(self):
print(str(get_users()))
def test_delete_users(self):
user = get_user_by_username("philosapiens")
delete_user(user)
class TestAuth(unittest.TestCase):
def setUp(self):
username = "philosapiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
def test_login(self):
username = "philosapiens"
password = "<PASSWORD>"
user = get_user_by_username(username)
password_correct = check_if_password_is_correct(user, password)
print(password_correct)
token = get_jwt_for_user(user)
print(type(token))
add_jwt_to_session(user, token)
is_token_valid = check_for_token(token)
print(is_token_valid)
is_token_in_session = check_if_jwt_is_in_session(token)
print(is_token_in_session)
# remove_jwt_from_session(user)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"json.load",
"os.path.dirname"
] |
[((5653, 5668), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5666, 5668), False, 'import unittest\n'), ((781, 796), 'json.load', 'json.load', (['file'], {}), '(file)\n', (790, 796), False, 'import json\n'), ((1378, 1393), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1387, 1393), False, 'import json\n'), ((1253, 1278), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1268, 1278), False, 'import os\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
import os
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
from test_update_loss_scaling_op_npu import TestUpdateLossScalingOpBad
paddle.enable_static()
SEED = 2021
class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad):
def setUp(self):
self.set_npu()
self.op_type = "update_loss_scaling"
self.place = paddle.NPUPlace(0)
self.init()
fluid.core.globals()['FLAGS_min_loss_scaling'] = 1639
found_inf = np.array([True], dtype=np.bool_)
x = np.random.random((1024, 1024)).astype(self.dtype)
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
x[i[0]][j[0]] = np.inf
self.inputs = {
'X': [('x0', x)],
'FoundInfinite': found_inf,
'PrevLossScaling': self.prev_loss_scaling,
'InGoodSteps': self.num_good_steps,
'InBadSteps': self.num_bad_steps
}
self.outputs = {
'Out': [('out0', np.zeros_like(x))],
'LossScaling': np.array([1639.0]).astype(self.dtype),
'OutGoodSteps': self.zero_steps,
'OutBadSteps': self.zero_steps
}
def init(self):
self.incr_ratio = 2.0
self.decr_ratio = 0.8
self.dtype = np.float32
self.prev_loss_scaling = np.array([2048]).astype(self.dtype)
self.num_good_steps = np.array([999], dtype=np.int32)
self.num_bad_steps = np.array([1], dtype=np.int32)
self.zero_steps = np.array([0], dtype=np.int32)
self.attrs = {
'incr_every_n_steps': 1000,
'decr_every_n_nan_or_inf': 2,
'incr_ratio': self.incr_ratio,
'decr_ratio': self.decr_ratio,
}
if __name__ == '__main__':
unittest.main()
|
[
"numpy.random.random",
"numpy.zeros_like",
"paddle.enable_static",
"numpy.array",
"numpy.random.randint",
"paddle.fluid.core.globals",
"unittest.main",
"paddle.NPUPlace",
"sys.path.append"
] |
[((670, 691), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (685, 691), False, 'import sys\n'), ((895, 917), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (915, 917), False, 'import paddle\n'), ((2536, 2551), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2549, 2551), False, 'import unittest\n'), ((1119, 1137), 'paddle.NPUPlace', 'paddle.NPUPlace', (['(0)'], {}), '(0)\n', (1134, 1137), False, 'import paddle\n'), ((1241, 1273), 'numpy.array', 'np.array', (['[True]'], {'dtype': 'np.bool_'}), '([True], dtype=np.bool_)\n', (1249, 1273), True, 'import numpy as np\n'), ((1348, 1377), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1024)', '(1)'], {}), '(0, 1024, 1)\n', (1365, 1377), True, 'import numpy as np\n'), ((1390, 1419), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1024)', '(1)'], {}), '(0, 1024, 1)\n', (1407, 1419), True, 'import numpy as np\n'), ((2155, 2186), 'numpy.array', 'np.array', (['[999]'], {'dtype': 'np.int32'}), '([999], dtype=np.int32)\n', (2163, 2186), True, 'import numpy as np\n'), ((2216, 2245), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (2224, 2245), True, 'import numpy as np\n'), ((2272, 2301), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (2280, 2301), True, 'import numpy as np\n'), ((1167, 1187), 'paddle.fluid.core.globals', 'fluid.core.globals', ([], {}), '()\n', (1185, 1187), True, 'import paddle.fluid as fluid\n'), ((1286, 1316), 'numpy.random.random', 'np.random.random', (['(1024, 1024)'], {}), '((1024, 1024))\n', (1302, 1316), True, 'import numpy as np\n'), ((2089, 2105), 'numpy.array', 'np.array', (['[2048]'], {}), '([2048])\n', (2097, 2105), True, 'import numpy as np\n'), ((1759, 1775), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1772, 1775), True, 'import numpy as np\n'), ((1806, 1824), 'numpy.array', 'np.array', (['[1639.0]'], {}), '([1639.0])\n', (1814, 1824), True, 'import numpy as np\n')]
|
import aspose.email as ae
import datetime
def run():
# The path to the File directory.
dataDir = "Data/"
#ExStart: SetEmailHeaders
# Create an instance of MailMessage class
eml = ae.MailMessage()
# Specify ReplyTo, From, To field, Cc and Bcc Addresses
eml.reply_to_list.Add("<EMAIL>")
eml.from_address = "<EMAIL>"
eml.to.append(ae.MailAddress("<EMAIL>", "Recipient 1"))
eml.to.append(ae.MailAddress("<EMAIL>", "Recipient 2"))
eml.cc.append(ae.MailAddress("<EMAIL>", "Recipient 3"))
eml.bcc.append(ae.MailAddress("<EMAIL>", "Recipient 4"))
# Specify Date, Message subject, XMailer, Secret Header, Save message to disc
eml.subject = "test mail"
eml.date = datetime.datetime(2006, 3, 6, 12, 00)
eml.xmailer = "Aspose.Email"
eml.headers.Add("secret-header", "mystery")
eml.save(dataDir + "SetEmailHeaders_out.msg", ae.SaveOptions.default_msg)
#ExEnd: SetEmailHeaders
if __name__ == '__main__':
run()
|
[
"datetime.datetime",
"aspose.email.MailAddress",
"aspose.email.MailMessage"
] |
[((201, 217), 'aspose.email.MailMessage', 'ae.MailMessage', ([], {}), '()\n', (215, 217), True, 'import aspose.email as ae\n'), ((722, 758), 'datetime.datetime', 'datetime.datetime', (['(2006)', '(3)', '(6)', '(12)', '(0)'], {}), '(2006, 3, 6, 12, 0)\n', (739, 758), False, 'import datetime\n'), ((371, 411), 'aspose.email.MailAddress', 'ae.MailAddress', (['"""<EMAIL>"""', '"""Recipient 1"""'], {}), "('<EMAIL>', 'Recipient 1')\n", (385, 411), True, 'import aspose.email as ae\n'), ((431, 471), 'aspose.email.MailAddress', 'ae.MailAddress', (['"""<EMAIL>"""', '"""Recipient 2"""'], {}), "('<EMAIL>', 'Recipient 2')\n", (445, 471), True, 'import aspose.email as ae\n'), ((491, 531), 'aspose.email.MailAddress', 'ae.MailAddress', (['"""<EMAIL>"""', '"""Recipient 3"""'], {}), "('<EMAIL>', 'Recipient 3')\n", (505, 531), True, 'import aspose.email as ae\n'), ((552, 592), 'aspose.email.MailAddress', 'ae.MailAddress', (['"""<EMAIL>"""', '"""Recipient 4"""'], {}), "('<EMAIL>', 'Recipient 4')\n", (566, 592), True, 'import aspose.email as ae\n')]
|
import math
import numpy as np
import pandas as pd
class PenmanMonteithDaily(object):
r"""The class *PenmanMonteithDaily* calculates daily potential evapotranspiration according to the Penman-Monteith
method as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ (Allen et al.,
1998). Reference evapotranspiration for a hypothetical grass reference crop (:math:`h=12` *cm*;
:math:`albedo=0.23`, and :math:`LAI=2.88`) is calculated by default. Wind and humidity observations at 2 meters
height as well as soil heat flux density :math:`G=0.0` *MJ/m²day* are also assumed by default.
Default values can be changed in the keyword arguments (`**kwargs`) described below.
The class *PenmanMonteithDaily* solves equation 3 in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_:
.. math::
ET = \frac{\Delta (R_n - G) + \rho_a c_p \frac{e_s - e_a}{r_a}}
{\lambda \left[ \Delta + \gamma \left( 1 + \frac{r_s}{r_a} \right) \right]}
\tag{eq. 3, p. 19}
:param elevation: elevation above sea level (*z*) *[m]*. Used in :meth:`clear_sky_shortwave_radiation` and
:meth:`atmospheric_pressure`
:type elevation: float
:param latitude: latitude (:math:`\varphi`) *[decimal degrees]*. Used in :meth:`sunset_hour_angle` and
:meth:`extraterrestrial_radiation`
:type latitude: float
:Keyword Arguments:
* **albedo** (*float*) - albedo or canopy reflection coefficient (:math:`\alpha`) *[-]*.
Range: :math:`0.0 \leq \alpha \leq 1.0`. Default :math:`albedo=0.23` for the hypothetical grass
reference crop. Used in :meth:`net_shortwave_radiation`
* **h** (*float*) - crop height (*h*) *[m]*. Default :math:`h=0.12` for the hypothetical grass reference
crop. Required to calculate the zero plane displacement height (:math:`d`) *[m]* and the roughness length
governing momentum (:math:`z_{om}`) *[m]*, both necessary for the aerodynamic resistance (:math:`r_a`) *[s/m]*.
See :meth:`aerodynamic_resistance_factor`
* **lai** (*float*) - leaf area index (:math:`LAI`) *[-]*. Default :math:`lai=2.88` for the hypothetical
grass reference crop. See *BOX 5* in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ and
:meth:`bulk_surface_resistance`
* **rl** (*float*) - bulk stomatal resistance of well-illuminated leaf (:math:`r_l`) *[s/m]*. Default
:math:`rl=100.0` for any crop. See :meth:`bulk_surface_resistance`
* **zm** (*float*) - height of wind measurements (:math:`z_m`) *[m]*. Default :math:`zm=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **zh** (*float*) - height of humidity measurements (:math:`z_h`) *[m]*. Default :math:`zh=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **g** (*float*) - soil heat flux density (:math:`G`) *[MJ/m²day]*. Default :math:`g=0.0`. This
corresponds to :math:`G` in eq. 3, p. 19 above. It can be also given with daily parameters in :meth:`et0`
.. note::
Only :attr:`elevation` and :attr:`latitude` are mandatory parameters of :meth:`PenmanMonteithDaily()`.
:attr:`albedo`, :attr:`h`, and :attr:`lai` are only necessary when calculating evapotranspiration for crops
other than reference grass.
:ivar doy: day of year *[-]*
:ivar z: elevation in meters above sea level (*z*) *[m]*
:ivar p: atmospheric pressure (*P*) *[kPa]*
:ivar u2: wind speed at height :math:`z` (:math:`u_2`) *[m/s]*
:ivar ld: latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. See :meth:`latent_heat_of_vaporization()`
:ivar s: slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*.
See :meth:`slope_of_saturation_vapour_pressure_curve()`
:ivar psych: psychrometric constant (:math:`\gamma`) *[kPa/°C]*. See :meth:`psychrometric_constant()`
:ivar mn: daylight hours (:math:`N`) *[hours]*. See :meth:`daylight_hours()`
:ivar es: saturation vapour pressure (:math:`e_s`) *[kPa]*. See :meth:`saturation_vapour_pressure()`
:ivar ea: actual vapour pressure (:math:`e_a`) *[kPa]*. See :meth:`actual_vapour_pressure()`
:ivar ra: daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation()`
:ivar rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation()`
:ivar rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*.
See :meth:`clear_sky_shortwave_radiation()`
:ivar rns: net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]*. See :meth:`net_shortwave_radiation()`
:ivar rnl: net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*. See :meth:`net_longwave_radiation()`
:ivar rn: net radiation (:math:`R_{n}`) *[MJ/m²day]*. :math:`R_{n} = R_{ns} - R_{nl}`
:ivar etr: radiation component of reference evapotranspiration *[mm/day]*
:ivar etw: wind component of reference evapotranspiration *[mm/day]*
:ivar et: reference evapotranspiration *[mm/day]*
Object Constants:
* **e** - ratio molecular weight of water vapour/dry air (:math:`\varepsilon`) *[-]*.
:math:`e = 0.622`
* **r** - specific gas constant *[kJ/kg.K]*. :math:`r = 0.287`
* **k** - von Karman constant (:math:`k`) *[-]*, see
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ eq. 4.
:math:`k=0.41`
Object crop specific factors:
* **d_factor** - factor of the zero plane displacement height (:math:`d`) *[-]*. :math:`d\_factor = 2.0 / 3.0`
* **zom_factor** - factor of the roughness length governing momentum transfer (:math:`z_{om}`) *[-]*.
:math:`zom\_factor = 0.123`
* **zoh_factor** - factor of the roughness length governing transfer of heat and vapour (:math:`z_{oh}`) *[-]*.
:math:`zoh\_factor = 0.1`
* **lai_active_factor** - factor of the active (sunlit) leaf area index (:math:`LAI_{active}`) *[-]* (it
considers that generally only the upper half of dense clipped grass is actively contributing to the surface
heat and vapour transfer). :math:`lai\_active\_factor = 0.5`
Calculation with :meth:`et0`::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- et0 = pm.et0(...)
Calculation with :meth:`et0_frame` given a *pandas.DataFrame()* as input parameter::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- df = pm.et0_frame(df, ...)
"""
def __init__(self, elevation, latitude, **kwargs):
self.albedo = kwargs.get('albedo', 0.23) # albedo
self.h = kwargs.get('h', 0.12) # crop height h [m]
self.zm = kwargs.get('zm', 2.0) # height of wind measurements [m]
self.zh = kwargs.get('zh', 2.0) # roughness length governing transfer of heat and vapour [m]
self.lai = kwargs.get('lai', 2.88) # LAI dependence
self.rl = kwargs.get('rl', 100.0) # The stomatal resistance
self.g_default = kwargs.get('g', 0.0) # soil heat flux density [MJ/m²day]
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
self.e = 0.622
self.r = 0.287
self.k = 0.41
self.d_factor = 2.0 / 3.0
self.zom_factor = 0.123
self.zoh_factor = 0.1
self.lai_active_factor = 0.5
        if latitude is not None:
days = np.array(range(367))
latitude = float(np.radians(latitude))
dr_366 = self.inverse_relative_distance_earth_sun(days)
sd_366 = np.array([self.solar_declination(day) for day in range(367)])
ws_366 = np.array([self.sunset_hour_angle(latitude, s) for s in sd_366])
self.daylight_hours_366 = np.array([PenmanMonteithDaily.daylight_hours(w) for w in ws_366])
self.ra_366 = np.array([self.extraterrestrial_radiation(
dr_366[i], ws_366[i], latitude, sd_366[i]) for i in range(len(dr_366))])
self.rs0_366 = np.array([self.clear_sky_shortwave_radiation(
ra, elevation=elevation) for ra in self.ra_366])
else:
self.daylight_hours_366 = None
self.ra_366 = None
self.rs0_366 = None
self.z = elevation
self.p = PenmanMonteithDaily.atmospheric_pressure(self.z)
ra_factor = self.aerodynamic_resistance_factor()
self.f1 = 86400 * self.e / (1.01 * self.r * ra_factor)
"""f1 = (specific heat at constant pressure) * (mean air density at constant pressure) /
(1.01 * :attr:`r` * :meth:`aerodynamic_resistance_factor`).
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ Box 6
"""
self.f2 = self.bulk_surface_resistance() / ra_factor
r""":math:`f_1 = \frac{rs}{f_{ra}}` with :math:`f_{ra}` = :meth:`aerodynamic_resistance_factor`"""
def reset(self):
r"""Reset the following output attributes before calculating :math:`ETo`: :math:`doy`, :math:`u2`,
:math:`ld`, :math:`s`, :math:`pc`, :math:`mn`, :math:`es`, :math:`ea`, :math:`ra`,
:math:`rs`, :math:`rs0`, :math:`rns`, :math:`rnl`, :math:`rn`, :math:`etr`, :math:`etw`, and :math:`et`
"""
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
@staticmethod
def atmospheric_pressure(z):
r""" Return the atmospheric pressure (:math:`P`) *[kPa]* as a function of the elevation above sea level as
defined in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 7, p. 31):
.. math::
P = 101.3\left(\frac{293-0.0065z}{293}\right)^{5.26}
The atmospheric pressure (:math:`P`) is the pressure exerted by the weight of the earth's atmosphere.
Evaporation at high altitudes is promoted due to low atmospheric pressure as expressed in the psychrometric
constant. The effect is, however, small and in the calculation procedures, the average value for a location
is sufficient. A simplification of the ideal gas law, assuming :math:`20` *°C* for a standard atmosphere,
can be employed to calculate :math:`P`
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_).
:param z: elevation above sea level *[m]*
:type z: float or np.array
:return: (*float or np.array*) atmospheric pressure (:math:`P`) *[kPa]*
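
        A quick sanity check, assuming sea level (the equation then reduces to the
        standard atmospheric pressure)::

            PenmanMonteithDaily.atmospheric_pressure(0.0)   # -> 101.3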
"""
return 101.3 * ((293.0 - 0.0065 * z) / 293.0) ** 5.26
@staticmethod
def latent_heat_of_vaporization(temperature=20):
r"""Return the latent heat of vaporization (:math:`\lambda`) *[MJ/kg]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(Annex 3, eq. 3-1, p. 223):
.. math::
\lambda = 2.501-(2.361 * 10^{-3})T
:param temperature: air temperature (:math:`T`) *[°C]*. Default :math:`temperature=20`
:type temperature: float or np.array
:return: (*float or np.array*) latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*.
Default :math:`\lambda=2.45378`
"""
return 2.501 - 2.361e-3 * temperature
@staticmethod
def psychrometric_constant(p, **kwargs):
r"""Return the psychrometric constant (:math:`\gamma`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
eq. 8, p. 32:
.. math::
\gamma = \frac{c_p P}{\varepsilon \lambda}
or, using default values:
.. math::
\gamma = a_{psy} \cdot P
:param p: atmospheric pressure (:math:`P`) *[kPa]*
:type p: float or np.array
:Keyword Arguments:
* **lamda** (*float*) - latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. Default :math:`lamda=2.45`.
See Used in :meth:`latent_heat_of_vaporization`
* **cp** (*float*) - specific heat at constant pressure (:math:`c_p`) *[MJ/kg]*. Default
:math:`cp=1.013e^{-3}`
* **epsilon** (*float*) - ratio molecular weight of water vapour/dry air (:math:`\epsilon`) *[-]*.
Default :math:`epsilon=0.622`
* **a_psy** (*float*) - coefficient depending on the type of the ventilation of the bulb *[1/°C]*. Examples:
* :math:`a_{psy} = 0.000665` (default)
* :math:`a_{psy} = 0.000662` for ventilated (Asmann type) psychrometers, with an air movement of some 5
*m/s*
* :math:`a_{psy} = 0.000800` for natural ventilated psychrometers (about 1 *m/s*)
* :math:`a_{psy} = 0.001200` for non-ventilated psychrometers installed indoors
The method uses :math:`a_{psy}` if given, otherwise eq. 8 (see above) with given or default values. Default
values correspond to :math:`a_{psy} = 0.000665` as argument.
:return: (*float or np.array*) psychrometric constant (:math:`\gamma`) *[kPa/°C]*
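
        Example, assuming standard sea-level pressure and the default coefficients::

            PenmanMonteithDaily.psychrometric_constant(101.3)   # -> approx. 0.067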
"""
if 'a_psy' in kwargs:
return kwargs.get('a_psy', 0.000665) * p
else:
return (kwargs.get('cp', 1.013e-3) * p) / (kwargs.get('epsilon', 0.622) * kwargs.get('lamda', 2.45))
@staticmethod
def saturation_vapour_pressure(*temperature):
r"""Return the saturation vapour pressure (:math:`e_s`) *[kPa]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 11, p. 36):
.. math::
e^{°}(T) = 0.6108 exp \left[\frac{17.27 T}{T + 237.3}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) saturation vapour pressure (:math:`e_s`) *[kPa]*
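
        Example, for a single air temperature of 20 *°C* (FAO 56 tabulates about 2.34 *kPa*)::

            PenmanMonteithDaily.saturation_vapour_pressure(20.0)   # -> approx. 2.34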
"""
t = np.array([0.6108 * np.exp((17.27 * t) / (t + 237.3)) for t in temperature])
t = np.mean(t, axis=0)
return t
@staticmethod
def slope_of_saturation_vapour_pressure_curve(*temperature):
r"""Return the slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 13, p. 37):
.. math::
\Delta = 4098\left[\frac{0.6108exp\left(\frac{17.27 T}{T + 237.3}\right)}{(T + 237.3)^{2}}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*
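
        Example, at 20 *°C* (FAO 56 tabulates roughly 0.145 *kPa/°C*)::

            PenmanMonteithDaily.slope_of_saturation_vapour_pressure_curve(20.0)   # -> approx. 0.145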
"""
sl = np.array([(4098.0 * PenmanMonteithDaily.saturation_vapour_pressure(t)) / ((t + 237.3) ** 2)
for t in temperature])
return np.mean(sl, axis=0)
@staticmethod
def actual_vapour_pressure(**kwargs):
"""Return the actual vapour pressure (:math:`e_a`) *[kPa]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(p. 37 , 38 , and 39):
:Keyword Arguments:
* **rh_min** (*float*) - 0.0 to 100.0 *[%]*
* **rh_max** (*float*) - 0.0 to 100.0 *[%]*
* **es_min** (*float*) - saturation vapour pressure for :math:`t\_min` *[kPa]*
* **es_max** (*float*) - saturation vapour pressure for :math:`t\_max` *[kPa]*
* **t_min** (*float*) - minimum air temperature *[°C]*
* **t_max** (*float*) - maximum air temperature *[°C]*
* **t_dew** (*float*) - dew point temperature *[°C]*
* **t_wet** (*float*) - wet bulb temperature *[°C]*
* **t_dry** (*float*) - dry bulb temperature *[°C]*
* **apsy** (*float*) - coefficient depending on the type of ventilation of the wet bulb *[-]*
:return: (*float or np.array*) actual vapour pressure (:math:`e_a`) *[kPa]*
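
        Example with daily extremes; the input values follow FAO 56 (Example 5), which
        reports about 1.70 *kPa*::

            PenmanMonteithDaily.actual_vapour_pressure(t_min=18.0, t_max=25.0,
                                                       rh_min=54.0, rh_max=82.0)   # -> approx. 1.70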
"""
try:
rh_min = kwargs['rh_min'] / 100.0
rh_max = kwargs['rh_max'] / 100.0
if 'es_min' in kwargs and 'es_max' in kwargs:
es_min = kwargs['es_min']
es_max = kwargs['es_max']
else:
es_min = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_min'])
es_max = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_max'])
return (rh_max * es_min + rh_min * es_max) / 2.0
except KeyError:
t_dew = kwargs.get('t_dew', None)
return 0.6108 * math.exp((17.27 * t_dew) / (t_dew + 237.3))
def aerodynamic_resistance_factor(self):
r"""Return the aerodynamic resistance (:math:`r_a`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 4, p. 20):
.. math::
r_a = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2 u_z }
where (see :meth:`PenmanMonteithDaily()`):
:math:`u_z` --- the wind speed *[m/s]* at height :math:`z` (see :meth:`et0()`)
:math:`k` --- von Karman's constant *[-]*
:math:`zm` --- height of wind measurements *[m]*
:math:`zh` --- height of air humidity measurements *[m]*
The aerodynamic resistance factor :math:`f_{r_a}` is constant for a given crop:
.. math::
f_{r_a} = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2}
with the zero plane displacement height (:math:`d`):
.. math::
d = f_d \cdot h
and roughness length governing momentum transfer (:math:`z_{om}`):
.. math::
z_{om} = f_{zom} \cdot h
where:
:math:`f_d` --- defined in :attr:`d_factor`
:math:`f_{zom}` --- defined in in :attr:`zom_factor`
:return: (*float*) aerodynamic resistance factor :math:`f_{r_a}`
"""
# zero plane displacement height, d [m]
d = self.d_factor * self.h
# roughness length governing momentum transfer [m]
zom = self.zom_factor * self.h
# roughness length governing transfer of heat and vapour [m]
zoh = self.zoh_factor * zom
return math.log((self.zm - d) / zom) * math.log((self.zh - d) / zoh) / (self.k ** 2)
def bulk_surface_resistance(self):
r"""Return (bulk) surface resistance (:math:`r_s`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 5, p. 21):
.. math::
r_s = \frac{ r_l } { LAI_{active} }
where:
:math:`r_l` --- the bulk stomatal resistance of the well-illuminated leaf *[s/m]*
:math:`LAI_{active}` --- the active (sunlit) leaf area index *[m² (leaf area) / m² (soil surface)]*
A general equation for :math:`LAI_{active}` is:
.. math::
LAI_{active} = 0.5 LAI
with:
.. math::
LAI = 24 h
where :math:`h` is an optional input parameter in :class:`PenmanMonteithDaily`.
:return: (*float*) (bulk) surface resistance :math:`r_s` *[s/m]*
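
        With the default values :math:`r_l = 100` *[s/m]* and :math:`LAI = 2.88`, this gives
        :math:`r_s = 100 / (0.5 \cdot 2.88) \approx 69` *[s/m]*, close to the value of about
        70 *[s/m]* quoted in FAO 56 for the hypothetical grass reference crop.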
"""
#
# active (sunlit) leaf area index [m^2 (leaf area) / m^2 (soil surface)]
lai_active = self.lai_active_factor * self.lai
rs = self.rl / lai_active
return rs
@staticmethod
def to_u2(uz, z):
r""" Return the calculated wind speed at 2 meters above ground surface (:math:`u_2`) *[m/s]*:
.. math::
u_2 = \frac{ 4.87 u_z}{ \ln{(67.8 z - 5.42)}}
:param uz: measured wind speed at :math:`z` meters above ground surface *[m/s]*
:type uz: float or np.array
:param z: height of measurement above ground surface *[m]*
:type z: float
:return: (*float or np.array*) wind speed at 2 meters above ground surface *[m/s]*
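
        Example, assuming a wind speed of 3.2 *m/s* measured at 10 *m* height; FAO 56
        (Example 14) reports about 2.4 *m/s* at 2 *m*::

            PenmanMonteithDaily.to_u2(3.2, 10.0)   # -> approx. 2.39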
"""
return uz * 4.87 / np.log(67.8 * z - 5.42)
@staticmethod
def extraterrestrial_radiation(dr, ws, lat, sd):
r"""Return the extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 21, p. 46):
.. math::
R_a = \frac{24(60)}{\pi} G_{sc} d_r [ \omega_s \sin(\varphi) \sin(\delta) + \cos(\varphi) \cos(\delta)
\sin(\omega_s)]
:param dr: inverse relative distance Earth-Sun (:math:`d_r`) *[-]*.
See :meth:`inverse_relative_distance_earth_sun`
:type dr: float
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float
:return: *(float or np.array)* daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*
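
        Example, chaining the helper methods for 3 September (:math:`J = 246`) at 20° S;
        FAO 56 (Example 8) reports about 32.2 *[MJ/m²day]* for this case::

            lat = float(np.radians(-20.0))
            dr = PenmanMonteithDaily.inverse_relative_distance_earth_sun(246)
            sd = PenmanMonteithDaily.solar_declination(246)
            ws = PenmanMonteithDaily.sunset_hour_angle(lat, sd)
            ra = PenmanMonteithDaily.extraterrestrial_radiation(dr, ws, lat, sd)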
"""
# solar_constant = 0.0820 # MJ.m-2.min-1
# (24.0 * 60.0 / pi) * solar_constant = 37.586031360582005
return 37.586031360582005 * dr * (ws * np.sin(lat) * np.sin(sd) + np.cos(lat) * np.cos(sd) * np.sin(ws))
@staticmethod
def inverse_relative_distance_earth_sun(day):
r"""Return the inverse relative distance Earth-Sun (:math:`d_r`) *[-]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 23, p. 46):
.. math::
d_r = 1 + 0.033 \cos{ \left( \frac{2 \pi}{365} J \right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int or np.array
:return: *(float or np.array)* inverse relative distance Earth-Sun (:math:`d_r`) *[-]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 1 + 0.033 * np.cos(0.01721420632103996 * day)
@staticmethod
def solar_declination(day):
r"""Return the solar declination (:math:`\delta`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 24, p. 46):
.. math::
\delta = 0.409 \sin{ \left( \frac{2 \pi}{365} J - 1.39\right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int
:return: (*float or np.array*) solar declination (:math:`\delta`) *[rad]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 0.409 * np.sin(0.01721420632103996 * day - 1.39)
@staticmethod
def sunset_hour_angle(lat, sd):
r"""Return the sunset hour angle (:math:`\omega_s`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 25, p. 46):
.. math::
            \omega_s = \arccos{ \left[-\tan(\varphi)\tan(\delta)\right]}
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float or np.array
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float or np.array
:return: (*float or np.array*) sunset hour angle (:math:`\omega_s`) *[rad]*
"""
return np.arccos(-np.tan(sd) * np.tan(lat))
@staticmethod
def daylight_hours(ws):
r"""Return the daylight hours (:math:`N`) *[hour]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 34, p. 49):
.. math::
N = \frac{24}{\pi} \omega_s
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float or np.numpy
:return: (*float or np.numpy*) daylight hours (:math:`N`) *[hour]*
"""
# 24.0 / pi = 7.639437268410976
return 7.639437268410976 * ws
@staticmethod
def clear_sky_shortwave_radiation(ra, elevation=0.0, a_s=0.25, b_s=0.50):
r"""Return the clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. It is required for computing
:meth:`net_longwave_radiation`.
For near sea level or when calibrated values for :math:`a_s` and :math:`b_s` are available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_, eq. 36,
p. 51):
.. math::
R_{so} = (a_s + b_s ) R_a
When calibrated values for :math:`a_s` and :math:`b_s` are not available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_,
eq. 37, p. 51):
.. math::
            R_{so} = (0.75 + 2 \cdot 10^{-5} z) R_a
where :math:`z` is the station elevation above sea level *[m]*.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.numpy
:param elevation: meters above sea level see (:math:`z`) [m]. See :attr:`elevation`
:type elevation: float or np.numpy
:param a_s: regression constant (:math:`a_s`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction of
extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
:type a_s: float or np.numpy
:param b_s: regression constant (:math:`b_s`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
:type b_s: float or np.numpy
:return: (*float or np.numpy*) daily clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*
"""
rs0 = ((a_s + b_s) + 2e-5 * elevation) * ra
return rs0
@staticmethod
def shortwave_radiation(ra, n, mn, a_s=0.25, b_s=0.50):
r"""Return the daily shortwave radiation (:math:`R_s`) *[MJ/m²day]* according to the Angstrom formula as
described in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 35, p. 50):
.. math::
R_s = \left( a_s + b_s \frac{n}{N} \right) R_a
Depending on atmospheric conditions (humidity, dust) and solar declination (latitude and month), the Angstrom
values :math:`a_s` and :math:`b_s` will vary. Where no actual solar radiation data are available and no
calibration has been carried out for improved :math:`a_s` and :math:`b_s` parameters, the values
:math:`a_s = 0.25` and :math:`b_s = 0.50` are recommended.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.array
:param n: actual duration of sunshine or cloudless hours (:math:`n`) *[hour]*
:type n: float or np.array
:param mn: maximum possible duration of sunshine or daylight hours (:math:`N`) *[hour]*
See :meth:`daylight_hours`
        :type mn: float or np.array
        :param a_s: regression constant (:math:`a_s`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction
of extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
        :type a_s: float or np.array
        :param b_s: regression constant (:math:`b_s`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
        :type b_s: float or np.array
        :return: (*float or np.array*) daily total shortwave radiation (:math:`R_s`) *[MJ/m²day]* reaching the earth
.. note::
If shortwave radiation (i.e., solar radiation) measurements are available, :meth:`shortwave_radiation`
            function is not needed. Measurements of shortwave radiation may be used directly as input data in
:meth:`et0`.
"""
rns = (a_s + b_s * n / mn) * ra
return rns
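    # Worked example (added note): on a clear day n = N, so rns = (a_s + b_s) * ra
    # = 0.75 * ra with the default constants; on a fully overcast day n = 0 and
    # rns = a_s * ra = 0.25 * ra.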
@staticmethod
def net_shortwave_radiation(rs, albedo):
r"""The net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* resulting from the balance between incoming
and reflected solar radiation as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 38, p. 51):
.. math::
            R_{ns} = (1 - \alpha) R_s
:param rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param albedo: albedo or reflection coefficient (:math:`\alpha` *[-]*). Range:
:math:`0.0 \leq \alpha \leq 1.0` (:math:`\alpha=0.23` for the hypothetical grass reference crop).
See :class:`PenmanMonteithDaily` and :meth:`et0`
:type albedo: float or np.array
:return: (*float or np.array*) daily net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* reaching the earth
"""
return (1.0 - albedo) * rs
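    # Worked example (added note): for the reference grass crop albedo of 0.23,
    # the net shortwave radiation is (1 - 0.23) * rs = 0.77 * rs.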
@staticmethod
def net_longwave_radiation(t_min, t_max, rs, rs0, ea=None):
r"""Return the net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 39, p. 52):
.. math::
R_{nl} = \sigma\left[\frac{T_{max,K}^4 + T_{min,K}^4}{2}\right](0.34-0.14\sqrt{e_a})\left(1.35
\frac{R_s}{R_{so}}-0.35\right)
        :param t_min: minimum daily air temperature (:math:`T_{min}`) *[°C]*
:type t_min: float or np.array
        :param t_max: maximum daily air temperature (:math:`T_{max}`) *[°C]*
:type t_max: float or np.array
:param rs: shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. See
:meth:`clear_sky_shortwave_radiation`
:type rs0: float or np.array
:param ea: actual vapour pressure (:math:`e_a`) *[kPa]*
:type ea: float or np.array
:return: (*float or np.array*) daily net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*
.. note::
The :math:`R_s/R_{so}` term in the equation above must be limited so that :math:`R_s/R_{so} \leq 1.0`.
"""
t_min = t_min + 273.15
t_max = t_max + 273.15
if ea is not None:
rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * (0.34 - 0.14 * np.sqrt(ea)) * (1.35 * rs / rs0 - 0.35)
        else:
            # ea unavailable: use the empirical emissivity-based substitute for the
            # (0.34 - 0.14 * sqrt(ea)) term, -0.02 + 0.261 * exp(-7.77e-4 * T^2),
            # with the mean air temperature T expressed in degrees Celsius.
            t_mean = (t_min + t_max) / 2.0 - 273.15
            rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * \
                (-0.02 + 0.261 * np.exp(-7.77e-4 * t_mean ** 2)) * (1.35 * rs / rs0 - 0.35)
return rln
def et0(self, **kwargs):
r"""Returns potential evapotranspiration (:math:`ETo`) *[mm/day]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_. Reference
        (grass) potential evapotranspiration is returned for default constructor values. If values in `**kwargs` are
arrays, their lengths must be the same.
:Keyword Arguments:
* **date** (*str, datetime.date, datetime.datetime, pandas.TimeStamp, or np.array*)
* **doy** (*int or np.array*) - day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`.
It is not used if date is given
* **u2** (*float or np.array*) - wind speed at 2 meters above ground surface *[m/s]*
* **uz** (*float or np.array*) - measured wind speed at :math:`z` meters above ground surface *[m/s]*
* **z** (*float or np.array*) - height of measurement above ground surface *[m]*
* **t_mean** (*float or np.array*) - daily mean air temperature *[°C]*
* **t_min** (*float or np.array*) - daily minimum air temperature *[°C]*
* **t_max** (*float or np.array*) - daily maximum air temperature *[°C]*
* **rh_mean** (*float or np.array*) - daily mean relative humidity *[%]*
* **rh_min** (*float or np.array*) - daily minimum relative humidity *[%]*
* **rh_max** (*float or np.array*) - daily maximum relative humidity *[%]*
* **rs** (*float or np.array*) - solar or shortwave radiation *[MJ/m²day]*
* **n** (*float or np.array*) - daily actual duration of sunshine or cloudless hours *[hour]*
* **g** (*float or np.array*) - soil heat flux density *[MJ/m²day]*. If not given, *g* defined in
:meth:`PenmanMonteithDaily` will be used
* **a_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`a_s = 0.25`
* **b_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`b_s = 0.50`
            * **negative_rnl** (*bool*) - if ``True``, negative net longwave radiation values are set to zero. Default: ``False``
            * **negative_et0** (*bool*) - if ``True``, negative reference evapotranspiration values are set to zero. Default: ``False``
:return: (*float or np.array*) potential evapotranspiration (:math:`ETo`) *[mm/day]*
Cases:
* If date and doy are given, :math:`doy` is disregarded
* if :math:`uz` is given, :math:`z` must also be given
* if :math:`u2` and (:math:`uz`, :math:`z`) are given, both :math:`uz` and :math:`z` are disregarded
* if :math:`rs` and :math:`n` are given, :math:`n` will be disregarded
* The best options for air temperature are, in this order: 1) t_min, t_max, and t_mean, 2) t_min, t_max, and
              3) t_mean
* The best options for relative air humidity are, in this order: 1) rh_max and rh_min, 2) rh_max, and 3)
rh_mean
Example 1::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(doy=187, u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 2::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(date='2001-07-06', u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 3::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> date=np.array(['2001-07-06', '2001-07-06'])
>>> u2=np.array([2.078, 2.078])
>>> t_min=np.array([12.3, 12.3])
>>> t_max=np.array([21.5, 21.5])
>>> rh_min=np.array([63, 63])
>>> rh_max=np.array([84, 84])
>>> n=np.array([9.25, 9.25])
>>> et0 = pm.et0(date=date, u2=u2, t_min=t_min, t_max=t_max, rh_min=rh_min, rh_max=rh_max, n=n)
>>> print(et0)
[3.87296872 3.87296872]
"""
self.reset()
try:
self.u2 = kwargs.get('u2', None)
if self.u2 is None:
self.u2 = self.to_u2(kwargs['uz'], kwargs['z'])
except KeyError:
            raise KeyError('Penman-Monteith: Either u2 or both uz and z must be given')
t_min = kwargs.get('t_min', None)
if t_min is None:
t_min = kwargs['t_mean']
t_max = kwargs.get('t_max', None)
if t_max is None:
t_max = kwargs['t_mean']
t_mean = kwargs.get('t_mean', None)
rh_min = kwargs.get('rh_min', None)
rh_max = kwargs.get('rh_max', None)
if rh_max is not None:
if rh_min is None:
rh_min = rh_max
else:
rh_min = rh_max = kwargs['rh_mean']
self.doy = kwargs.get('doy', None)
if self.doy is None:
self.doy = pd.to_datetime(kwargs['date']).dayofyear
self.rs = kwargs.get('rs', None)
n = kwargs.get('n', None)
g = kwargs.get('g', None)
if g is None:
g = self.g_default
a_s = kwargs.get('a_s', 0.25)
b_s = kwargs.get('b_s', 0.50)
if t_mean is None:
t_mean = (t_min + t_max) / 2.0
self.ld = PenmanMonteithDaily.latent_heat_of_vaporization(t_mean)
# In FAO 56, where delta occurs in the numerator and denominator, the slope
# of the vapour pressure curve is calculated using mean air temperature (Equation 9)
self.s = PenmanMonteithDaily.slope_of_saturation_vapour_pressure_curve(t_mean)
self.pc = PenmanMonteithDaily.psychrometric_constant(self.p, lamda=self.ld)
self.es = PenmanMonteithDaily.saturation_vapour_pressure(t_min, t_max)
self.ea = PenmanMonteithDaily.actual_vapour_pressure(rh_min=rh_min, rh_max=rh_max, t_min=t_min, t_max=t_max)
try:
self.ra = np.array([self.ra_366[i] for i in self.doy])
self.rs0 = np.array([self.rs0_366[i] for i in self.doy])
if self.rs is None:
self.mn = np.array([self.daylight_hours_366[i] for i in self.doy])
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = np.where(self.rs > self.rs0, self.rs0, self.rs)
except TypeError:
self.ra = self.ra_366[self.doy]
self.rs0 = self.rs0_366[self.doy]
if self.rs is None:
self.mn = self.daylight_hours_366[self.doy]
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = self.rs0 if self.rs > self.rs0 else self.rs
self.rns = self.net_shortwave_radiation(self.rs, self.albedo)
self.rnl = self.net_longwave_radiation(t_min, t_max, self.rs, self.rs0, self.ea)
if kwargs.get('negative_rnl', False) and self.rnl < 0.0:
self.rnl = 0.0
self.rn = self.rns - self.rnl
# denominator of FAO 56 eq. 3
etd = self.ld * (self.s + self.pc * (1 + self.f2 * self.u2))
# ETo energy component of FAO 56 eq. 3
self.etr = self.s * (self.rn - g) / etd
# ETo wind component of FAO 56 eq. 3
self.etw = (self.ld * self.pc * self.u2 * self.f1 * (self.es - self.ea) / (t_mean + 273.0)) / etd
# Reference evapotranspiration
self.et = self.etr + self.etw
self.et = np.where(self.et < 0.0, 0.0, self.et)
try:
self.et = float(self.et)
except TypeError:
pass
        if kwargs.get('negative_et0', False) and self.et < 0.0:
self.et = 0.0
return self.et
def et0_frame(self, df, **kwargs):
"""Return the input DataFrame extended by :meth:`et0` and further calculation parameters.
:param df: pandas DataFrame with columns corresponding to the inputs described in :meth:`et0`
:type df: pandas.DataFrame
:Keyword Arguments:
* **show_all** (*bool*) - show all results if :math:`True`, otherwise set `parameter=True` to show individual
parameters. For example :math:`doy=True`, :math:`ld=True`, etc. See :meth:`PenmanMonteithDaily`
:return: (*pandas.DataFrame*) DataFrame
"""
doy_str = kwargs.get('doy', 'doy')
date_str = kwargs.get('date', 'date')
u2_str = kwargs.get('u2', 'u2')
uz_str = kwargs.get('uz', 'uz')
z_str = kwargs.get('z', 'z')
t_mean_str = kwargs.get('t_mean', 't_mean')
t_min_str = kwargs.get('t_min', 't_min')
t_max_str = kwargs.get('t_max', 't_max')
rh_mean_str = kwargs.get('rh_mean', 'rh_mean')
rh_min_str = kwargs.get('rh_min', 'rh_min')
rh_max_str = kwargs.get('rh_max', 'rh_max')
rs_str = kwargs.get('rs', 'rs')
n_str = kwargs.get('n', 'n')
g_str = kwargs.get('g', 'g')
columns = df.columns
doy = df[doy_str].values if doy_str in columns else None
date = df[date_str].values if date_str in columns else None
u2 = df[u2_str].values if u2_str in columns else None
uz = df[uz_str].values if uz_str in columns else None
z = df[z_str].values if z_str in columns else None
t_mean = df[t_mean_str].values if t_mean_str in columns else None
t_min = df[t_min_str].values if t_min_str in columns else None
t_max = df[t_max_str].values if t_max_str in columns else None
rh_mean = df[rh_mean_str].values if rh_mean_str in columns else None
rh_min = df[rh_min_str].values if rh_min_str in columns else None
rh_max = df[rh_max_str].values if rh_max_str in columns else None
rs = df[rs_str].values if rs_str in columns else None
n = df[n_str].values if n_str in columns else None
g = df[g_str].values if g_str in columns else None
self.et0(doy=doy, date=date, u2=u2, uz=uz, z=z, t_mean=t_mean, t_min=t_min, t_max=t_max,
rh_mean=rh_mean, rh_min=rh_min, rh_max=rh_max, rs=rs, n=n, g=g)
show_all = kwargs.get('show_all', True)
if show_all:
if doy is None:
df['DoY'] = self.doy
df['Lambda'] = self.ld
df['Psy'] = self.pc
df['Delta'] = self.s
df['es'] = self.es
df['ea'] = self.ea
df['Rs'] = self.rs
df['Rns'] = self.rns
df['Rnl'] = self.rnl
df['ET0r'] = self.etr
df['ET0w'] = self.etw
df['ET0'] = self.et
else:
if kwargs.get('Lambda', False):
df['Lambda'] = self.ld
if kwargs.get('Psy', False):
df['Psy'] = self.pc
if kwargs.get('Delta', False):
df['Delta'] = self.s
if kwargs.get('es', False):
df['es'] = self.es
if kwargs.get('ea', False):
df['ea'] = self.ea
if kwargs.get('Rs', False):
df['Rs'] = self.rs
if kwargs.get('Rns', False):
df['Rns'] = self.rns
if kwargs.get('Rnl', False):
df['Rnl'] = self.rnl
if kwargs.get('ET0r', False):
df['ET0r'] = self.etr
if kwargs.get('ET0w', False):
df['ET0w'] = self.etw
if kwargs.get('ET0', True):
df['ET0'] = self.et
return df
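# --- Added usage sketch (not part of the original module) -------------------
# Illustrates how the radiation helpers above chain together for a single day.
# The latitude, day of year and sunshine duration below are arbitrary example
# values, not data shipped with the library.
def _radiation_terms_example(latitude_deg=50.8, doy=187, sunshine_hours=9.25):
    lat = np.radians(latitude_deg)
    sd = PenmanMonteithDaily.solar_declination(doy)
    ws = PenmanMonteithDaily.sunset_hour_angle(lat, sd)
    n_max = PenmanMonteithDaily.daylight_hours(ws)
    # n/N is the sunshine fraction used by shortwave_radiation(); ra itself
    # would come from extraterrestrial_radiation() (signature not shown here).
    sunshine_fraction = sunshine_hours / n_max
    return sd, ws, n_max, sunshine_fraction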
|
[
"numpy.radians",
"numpy.mean",
"numpy.sqrt",
"numpy.tan",
"numpy.where",
"numpy.log",
"math.log",
"numpy.exp",
"numpy.array",
"numpy.cos",
"numpy.sin",
"math.exp",
"pandas.to_datetime"
] |
[((15037, 15055), 'numpy.mean', 'np.mean', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (15044, 15055), True, 'import numpy as np\n'), ((15924, 15943), 'numpy.mean', 'np.mean', (['sl'], {'axis': '(0)'}), '(sl, axis=0)\n', (15931, 15943), True, 'import numpy as np\n'), ((40204, 40241), 'numpy.where', 'np.where', (['(self.et < 0.0)', '(0.0)', 'self.et'], {}), '(self.et < 0.0, 0.0, self.et)\n', (40212, 40241), True, 'import numpy as np\n'), ((21214, 21237), 'numpy.log', 'np.log', (['(67.8 * z - 5.42)'], {}), '(67.8 * z - 5.42)\n', (21220, 21237), True, 'import numpy as np\n'), ((23873, 23913), 'numpy.sin', 'np.sin', (['(0.01721420632103996 * day - 1.39)'], {}), '(0.01721420632103996 * day - 1.39)\n', (23879, 23913), True, 'import numpy as np\n'), ((38516, 38560), 'numpy.array', 'np.array', (['[self.ra_366[i] for i in self.doy]'], {}), '([self.ra_366[i] for i in self.doy])\n', (38524, 38560), True, 'import numpy as np\n'), ((38584, 38629), 'numpy.array', 'np.array', (['[self.rs0_366[i] for i in self.doy]'], {}), '([self.rs0_366[i] for i in self.doy])\n', (38592, 38629), True, 'import numpy as np\n'), ((8125, 8145), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (8135, 8145), True, 'import numpy as np\n'), ((19481, 19510), 'math.log', 'math.log', (['((self.zm - d) / zom)'], {}), '((self.zm - d) / zom)\n', (19489, 19510), False, 'import math\n'), ((19513, 19542), 'math.log', 'math.log', (['((self.zh - d) / zoh)'], {}), '((self.zh - d) / zoh)\n', (19521, 19542), False, 'import math\n'), ((23204, 23237), 'numpy.cos', 'np.cos', (['(0.01721420632103996 * day)'], {}), '(0.01721420632103996 * day)\n', (23210, 23237), True, 'import numpy as np\n'), ((24632, 24643), 'numpy.tan', 'np.tan', (['lat'], {}), '(lat)\n', (24638, 24643), True, 'import numpy as np\n'), ((37506, 37536), 'pandas.to_datetime', 'pd.to_datetime', (["kwargs['date']"], {}), "(kwargs['date'])\n", (37520, 37536), True, 'import pandas as pd\n'), ((38688, 38744), 'numpy.array', 'np.array', (['[self.daylight_hours_366[i] for i in self.doy]'], {}), '([self.daylight_hours_366[i] for i in self.doy])\n', (38696, 38744), True, 'import numpy as np\n'), ((38954, 39001), 'numpy.where', 'np.where', (['(self.rs > self.rs0)', 'self.rs0', 'self.rs'], {}), '(self.rs > self.rs0, self.rs0, self.rs)\n', (38962, 39001), True, 'import numpy as np\n'), ((14968, 14999), 'numpy.exp', 'np.exp', (['(17.27 * t / (t + 237.3))'], {}), '(17.27 * t / (t + 237.3))\n', (14974, 14999), True, 'import numpy as np\n'), ((17672, 17713), 'math.exp', 'math.exp', (['(17.27 * t_dew / (t_dew + 237.3))'], {}), '(17.27 * t_dew / (t_dew + 237.3))\n', (17680, 17713), False, 'import math\n'), ((22462, 22472), 'numpy.sin', 'np.sin', (['sd'], {}), '(sd)\n', (22468, 22472), True, 'import numpy as np\n'), ((22502, 22512), 'numpy.sin', 'np.sin', (['ws'], {}), '(ws)\n', (22508, 22512), True, 'import numpy as np\n'), ((24619, 24629), 'numpy.tan', 'np.tan', (['sd'], {}), '(sd)\n', (24625, 24629), True, 'import numpy as np\n'), ((22448, 22459), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (22454, 22459), True, 'import numpy as np\n'), ((22475, 22486), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (22481, 22486), True, 'import numpy as np\n'), ((22489, 22499), 'numpy.cos', 'np.cos', (['sd'], {}), '(sd)\n', (22495, 22499), True, 'import numpy as np\n'), ((31976, 31987), 'numpy.sqrt', 'np.sqrt', (['ea'], {}), '(ea)\n', (31983, 31987), True, 'import numpy as np\n'), ((32171, 32213), 'numpy.exp', 'np.exp', (['(-77700000000.0 ** -4 * t_mean ** 2)'], {}), '(-77700000000.0 ** 
-4 * t_mean ** 2)\n', (32177, 32213), True, 'import numpy as np\n')]
|
from pathlib import Path
import logging
from .logger import Logger
from .log_formatter import LogFormatter
class FileLogger(Logger):
fmt = LogFormatter(use_colour=False, output_ts=False)
logger = None
def __init__(self, folder, format=None):
if format is None:
format = ("%(asctime)s|%(levelname)s|%(message)s",)
formatter = logging.Formatter(format)
log_file = Path(folder, "sayn.log")
if not log_file.parent.exists():
log_file.parent.mkdir(parents=True)
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self.logger = logger
def print(self, s=None):
if s is not None:
if s["level"] == "info":
func = self.logger.info
elif s["level"] == "error":
func = self.logger.error
elif s["level"] == "warning":
func = self.logger.warning
else:
func = self.logger.debug
s = s["message"]
if isinstance(s, str):
s = [s]
elif not isinstance(s, list):
raise ValueError("error in logging print")
func(f"{s[0]}")
for e in s[1:]:
for l in e.split("\n"):
func(f"{l}")
|
[
"logging.getLogger",
"logging.Formatter",
"logging.FileHandler",
"pathlib.Path"
] |
[((370, 395), 'logging.Formatter', 'logging.Formatter', (['format'], {}), '(format)\n', (387, 395), False, 'import logging\n'), ((416, 440), 'pathlib.Path', 'Path', (['folder', '"""sayn.log"""'], {}), "(folder, 'sayn.log')\n", (420, 440), False, 'from pathlib import Path\n'), ((549, 578), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (568, 578), False, 'import logging\n'), ((677, 704), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (694, 704), False, 'import logging\n')]
|
from pygsuite import DefaultFonts, TextStyle, Color
from pygsuite.docs.doc_elements.paragraph import Paragraph
BRIGHT_GREEN_HEX = "#72FF33"
def test_text(test_document):
document = test_document
docbody = document.body
docbody.delete()
docbody.add_text(
"TEST_CUSTOM\n",
style=TextStyle(font_size=18, font_weight=200, color=Color(hex=BRIGHT_GREEN_HEX)),
)
docbody.add_text("TEST_DEFAULT\n", style=DefaultFonts.NORMAL_TEXT)
docbody.add_text("TEST_INDEX\n", style=DefaultFonts.NORMAL_TEXT, position=1)
document.flush()
text = [item for item in document.body if isinstance(item, Paragraph)]
assert text[0].text.strip() == "TEST_INDEX"
assert text[2].text.strip() == "TEST_DEFAULT"
# TODO: return style objects
assert text[1].elements[0].style.font_size == 18
def test_paragraph(test_document):
document = test_document
docbody = document.body
docbody.delete()
docbody.add_text(
"TEST_CUSTOM\n",
style=TextStyle(font_size=18, font_weight=200, color=Color(hex=BRIGHT_GREEN_HEX)),
)
docbody.flush()
docbody.content[1].text = "TEST_CUSTOM_SETTER"
docbody.add_text("INSERT\n", position=0)
docbody.flush()
docbody.paragraphs[1].elements[0].style = TextStyle(
font_size=24, font_weight=500, color=Color(hex=BRIGHT_GREEN_HEX)
)
docbody.flush()
assert docbody.content[2].text.strip() == "TEST_CUSTOM_SETTER"
assert docbody.paragraphs[1].elements[0].style.font_size == 24
|
[
"pygsuite.Color"
] |
[((1325, 1352), 'pygsuite.Color', 'Color', ([], {'hex': 'BRIGHT_GREEN_HEX'}), '(hex=BRIGHT_GREEN_HEX)\n', (1330, 1352), False, 'from pygsuite import DefaultFonts, TextStyle, Color\n'), ((359, 386), 'pygsuite.Color', 'Color', ([], {'hex': 'BRIGHT_GREEN_HEX'}), '(hex=BRIGHT_GREEN_HEX)\n', (364, 386), False, 'from pygsuite import DefaultFonts, TextStyle, Color\n'), ((1051, 1078), 'pygsuite.Color', 'Color', ([], {'hex': 'BRIGHT_GREEN_HEX'}), '(hex=BRIGHT_GREEN_HEX)\n', (1056, 1078), False, 'from pygsuite import DefaultFonts, TextStyle, Color\n')]
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# pylint: disable=invalid-name,g-bad-import-order,missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from absl import app
from absl import flags
from concurrent import futures
import gin
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Any, Dict, List, Optional, Tuple
from neutra import utils
tfd = tfp.distributions
tfb = tfp.bijectors
FLAGS = flags.FLAGS
TRAIN_BATCH = 250
TEST_BATCH = 1000
AIS_BATCH = 50
def ReduceL2(tensor, dims):
return tf.sqrt(tf.reduce_sum(tf.square(tensor), dims))
@utils.MakeTFTemplate
def Conv2DWN(inputs,
num_filters,
kernel_size=[3, 3],
stride=[1, 1],
pad="SAME",
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.shape[3])
with tf.variable_scope(scope, "conv_2d_wn"):
w = tf.get_variable(
"w", [kernel_size[0], kernel_size[1], num_inputs, num_filters],
initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, stride[0], stride[1], 1], pad)
if biases_initializer is not None:
out += tf.reshape(b, [1, 1, 1, num_filters])
return activation(out)
def GetLinearARMask(num_inputs, num_outputs, zero_diagonal=False):
assert num_inputs % num_outputs == 0 or num_outputs % num_inputs == 0, "%d vs %d" % (num_inputs, num_outputs)
mask = np.ones([num_inputs, num_outputs], dtype=np.float32)
if num_outputs >= num_inputs:
k = num_outputs // num_inputs
for i in range(num_inputs):
mask[i + 1:, i * k:(i + 1) * k] = 0
if zero_diagonal:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = num_inputs // num_outputs
for i in range(num_outputs):
mask[(i + 1) * k:, i:i + 1] = 0
if zero_diagonal:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
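# Added sanity-check sketch (not part of the original file): with 2 inputs and
# 4 outputs, output block j may only depend on inputs 0..j, so the mask is
# [[1, 1, 1, 1],
#  [0, 0, 1, 1]].
def _linear_ar_mask_example():
  return GetLinearARMask(2, 4)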
def GetConvARMask(h, w, num_inputs, num_filters, zero_diagonal=False):
l = (h - 1) // 2
m = (w - 1) // 2
mask = np.ones([h, w, num_inputs, num_filters], dtype=np.float32)
mask[:l, :, :, :] = 0
mask[l, :m, :, :] = 0
mask[l, m, :, :] = GetLinearARMask(num_inputs, num_filters, zero_diagonal)
return mask
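# Added note (not in the original file): the convolutional AR mask zeroes every
# spatial row above the kernel centre and the positions left of the centre in
# the centre row, and applies GetLinearARMask at the centre tap, so an output
# channel never sees "future" pixels in raster order.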
@utils.MakeTFTemplate
def Conv2DAR(inputs, num_filters,
kernel_size=[3, 3],
zero_diagonal=False,
weights_initializer=None,
biases_initializer=tf.zeros_initializer(),
scope=None):
num_inputs = int(inputs.get_shape()[3])
mask = GetConvARMask(kernel_size[0], kernel_size[1], num_inputs, num_filters, zero_diagonal)
w = tf.get_variable("w", [kernel_size[0], kernel_size[1], num_inputs, num_filters], initializer=weights_initializer)
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value() * mask, [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w * mask, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME")
return out + tf.reshape(b, [1, 1, 1, num_filters])
@utils.MakeTFTemplate
def ConvAR(x,
h=None,
real_event_shape=[],
hidden_layers=[],
**kwargs):
#input_shape = (
# np.int32(x.shape.as_list())
# if x.shape.is_fully_defined() else tf.shape(x))
#x = tf.reshape(x, [-1] + real_event_shape)
for i, units in enumerate(hidden_layers):
x = Conv2DAR("conv2d_ar_%d"%i, num_filters=units, zero_diagonal=False, **kwargs)(inputs=x)
if i == 0 and h is not None:
if h.shape[-1] != x.shape[-1]:
x += Conv2DWN("conv2d_h", num_filters=int(x.shape[-1]), kernel_size=[1, 1], stride=[1, 1])(h)
else:
x += h
x = tf.nn.elu(x)
shift = Conv2DAR(
"conv2d_shift",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
log_scale = Conv2DAR(
"conv2d_scale",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
#shift = tf.reshape(shift, input_shape)
#log_scale = tf.reshape(log_scale, input_shape)
return shift, log_scale
@utils.MakeTFTemplate
def DenseWN(inputs,
num_outputs,
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.get_shape()[1])
with tf.variable_scope(scope, "dense_wn"):
w = tf.get_variable(
"w", [num_inputs, num_outputs], initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_outputs], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0])))
g = tf.exp(g)
w = g * tf.nn.l2_normalize(w, [0])
out = tf.matmul(inputs, w)
if biases_initializer is not None:
out += tf.expand_dims(b, 0)
return activation(out)
@utils.MakeTFTemplate
def ResConv2D(inputs,
num_filters,
kernel_size,
stride,
activation=tf.nn.elu,
output_init_factor=1.0):
x = Conv2DWN(
"conv2d_in",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
activation=activation)(
inputs=inputs)
non_linear = Conv2DWN(
"conv2d_nl",
num_filters=num_filters,
kernel_size=kernel_size,
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=x)
skip = Conv2DWN(
"conv2d_skip",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=inputs)
return non_linear + skip
@utils.MakeTFTemplate
def ResDense(inputs, num_dims, activation=None):
x = DenseWN("dense_in", num_outputs=num_dims, activation=activation)(inputs)
non_linear = DenseWN("dense_nl", num_outputs=num_dims)(x)
skip = DenseWN("dense_skip", num_outputs=num_dims)(x)
return non_linear + skip
@gin.configurable("conv_hier_encoder")
@utils.MakeTFTemplate
def ConvHierEncoder(images, depth = 2, num_blocks = 2, z_dims = 32, h_dims=160):
x = Conv2DWN("conv2d_in", num_filters=h_dims, stride=[2, 2], kernel_size=[5, 5])(inputs=images - 0.5)
means = []
raw_scales = []
contexts = []
for i in range(depth):
for j in range(num_blocks):
downsample = i > 0 and j == 0
if downsample:
stride = [2, 2]
else:
stride = [1, 1]
h = tf.nn.elu(x)
h = Conv2DWN("conv2d_in_%d_%d"%(i, j), num_filters=2*z_dims + 2 * h_dims, stride=stride, kernel_size=[3, 3])(inputs=h)
mean, raw_scale, context, h = tf.split(h, [z_dims, z_dims, h_dims, h_dims], -1)
means.append(mean)
raw_scales.append(raw_scale)
contexts.append(context)
h = tf.nn.elu(h)
h = Conv2DWN("conv2d_h_%d_%d"%(i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
if downsample:
x = tf.image.resize_nearest_neighbor(x, [int(x.shape[1]) // 2, int(x.shape[2]) // 2])
x += 0.1 * h
return means, raw_scales, contexts
@gin.configurable("conv_hier_prior_post")
@utils.MakeTFTemplate
def ConvHierPriorPost(images=None,
encoder=None,
z=None,
batch=None,
depth = 2,
num_blocks = 2,
z_dims = 32,
h_dims = 160,
image_width = 32):
is_q = encoder is not None
if is_q:
means, raw_scales, up_contexts = encoder(images)
if batch is None:
if images is not None:
batch = tf.shape(images)[0]
else:
batch = tf.shape(z[0])[0]
h = tf.get_variable("h_top", [h_dims], initializer=tf.zeros_initializer())
h = tf.reshape(h, [1, 1, 1, -1])
top_width = image_width // 2 ** num_blocks
h = tf.tile(h, [batch, top_width, top_width, 1])
x = h
ret_z = []
ret_log_pz = []
for i in reversed(list(range(depth))):
for j in reversed(list(range(num_blocks))):
downsample = i > 0 and j == 0
h = tf.nn.elu(x)
h_p = Conv2DWN(
"conv2d_p_%d_%d" % (i, j),
num_filters=2 * h_dims + 2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
p_mean, p_raw_scale, down_context, h_det = tf.split(
h_p, [z_dims, z_dims, h_dims, h_dims], -1)
p_z = tfd.Independent(
tfd.Normal(loc=p_mean, scale=tf.nn.softplus(p_raw_scale)),
reinterpreted_batch_ndims=3)
if is_q:
h_q = Conv2DWN(
"conv2d_q_%d_%d" % (i, j),
num_filters=2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
q_mean, q_raw_scale = tf.split(h_q, [z_dims, z_dims], -1)
context = down_context + up_contexts.pop()
q_mean += means.pop()
q_raw_scale += raw_scales.pop()
num_flat_dims = np.prod(q_mean.shape.as_list()[1:])
_maf_template = ConvAR(
"iaf_%d_%d" % (i, j),
real_event_shape=q_mean.shape.as_list()[1:],
hidden_layers=[h_dims, h_dims],
h=context,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
#x.set_shape([None, num_flat_dims])
x.set_shape([None] + q_mean.shape.as_list()[1:])
return t(x)
bijectors = []
#bijectors.append(tfb.Reshape(tf.shape(q_mean)[1:], [num_flat_dims]))
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
#bijectors.append(tfb.Reshape([num_flat_dims], tf.shape(q_mean)[1:]))
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
bijectors.append(tfb.AffineScalar(shift=q_mean, scale=tf.nn.softplus(q_raw_scale)))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(q_mean), scale=tf.ones_like(q_raw_scale)),
reinterpreted_batch_ndims=3)
q_z = tfd.TransformedDistribution(mvn, bijector)
if is_q:
dist = q_z
else:
dist = p_z
if z is None:
z_val = dist.sample()
else:
z_val = z[0]
z = z[1:]
ret_z.append(z_val)
ret_log_pz.append(dist.log_prob(z_val))
h = tf.concat([z_val, h_det], -1)
if downsample:
new_shape = [2 * int(x.shape[1]), 2 * int(x.shape[2])]
x = tf.image.resize_nearest_neighbor(x, new_shape)
h = tf.image.resize_nearest_neighbor(h, new_shape)
h = Conv2DWN("deconv2d_%d_%d" % (i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
x = x + 0.1 * h
x = tf.image.resize_nearest_neighbor(x, [2 * int(x.shape[1]), 2 * int(x.shape[2])])
x = Conv2DWN("conv2d_out", num_filters=3, stride=[1, 1], kernel_size=[5, 5])(inputs=x)
return ret_z, ret_log_pz, x
@gin.configurable("conv_encoder")
@utils.MakeTFTemplate
def ConvEncoder(images, num_outputs, hidden_dims = 450,
filter_scale = 1, fully_convolutional = False):
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return ResConv2D("res_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
else:
x = tf.reshape(x, [-1, filter_scale * 32 * 4 * 4])
x = ResDense("dense_h", num_dims=hidden_dims, activation=tf.nn.elu)(x)
return DenseWN(
"dense_out",
num_outputs=num_outputs,
weights_initializer=utils.L2HMCInitializer())(
x)
@gin.configurable("conv_decoder")
@utils.MakeTFTemplate
def ConvDecoder(encoding,
output_shape,
filter_scale = 1,
hidden_dims = 450,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
if fully_convolutional:
tf.logging.info("Encoding shape: %s", encoding.shape)
x = ResConv2D("res_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
else:
x = ResDense("dense_in", num_dims=hidden_dims, activation=tf.nn.elu)(encoding)
x = ResDense("dense_h", num_dims=filter_scale * 32 * 4 * 4, activation=tf.nn.elu)(x)
x = tf.reshape(x, [-1, 4, 4, filter_scale * 32])
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = ResConv2D("res_5", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = ResConv2D("res_3", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = ResConv2D(
"res_1",
num_filters=output_shape[-1],
kernel_size=[3, 3],
stride=[1, 1],
output_init_factor=0.01)(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder2")
@utils.MakeTFTemplate
def ConvEncoder2(images, num_outputs, filter_scale = 1):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_5", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
return ResConv2D("conv_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
@gin.configurable("conv_decoder2")
@utils.MakeTFTemplate
def ConvDecoder2(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = Conv2DWN("conv_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = Conv2DWN("conv_5", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_3", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN(
"conv_1",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder3")
@utils.MakeTFTemplate
def ConvEncoder3(images, num_outputs, hidden_dims = 450,
filter_scale = 1):
# This comes from VLAE paper.
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_8", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
return Conv2DWN("conv_10", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
@gin.configurable("conv_decoder3")
@utils.MakeTFTemplate
def ConvDecoder3(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
x = Conv2DWN("conv_1", num_filters=filter_scale * 96, kernel_size=[1, 1], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_8", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_10", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder4")
@utils.MakeTFTemplate
def ConvEncoder4(images, num_outputs,
filter_scale = 1,
fully_convolutional = False):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return Conv2DWN("conv_out", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
else:
return DenseWN("dense_out", num_outputs=num_outputs)(tf.layers.flatten(x))
@gin.configurable("conv_decoder4")
@utils.MakeTFTemplate
def ConvDecoder4(encoding,
output_shape,
filter_scale = 1,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
if not fully_convolutional:
x = tf.reshape(DenseWN("dense_in", num_outputs=8*8*16)(x), [-1, 8, 8, 16])
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[1, 1],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("dense_encoder")
@utils.MakeTFTemplate
def DenseEncoder(images,
num_outputs,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
x = tf.layers.flatten(images)
# Center the data, assuming it goes from [0, 1] initially.
# x = 2.0 * x - 1.0
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
return tf.layers.dense(x, num_outputs, kernel_initializer=utils.L2HMCInitializer())
@gin.configurable("dense_decoder")
@utils.MakeTFTemplate
def DenseDecoder(encoding,
output_shape,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = tf.layers.flatten(encoding)
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
num_outputs = np.prod(output_shape)
return tf.reshape(
tf.layers.dense(
x, num_outputs, kernel_initializer=utils.L2HMCInitializer(factor=0.01)),
[-1] + output_shape)
def IndependentBernouli3D(logits):
return tfd.Independent(
tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=3)
def IndependentDiscreteLogistic3D(locations,
scales):
dist = tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=locations, scale=scales),
bijector=tfb.AffineScalar(scale=255.0))
dist = tfd.QuantizedDistribution(distribution=dist, low=0., high=255.0)
dist = tfd.Independent(dist, reinterpreted_batch_ndims=3)
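  # Added comment: the quantized logistic above lives on [0, 255]; ScaleHack
  # rescales so callers keep working with images in [0, 1] -- samples are
  # divided by 255 and log_prob inputs are multiplied by 255 (and clipped).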
class ScaleHack(object):
def __init__(self, dist):
self._dist = dist
def sample(self, *args, **kwargs):
return self._dist.sample(*args, **kwargs) / 255.0
def log_prob(self, x, *args, **kwargs):
return self._dist.log_prob(tf.clip_by_value(x * 255.0, 0.0, 255.0), *args, **kwargs)
return ScaleHack(dist)
def IndependentDiscreteLogistic3D2(locations,
scales):
class IndependentDiscreteLogistic(object):
def __init__(self, loc, scale):
self._loc = loc
self._scale = scale
def sample(self, *args, **kwargs):
dist = tfd.Logistic(loc=self._loc, scale=self._scale)
return tf.clip_by_value(dist.sample(*args, **kwargs), 0.0, 1.0)
def log_prob(self, x, *args, **kwargs):
sample = x
mean = self._loc
scales = self._scale
binsize=1.0 / 256.0
sample = (tf.floor(sample / binsize) * binsize - mean) / scales
return tf.reduce_sum(
tf.log(
tf.sigmoid(sample + binsize / scales) - tf.sigmoid(sample) + 1e-7),
[-1, -2, -3])
return IndependentDiscreteLogistic(locations, scales)
@gin.configurable("dense_recognition")
@utils.MakeTFTemplate
def DenseRecognition(images, encoder, z=None, sigma_activation="exp"
):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
num_dims = int(encoding.shape[-1]) // 2
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, 2]), num=2, axis=-1)
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.Affine(shift=mu, scale_diag=sigma)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
tf.logging.info("bijector z shape: %s", z[0].shape)
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
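# Added note (not in the original file): the recognition networks below all
# follow the same pattern -- build q(z | x) as a standard normal pushed through
# an explicit bijector (shift/scale, affine, RealNVP, IAF, ...), and return the
# bijector alongside the sample and log-probability so it can later be reused
# to precondition HMC.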
@gin.configurable("dense_recognition_affine")
@utils.MakeTFTemplate
def DenseRecognitionAffine(images, encoder, z=None,
z_dims=None):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
tril_raw = tfd.fill_triangular(encoding[:, z_dims:])
sigma = tf.nn.softplus(tf.matrix_diag_part(tril_raw))
tril = tf.linalg.set_diag(tril_raw, sigma)
bijector = tfb.Affine(shift=mu, scale_tril=tril)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_affine_lr")
@utils.MakeTFTemplate
def DenseRecognitionAffineLR(images, encoder, z=None,
z_dims=None, rank=1):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
sigma = encoding[:, z_dims:2*z_dims]
perturb = encoding[:, 2*z_dims:]
perturb = tf.reshape(perturb, [-1, z_dims, rank])
sigma = tf.nn.softplus(sigma)
bijector = tfb.Affine(shift=mu, scale_diag=sigma,
scale_perturb_factor=perturb)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_rnvp")
@utils.MakeTFTemplate
def DenseRecognitionRNVP(
images,
encoder,
z=None,
num_bijectors=3,
condition_bijector=False,
layer_sizes=[128, 128],
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_bijector:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_bijector:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_bijectors):
_rnvp_template = utils.DenseShiftLogScale(
"rnvp_%d" % i,
h=h,
hidden_layers=layer_sizes,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def rnvp_template(x, output_units, t=_rnvp_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims - output_units])
return t(x, output_units)
bijectors.append(
tfb.Invert(
tfb.RealNVP(
num_masked=num_dims // 2,
shift_and_log_scale_fn=rnvp_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_iaf")
@utils.MakeTFTemplate
def DenseRecognitionIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_iaf_layers):
#_maf_template = tfb.masked_autoregressive_default_template(
# hidden_layers=iaf_layer_sizes,
# activation=tf.nn.softplus,
# kernel_initializer=utils.L2HMCInitializer(factor=0.01))
_maf_template = utils.DenseAR(
"maf_%d" % i,
hidden_layers=iaf_layer_sizes,
h=h,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
class FlipImageBijector(tfb.Bijector):
def __init__(self, validate_args=False, name=None):
"""Creates the `Permute` bijector.
Args:
permutation: An `int`-like vector-shaped `Tensor` representing the
permutation to apply to the rightmost dimension of the transformed
`Tensor`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if `not permutation.dtype.is_integer`.
ValueError: if `permutation` does not contain exactly one of each of
`{0, 1, ..., d}`.
"""
super(FlipImageBijector, self).__init__(
forward_min_event_ndims=3,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "flip_image")
def _forward(self, x):
return tf.image.flip_left_right(tf.image.flip_up_down(x))
def _inverse(self, y):
return tf.image.flip_up_down(tf.image.flip_left_right(y))
def _inverse_log_det_jacobian(self, y):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return tf.constant(0., dtype=y.dtype.base_dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0., dtype=x.dtype.base_dtype)
@gin.configurable("conv_iaf")
@utils.MakeTFTemplate
def ConvIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
bijectors = []
for i in range(num_iaf_layers):
_maf_template = ConvAR(
"iaf_%d" % i,
real_event_shape=encoding_parts[0].shape.as_list()[1:],
hidden_layers=iaf_layer_sizes,
h=h,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None] + encoding_parts[0].shape.as_list()[1:])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(FlipImageBijector())
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.AffineScalar(shift=mu, scale=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("conv_shift_scale")
@utils.MakeTFTemplate
def ConvShiftScale(
images,
encoder,
z=None,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.AffineScalar(shift=mu, scale=sigma)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def SimplePrior(z=None, batch=None,
num_dims=None):
"""Models P(z)"""
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros(num_dims), scale_diag=tf.ones(num_dims))
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def Simple3DPrior(z=None, batch=None,
shape=None):
"""Models P(z)"""
mvn = tfd.Independent(tfd.Normal(loc=tf.zeros(shape), scale=tf.ones(shape)), reinterpreted_batch_ndims=3)
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def DenseMNISTNoise(x=None, z=None, decoder=None, return_means=True):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
bernoulli = IndependentBernouli3D(decoding)
if x is None:
if return_means:
x = bernoulli.mean()
else:
x = tf.to_float(bernoulli.sample())
return x, bernoulli.log_prob(x)
@gin.configurable("cifar10_noise")
@utils.MakeTFTemplate
def DenseCIFAR10TNoise(x=None, z=None, decoder=None, return_means=True, uniform_scale=False, logistic_impl="mine"):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
if uniform_scale:
scale = tf.get_variable("scale", initializer=1.0)
scales = tf.reshape(scale, [1, 1, 1])
else:
scales = tf.get_variable(
"scales", [32, 32, 3], initializer=tf.ones_initializer())
if logistic_impl == "mine":
disc_logistic = IndependentDiscreteLogistic3D(decoding, tf.nn.softplus(scales))
elif logistic_impl == "kingma":
disc_logistic = IndependentDiscreteLogistic3D2(decoding, tf.nn.softplus(scales))
if x is None:
x = tf.to_float(disc_logistic.sample())
return x, disc_logistic.log_prob(x)
@gin.configurable("learning_rate")
def LearningRate(train_size, global_step, schedule = "hoffman", warmup_steps=0):
if schedule == "hoffman":
base = tf.train.piecewise_constant(
global_step, [train_size * 500 // TRAIN_BATCH], [1e-3, 1e-4])
elif schedule == "new":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[1e-3, 1e-4, 1e-5])
elif schedule == "new_gentle":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[0.5e-3, 1e-4, 1e-5])
elif schedule == "fast":
base = tf.train.piecewise_constant(
global_step,
[train_size * 800 // TRAIN_BATCH],
[1e-2, 1e-5])
else:
raise ValueError("Invalid schedule: " + schedule)
if warmup_steps == 0:
return base
else:
return tf.minimum(base * tf.to_float(global_step) / warmup_steps, base)
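# Added worked example (values are illustrative): with train_size=50000 and
# TRAIN_BATCH=250, the "hoffman" schedule keeps the learning rate at 1e-3 for
# 50000 * 500 // 250 = 100000 global steps and then drops it to 1e-4.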
VAEOutputs = collections.namedtuple(
"VAEOutputs", "log_p_x_z, elbo, sample_means, recon_means, klqp, total_klqp, post_z, prior_z")
AISOutputs = collections.namedtuple(
"AISOutputs",
"log_p, p_accept, z_fin, recon"
)
def MakeVAE(images, recognition, prior, noise, beta, num_samples,
min_kl):
z, log_q_z = recognition(images)
_, log_p_z = prior(z)
_, log_p_x_z = noise(images, z)
post_z = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - beta * total_klqp
recon_means, _ = noise(None, z)
z, _ = prior(batch=num_samples)
sample_means, _ = noise(None, z)
return VAEOutputs(
log_p_x_z=log_p_x_z,
elbo=elbo,
sample_means=sample_means,
recon_means=recon_means,
klqp=klqp,
total_klqp=total_klqp,
post_z=post_z,
prior_z=z)
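# Added note (not in the original file): MakeVAE assembles the usual evidence
# lower bound, elbo = E[log p(x|z)] - beta * sum_l max(min_kl, KL_l), where each
# per-layer KL term is estimated as log q(z_l|x) - log p(z_l) at the sampled z.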
DLGMOutputs = collections.namedtuple(
"DLGMOutputs",
"elbo, sample_means, mcmc_log_p, recon_means, p_accept, post_z, post_z_chain, q_z, xentpq"
)
@gin.configurable("dlgm")
class DLGM(object):
def __init__(self,
z_dims=64,
beta=1.0,
beta_steps=0,
step_size=0.2,
num_leapfrog_steps=5,
num_hmc_steps=2,
use_neutra=True,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
q_loss_type="klqp",
min_kl=0.0,
symm_factor=0.5,
save_chain_state=False,
chain_warmup_epochs=5,
use_q_z_for_gen=False,
no_gen_train_steps=0,
dataset=None,
use_bijector_for_ais=False,
prior_type="simple",
adapt_step_size=False,
step_size_gain=1e-3,
use_q_z_for_ais=False,
affine_rank=1,
step_size_warmup=0):
self.train_size = dataset.train_size
self._use_q_z_for_ais = use_q_z_for_ais
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._use_bijector_for_ais = use_bijector_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
self._use_neutra = use_neutra
self._num_leapfrog_steps = num_leapfrog_steps
self._num_hmc_steps = num_hmc_steps
self._q_loss_type = q_loss_type
self._symm_factor = symm_factor
self._save_chain_state = save_chain_state
self._chain_warmup_epochs = chain_warmup_epochs
self._use_q_z_for_gen = use_q_z_for_gen
self._no_gen_train_steps = no_gen_train_steps
self._step_size_gain = step_size_gain
self._adapt_step_size = adapt_step_size
self._step_size_warmup = step_size_warmup
self._init_step_size = step_size
if self._adapt_step_size:
self._step_size = tf.get_variable("step_size", initializer=step_size)
else:
self._step_size = tf.constant(step_size)
if self._save_chain_state:
self._chain_state = tf.get_variable(
"train_chain_state", [self.train_size, z_dims], trainable=False)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
#assert dataset.name == "cifar10"
#self._encoder = ConvHierEncoder("encoder")
#self._prior_posterior = ConvHierPriorPost("prior_post")
#self._decoder = lambda z: self._prior_posterior(z=z)[2]
#self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
#self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
pass
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition(
"recog",
encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
self._recog = recog
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def AdjustedStepSize(self):
if self._step_size_warmup > 0:
global_step = tf.train.get_or_create_global_step()
max_step = self._init_step_size * tf.to_float(
global_step) / self._step_size_warmup
return tf.where(global_step > self._step_size_warmup, self._step_size,
tf.minimum(max_step, self._step_size))
else:
return self._step_size
def RecogVars(self):
return self._encoder.variables + self._recog.variables
def GenVars(self):
return (
self._prior.variables + self._decoder.variables + self._noise.variables)
def MakeDLGM(self,
images,
other_z_init=None,
use_other_z_init=None,
num_samples=64):
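    """Builds the DLGM graph: q(z|x) initialization, HMC refinement, losses and samples."""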
z, log_q_z, bijector = self._recog(images)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
post_z = z
q_z = z
if use_other_z_init is not None:
z_init = [tf.cond(use_other_z_init, lambda: tf.identity(other_layer_z),
lambda: tf.identity(layer_z)) for other_layer_z, layer_z in zip(z, other_z_init)]
z_init = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(self._min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - self._beta * total_klqp
def TargetLogProbFn(*z):
for post_z_e, z_e in zip(post_z, z):
tf.logging.info("Shape here: %s %s", post_z_e.shape, z_e.shape)
z_e.set_shape(post_z_e.shape)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
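    # Refine the posterior sample with a short HMC chain; with use_neutra the
    # kernel is run in the space warped by the recognition bijector.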
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=TargetLogProbFn,
step_size=self.AdjustedStepSize(),
num_leapfrog_steps=self._num_leapfrog_steps)
if self._use_neutra:
kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=bijector)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=self._num_hmc_steps, current_state=z, kernel=kernel)
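    # Keep only the last state of each chain and block gradients through MCMC.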
z = [tf.stop_gradient(s[-1, Ellipsis]) for s in states]
post_z = z
_, log_q_z, _ = self._recog(images, z=z)
xentpq = -tf.add_n([tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z])
if self._use_q_z_for_gen:
z = q_z
recon_means, _ = self._noise(None, z)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
mcmc_log_p = tf.reduce_mean(tf.add_n(log_p_z) + log_p_x_z)
if self._use_neutra:
log_accept_ratio = kernel_results.inner_results.log_accept_ratio
else:
log_accept_ratio = kernel_results.log_accept_ratio
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
z, _ = self._prior(batch=num_samples)
sample_means, _ = self._noise(None, z)
return DLGMOutputs(
elbo=elbo,
sample_means=sample_means,
mcmc_log_p=mcmc_log_p,
recon_means=recon_means,
p_accept=p_accept,
post_z=post_z,
post_z_chain=states,
q_z=z_init,
xentpq=xentpq)
def GetPosterior(self, images):
outputs = self.MakeDLGM(images)
return outputs.post_z
def TrainOp(self, data_idx, images):
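    """Builds the training op with separate losses for the recognition and generative parts."""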
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
if self._save_chain_state:
other_z_init = tf.gather(self._chain_state, data_idx)
use_other_z_init = (
global_step > self._chain_warmup_epochs * self.train_size // TRAIN_BATCH)
else:
other_z_init = None
use_other_z_init = None
outputs = self.MakeDLGM(
images, other_z_init=other_z_init, use_other_z_init=use_other_z_init)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
utils.LogAndSummarizeMetrics({
"learning_rate": learning_rate,
"elbo": outputs.elbo,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
"step_size": self.AdjustedStepSize(),
}, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
if self._save_chain_state:
with tf.control_dependencies([outputs.post_z]):
chain_state_update_op = tf.scatter_update(self._chain_state, data_idx,
outputs.post_z)
else:
chain_state_update_op = tf.no_op()
if self._adapt_step_size:
new_step_size = self._step_size + self._step_size_gain * (outputs.p_accept - 0.651)
new_step_size = tf.clip_by_value(new_step_size, 1e-3, 0.5)
step_size_op = self._step_size.assign(
tf.where(global_step > self._step_size_warmup, new_step_size,
self._step_size))
else:
step_size_op = tf.no_op()
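    # Recognition loss: -ELBO ("klqp"), the cross-entropy term xentpq ("klpq"),
    # or a convex combination of the two ("symm").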
with tf.name_scope("recog_train"):
if self._q_loss_type == "klqp":
loss = -outputs.elbo
elif self._q_loss_type == "symm":
loss = (
self._symm_factor * -outputs.elbo +
(1.0 - self._symm_factor) * outputs.xentpq)
elif self._q_loss_type == "klpq":
loss = outputs.xentpq
if self._save_chain_state:
# Not super efficient...
loss = tf.cond(use_other_z_init, lambda: tf.identity(loss),
lambda: tf.identity(-outputs.elbo))
recog_train_op = tf.contrib.training.create_train_op(
loss,
opt,
summarize_gradients=True,
variables_to_train=self.RecogVars(),
transform_grads_fn=utils.ProcessGradients)
with tf.name_scope("gen_train"):
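      # Train the generator on the ELBO for the first no_gen_train_steps steps,
      # then switch to the MCMC-refined log-likelihood.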
gen_loss = tf.cond(global_step < self._no_gen_train_steps,
lambda: -outputs.elbo, lambda: -outputs.mcmc_log_p)
gen_train_op = tf.contrib.training.create_train_op(
gen_loss,
opt,
None,
summarize_gradients=True,
variables_to_train=self.GenVars(),
transform_grads_fn=utils.ProcessGradients)
return tf.group(recog_train_op, gen_train_op, chain_state_update_op, step_size_op)
def EvalOp(self, data_idx, images):
outputs = self.MakeDLGM(images)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
return utils.LogAndSummarizeMetrics({
"elbo": outputs.elbo,
"xentpq": outputs.xentpq,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
})
def AIS(self, images, num_chains):
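    """Estimates log p(x) with annealed importance sampling, num_chains chains per example."""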
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z, _ = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
if self._use_bijector_for_ais:
_, _, bijector = self._recog(images)
else:
bijector = None
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init, bijector=bijector)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("vae")
class VAE(object):
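  """Standard VAE.

  Shares the encoder/decoder/prior/bijector options of DLGM but trains on the
  ELBO alone, without MCMC refinement of the posterior samples.
  """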
def __init__(self,
z_dims=64,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
beta=1.0,
beta_steps=0,
min_kl=0,
use_q_z_for_ais=False,
dataset=None,
prior_type="simple",
affine_rank=1):
self.train_size = dataset.train_size
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._beta = beta
self._use_q_z_for_ais = use_q_z_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
assert dataset.name == "cifar10"
self._encoder = ConvHierEncoder("encoder")
self._prior_posterior = ConvHierPriorPost("prior_post")
self._decoder = lambda z: self._prior_posterior(z=z)[2]
self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition("recog", encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
# Drop the bijector return.
self._recog = lambda *args, **kwargs: recog(*args, **kwargs)[:2]
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def MakeVAE(self, images, beta_override=None, num_samples=64):
if beta_override is not None:
beta = beta_override
else:
beta = self._beta
return MakeVAE(images, self._recog, self._prior, self._noise, beta,
num_samples, self._min_kl)
def TrainOp(self, data_idx, images):
outputs = self.MakeVAE(images)
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
metrics = {
"learning_rate": learning_rate,
"log_p_x_z": outputs.log_p_x_z,
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
"beta": self._beta,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
utils.LogAndSummarizeMetrics(metrics, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
return tf.contrib.training.create_train_op(
-outputs.elbo,
opt,
summarize_gradients=True,
transform_grads_fn=utils.ProcessGradients)
def GetPosterior(self, images):
outputs = self.MakeVAE(images)
return outputs.post_z
def EvalOp(self, data_idx, images):
outputs = self.MakeVAE(images, 1.0)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
metrics = {
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
return utils.LogAndSummarizeMetrics(metrics)
def AIS(self, images, num_chains):
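    """AIS estimate of log p(x); the proposal is the prior or, optionally, q(z|x)."""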
outputs = self.MakeVAE(images)
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("train")
def Train(model, dataset, train_dir, master, epochs=600, polyak_averaging=0.0, warmstart_ckpt=""):
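  """Runs training, optionally with Polyak averaging and a warm-start checkpoint."""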
data_idx, images = dataset.TrainBatch(TRAIN_BATCH, epochs)
train_op = model.TrainOp(data_idx, images)
if polyak_averaging > 0.0:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=polyak_averaging)
with tf.control_dependencies([train_op]):
train_op = ema.apply()
utils.LogAndSaveHParams()
tf.Session.reset(master)
if warmstart_ckpt:
tf.init_from_checkpoint(warmstart_ckpt, {"/": "/"})
hooks = [
tf.train.StopAtStepHook(last_step=dataset.train_size * epochs //
TRAIN_BATCH),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), every_n_secs=60)
]
tf.contrib.training.train(
train_op,
logdir=train_dir,
master=master,
hooks=hooks,
save_checkpoint_secs=120,
save_summaries_steps=60)
def Eval(model, dataset, train_dir, eval_dir, master,
use_polyak_averaging=False, max_number_of_evaluations=None):
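  """Repeatedly evaluates the latest checkpoint in train_dir, writing summaries to eval_dir."""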
data_idx, images = dataset.TestBatch(TEST_BATCH)
eval_op = model.EvalOp(data_idx, images)
utils.LogAndSaveHParams()
tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(saver=saver)
tf.Session.reset(master)
hooks = [
# Just for logging.
tf.contrib.training.StopAfterNEvalsHook(dataset.test_size // TEST_BATCH),
tf.contrib.training.SummaryAtEndHook(eval_dir),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), at_end=True)
]
tf.contrib.training.evaluate_repeatedly(
train_dir,
eval_ops=eval_op,
hooks=hooks,
      # Re-run the evaluation every two minutes.
eval_interval_secs=120,
max_number_of_evaluations=max_number_of_evaluations,
master=master,
scaffold=scaffold)
def AISEvalShard(shard, master, num_workers, num_chains, dataset, use_polyak_averaging, writer, train_dir, model_fn, batch):
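  """Runs AIS on one data shard, restarting from the latest checkpoint if the session is aborted."""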
tf.logging.info("Thread started")
model = model_fn()
tf.logging.info("Built model")
shard_idx = tf.placeholder(tf.int64, [])
tf.logging.info("built data")
data_iterator = dataset.AISIterator(batch, shard_idx, num_workers)
images, _ = data_iterator.get_next()
tf.logging.info("Built mA")
ais_outputs = model.AIS(images, num_chains)
log_p = ais_outputs.log_p
p_accept = ais_outputs.p_accept
tf.logging.info("Built mB")
if shard == 1:
utils.LogAndSaveHParams()
summary_op = tf.summary.merge_all()
global_step = tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
tf.logging.info("Built mC")
global_step_val = []
tf.logging.info("Starting shard %d, %s", shard, master)
#with tf.MonitoredSession(
# tf.train.ChiefSessionCreator(
# master=master,
# checkpoint_dir=train_dir)) as sess:
while True:
try:
tf.Session.reset(master)
with tf.Session(master) as sess:
all_log_p = np.zeros([0])
saver.restore(sess, tf.train.latest_checkpoint(train_dir))
sess.run(data_iterator.initializer, {shard_idx: shard})
try:
step_num = 0
while True:
fetch = {
"log_p": log_p,
"global_step": global_step,
"p_accept": p_accept
}
if shard == 0:
fetch["summary"] = summary_op
tf.logging.info("Shard %d step %d started.", shard, step_num)
fetch = sess.run(fetch)
tf.logging.info("Shard %d step %d done.", shard, step_num)
tf.logging.info("Shard %d log_p %.2f, p_accept: %.2f", shard,
np.mean(fetch["log_p"]),
np.mean(fetch["p_accept"]))
all_log_p = np.hstack([all_log_p, fetch["log_p"]])
if shard == 0 and step_num == 0:
global_step_val.append(fetch["global_step"])
writer.add_summary(fetch["summary"], global_step_val[0])
step_num += 1
except tf.errors.OutOfRangeError:
tf.logging.info("Shard %d done.", shard)
pass
return all_log_p
except tf.errors.AbortedError:
pass
def AISEval(model_fn, dataset, train_dir, eval_dir, worker_master_pattern,
num_workers, num_chains, use_polyak_averaging=False):
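  """Fans AIS shards out to num_workers workers and aggregates the per-example log p estimates."""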
tf.reset_default_graph()
log_p_ph = tf.placeholder(tf.float32, [None])
log_p_summary = tf.summary.scalar("log_p", tf.reduce_mean(log_p_ph))
writer = tf.summary.FileWriter(eval_dir)
with futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
results = []
for shard in range(num_workers):
tf.logging.info("Submitting shard %d", shard)
master = worker_master_pattern.format(shard)
results.append(
executor.submit(AISEvalShard, shard, master, num_workers, num_chains,
dataset, use_polyak_averaging, writer, train_dir,
model_fn, AIS_BATCH))
all_log_p = np.zeros([0])
for result in results:
log_p = result.result()
all_log_p = np.hstack([all_log_p, log_p])
log_p = np.mean(all_log_p)
tf.logging.info("Log P: %.2f", log_p)
with tf.Session() as sess:
writer.add_summary(
sess.run(log_p_summary, {log_p_ph: all_log_p}), 0)
writer.flush()
return log_p
MODEL_TO_CLASS = {"vae": VAE, "dlgm": DLGM}
def main(argv):
del argv # Unused.
utils.BindHParams(FLAGS.hparams)
if FLAGS.data_type == "mnist":
dataset = utils.MNISTDataset(FLAGS.mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fashion_mnist":
dataset = utils.MNISTDataset(FLAGS.fashion_mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "cifar10":
dataset = utils.CIFAR10Dataset(FLAGS.cifar10_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fake":
dataset = utils.FakeMNISTDataset()
if FLAGS.mode == "train":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Train(model, dataset, FLAGS.train_dir, FLAGS.master,
polyak_averaging=FLAGS.polyak_averaging)
elif FLAGS.mode == "eval":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Eval(model, dataset, FLAGS.train_dir, FLAGS.eval_dir,
FLAGS.master,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
elif FLAGS.mode == "ais_eval":
replica_log_p = []
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
log_p = AISEval(model_fn, dataset, train_dir, eval_dir,
FLAGS.ais_worker_pattern, FLAGS.ais_num_workers,
FLAGS.ais_num_chains,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
replica_log_p.append(log_p)
log_p = np.mean(replica_log_p)
std_log_p = np.std(replica_log_p)
tf.logging.info("Log P: %.2f +- %.2f", log_p,
std_log_p / np.sqrt(len(replicas)))
tf.logging.info("All log_p: %s", replica_log_p)
elif FLAGS.mode == "ais_eval2":
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
tf.reset_default_graph()
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
sentinel_filename = os.path.join(eval_dir, "ais_shard_%d_done" % FLAGS.ais_shard)
if tf.gfile.Exists(sentinel_filename):
continue
batch = FLAGS.ais_batch_size
assert (dataset.test_size // FLAGS.ais_num_workers) % batch == 0
writer = tf.summary.FileWriter(eval_dir)
log_p = AISEvalShard(FLAGS.ais_shard, "", FLAGS.ais_num_workers, FLAGS.ais_num_chains,
dataset, FLAGS.polyak_averaging > 0.0, writer, train_dir, model_fn, batch)
tf.gfile.MakeDirs(eval_dir)
with tf.gfile.Open(os.path.join(eval_dir, "ais_shard_%d" % FLAGS.ais_shard), "w") as f:
np.savetxt(f, log_p)
with tf.gfile.Open(sentinel_filename, "w") as f:
f.write("done")
if __name__ == "__main__":
flags.DEFINE_string("mnist_data_dir", "", "")
flags.DEFINE_string("fashion_mnist_data_dir", "", "")
flags.DEFINE_string("cifar10_data_dir", "", "")
flags.DEFINE_string("data_type", "mnist", "")
flags.DEFINE_enum("mode", "train", ["train", "eval", "ais_eval", "ais_eval2"], "")
flags.DEFINE_enum("model", "vae", list(MODEL_TO_CLASS.keys()), "")
flags.DEFINE_string("train_dir", "/tmp/vae/train", "")
flags.DEFINE_string("eval_dir", "/tmp/vae/eval", "")
flags.DEFINE_string("master", "", "")
flags.DEFINE_string("ais_worker_pattern", "", "")
flags.DEFINE_integer("ais_shard", 0, "")
flags.DEFINE_integer("ais_num_workers", 1, "")
flags.DEFINE_integer("ais_num_chains", 1, "")
flags.DEFINE_integer("ais_num_replicas", 1, "")
flags.DEFINE_list("ais_replicas", "", "Manual listing of replicas")
flags.DEFINE_integer("ais_batch_size", 25, "")
flags.DEFINE_float("polyak_averaging", 0.0, "")
flags.DEFINE_boolean("test_is_valid", False, "")
flags.DEFINE(utils.YAMLDictParser(), "hparams", "", "")
app.run(main)
'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.99)'}), '(decay=0.99)\n', (63288, 63300), True, 'import tensorflow as tf\n'), ((63376, 63392), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (63390, 63392), True, 'import tensorflow as tf\n'), ((63511, 63583), 'tensorflow.contrib.training.StopAfterNEvalsHook', 'tf.contrib.training.StopAfterNEvalsHook', (['(dataset.test_size // TEST_BATCH)'], {}), '(dataset.test_size // TEST_BATCH)\n', (63550, 63583), True, 'import tensorflow as tf\n'), ((63591, 63637), 'tensorflow.contrib.training.SummaryAtEndHook', 'tf.contrib.training.SummaryAtEndHook', (['eval_dir'], {}), '(eval_dir)\n', (63627, 63637), True, 'import tensorflow as tf\n'), ((64559, 64584), 'neutra.utils.LogAndSaveHParams', 'utils.LogAndSaveHParams', ([], {}), '()\n', (64582, 64584), False, 'from neutra import utils\n'), ((64708, 64749), 'tensorflow.logging.info', 'tf.logging.info', (['"""Using polyak averaging"""'], {}), "('Using polyak averaging')\n", (64723, 64749), True, 'import tensorflow as tf\n'), ((64760, 64805), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.99)'}), '(decay=0.99)\n', (64793, 64805), True, 'import tensorflow as tf\n'), ((64881, 64897), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (64895, 64897), True, 'import tensorflow as tf\n'), ((66754, 66778), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['log_p_ph'], {}), '(log_p_ph)\n', (66768, 66778), True, 'import tensorflow as tf\n'), ((66832, 66883), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'num_workers'}), '(max_workers=num_workers)\n', (66858, 66883), False, 'from concurrent import futures\n'), ((66931, 66949), 'six.moves.range', 'range', (['num_workers'], {}), '(num_workers)\n', (66936, 66949), False, 'from six.moves import range\n'), ((67297, 67310), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (67305, 67310), True, 'import numpy as np\n'), ((67493, 67505), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (67503, 67505), True, 'import tensorflow as tf\n'), ((67800, 67861), 'neutra.utils.MNISTDataset', 'utils.MNISTDataset', (['FLAGS.mnist_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.mnist_data_dir, FLAGS.test_is_valid)\n', (67818, 67861), False, 'from neutra import utils\n'), ((71597, 71619), 'neutra.utils.YAMLDictParser', 'utils.YAMLDictParser', ([], {}), '()\n', (71617, 71619), False, 'from neutra import utils\n'), ((1340, 1357), 'tensorflow.square', 'tf.square', (['tensor'], {}), '(tensor)\n', (1349, 1357), True, 'import tensorflow as tf\n'), ((2019, 2086), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[num_filters]'], {'initializer': 'biases_initializer'}), "('b', [num_filters], initializer=biases_initializer)\n", (2034, 2086), True, 'import tensorflow as tf\n'), ((2216, 2253), 'tensorflow.reshape', 'tf.reshape', (['g', '[1, 1, 1, num_filters]'], {}), '(g, [1, 1, 1, num_filters])\n', (2226, 2253), True, 'import tensorflow as tf\n'), ((2256, 2288), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['w', '[0, 1, 2]'], {}), '(w, [0, 1, 2])\n', (2274, 2288), True, 'import tensorflow as tf\n'), ((2411, 2448), 'tensorflow.reshape', 'tf.reshape', (['b', '[1, 1, 1, num_filters]'], {}), '(b, [1, 1, 1, num_filters])\n', (2421, 2448), True, 'import tensorflow as tf\n'), ((5971, 6038), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[num_outputs]'], {'initializer': 'biases_initializer'}), "('b', 
[num_outputs], initializer=biases_initializer)\n", (5986, 6038), True, 'import tensorflow as tf\n'), ((6166, 6192), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['w', '[0]'], {}), '(w, [0])\n', (6184, 6192), True, 'import tensorflow as tf\n'), ((6277, 6297), 'tensorflow.expand_dims', 'tf.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (6291, 6297), True, 'import tensorflow as tf\n'), ((7949, 7961), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (7958, 7961), True, 'import tensorflow as tf\n'), ((8123, 8172), 'tensorflow.split', 'tf.split', (['h', '[z_dims, z_dims, h_dims, h_dims]', '(-1)'], {}), '(h, [z_dims, z_dims, h_dims, h_dims], -1)\n', (8131, 8172), True, 'import tensorflow as tf\n'), ((8274, 8286), 'tensorflow.nn.elu', 'tf.nn.elu', (['h'], {}), '(h)\n', (8283, 8286), True, 'import tensorflow as tf\n'), ((9221, 9243), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (9241, 9243), True, 'import tensorflow as tf\n'), ((9441, 9453), 'six.moves.range', 'range', (['depth'], {}), '(depth)\n', (9446, 9453), False, 'from six.moves import range\n'), ((9552, 9564), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (9561, 9564), True, 'import tensorflow as tf\n'), ((9800, 9851), 'tensorflow.split', 'tf.split', (['h_p', '[z_dims, z_dims, h_dims, h_dims]', '(-1)'], {}), '(h_p, [z_dims, z_dims, h_dims, h_dims], -1)\n', (9808, 9851), True, 'import tensorflow as tf\n'), ((12011, 12040), 'tensorflow.concat', 'tf.concat', (['[z_val, h_det]', '(-1)'], {}), '([z_val, h_det], -1)\n', (12020, 12040), True, 'import tensorflow as tf\n'), ((21182, 21202), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), '(x)\n', (21199, 21202), True, 'import tensorflow as tf\n'), ((22741, 22765), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (22763, 22765), False, 'from neutra import utils\n'), ((25575, 25608), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (25589, 25608), True, 'import tensorflow as tf\n'), ((25709, 25726), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (25722, 25726), True, 'import tensorflow as tf\n'), ((25739, 25758), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (25751, 25758), True, 'import tensorflow as tf\n'), ((26494, 26511), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (26507, 26511), True, 'import tensorflow as tf\n'), ((26524, 26543), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (26536, 26543), True, 'import tensorflow as tf\n'), ((27299, 27316), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (27312, 27316), True, 'import tensorflow as tf\n'), ((27329, 27348), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (27341, 27348), True, 'import tensorflow as tf\n'), ((28181, 28212), 'numpy.arange', 'np.arange', (['(num_dims - 1)', '(-1)', '(-1)'], {}), '(num_dims - 1, -1, -1)\n', (28190, 28212), True, 'import numpy as np\n'), ((29222, 29255), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (29236, 29255), True, 'import tensorflow as tf\n'), ((29397, 29414), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (29410, 29414), True, 'import tensorflow as tf\n'), ((29427, 29446), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (29439, 29446), True, 'import tensorflow as tf\n'), ((30268, 30299), 'numpy.arange', 'np.arange', (['(num_dims - 1)', '(-1)', 
'(-1)'], {}), '(num_dims - 1, -1, -1)\n', (30277, 30299), True, 'import numpy as np\n'), ((31419, 31452), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (31433, 31452), True, 'import tensorflow as tf\n'), ((31594, 31611), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (31607, 31611), True, 'import tensorflow as tf\n'), ((31624, 31643), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (31636, 31643), True, 'import tensorflow as tf\n'), ((32734, 32758), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['x'], {}), '(x)\n', (32755, 32758), True, 'import tensorflow as tf\n'), ((32819, 32846), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['y'], {}), '(y)\n', (32843, 32846), True, 'import tensorflow as tf\n'), ((34947, 34980), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (34961, 34980), True, 'import tensorflow as tf\n'), ((36241, 36274), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (36255, 36274), True, 'import tensorflow as tf\n'), ((36793, 36811), 'tensorflow.zeros', 'tf.zeros', (['num_dims'], {}), '(num_dims)\n', (36801, 36811), True, 'import tensorflow as tf\n'), ((36824, 36841), 'tensorflow.ones', 'tf.ones', (['num_dims'], {}), '(num_dims)\n', (36831, 36841), True, 'import tensorflow as tf\n'), ((38183, 38205), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['scales'], {}), '(scales)\n', (38197, 38205), True, 'import tensorflow as tf\n'), ((38719, 38856), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH]', '[0.001, 0.0001, 1e-05]'], {}), '(global_step, [train_size * 500 // TRAIN_BATCH, \n train_size * 800 // TRAIN_BATCH], [0.001, 0.0001, 1e-05])\n', (38746, 38856), True, 'import tensorflow as tf\n'), ((40100, 40121), 'six.moves.zip', 'zip', (['log_q_z', 'log_p_z'], {}), '(log_q_z, log_p_z)\n', (40103, 40121), False, 'from six.moves import zip\n'), ((42094, 42115), 'tensorflow.minimum', 'tf.minimum', (['frac', '(1.0)'], {}), '(frac, 1.0)\n', (42104, 42115), True, 'import tensorflow as tf\n'), ((42176, 42193), 'tensorflow.constant', 'tf.constant', (['beta'], {}), '(beta)\n', (42187, 42193), True, 'import tensorflow as tf\n'), ((42844, 42895), 'tensorflow.get_variable', 'tf.get_variable', (['"""step_size"""'], {'initializer': 'step_size'}), "('step_size', initializer=step_size)\n", (42859, 42895), True, 'import tensorflow as tf\n'), ((42930, 42952), 'tensorflow.constant', 'tf.constant', (['step_size'], {}), '(step_size)\n', (42941, 42952), True, 'import tensorflow as tf\n'), ((43011, 43096), 'tensorflow.get_variable', 'tf.get_variable', (['"""train_chain_state"""', '[self.train_size, z_dims]'], {'trainable': '(False)'}), "('train_chain_state', [self.train_size, z_dims], trainable=False\n )\n", (43026, 43096), True, 'import tensorflow as tf\n'), ((46593, 46629), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (46627, 46629), True, 'import tensorflow as tf\n'), ((47659, 47688), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_q_z'], {}), '(layer_log_q_z)\n', (47673, 47688), True, 'import tensorflow as tf\n'), ((47734, 47763), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_p_z'], {}), '(layer_log_p_z)\n', (47748, 47763), True, 'import tensorflow as tf\n'), ((47950, 47986), 'tensorflow.maximum', 
'tf.maximum', (['self._min_kl', 'layer_klqp'], {}), '(self._min_kl, layer_klqp)\n', (47960, 47986), True, 'import tensorflow as tf\n'), ((48147, 48161), 'six.moves.zip', 'zip', (['post_z', 'z'], {}), '(post_z, z)\n', (48150, 48161), False, 'from six.moves import zip\n'), ((48620, 48696), 'tensorflow_probability.mcmc.TransformedTransitionKernel', 'tfp.mcmc.TransformedTransitionKernel', ([], {'inner_kernel': 'kernel', 'bijector': 'bijector'}), '(inner_kernel=kernel, bijector=bijector)\n', (48656, 48696), True, 'import tensorflow_probability as tfp\n'), ((48843, 48876), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['s[-1, Ellipsis]'], {}), '(s[-1, Ellipsis])\n', (48859, 48876), True, 'import tensorflow as tf\n'), ((50164, 50202), 'tensorflow.gather', 'tf.gather', (['self._chain_state', 'data_idx'], {}), '(self._chain_state, data_idx)\n', (50173, 50202), True, 'import tensorflow as tf\n'), ((50916, 50956), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.sample_means'], {}), '(outputs.sample_means)\n', (50934, 50956), False, 'from neutra import utils\n'), ((51229, 51239), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (51237, 51239), True, 'import tensorflow as tf\n'), ((51383, 51426), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_step_size', '(0.001)', '(0.5)'], {}), '(new_step_size, 0.001, 0.5)\n', (51399, 51426), True, 'import tensorflow as tf\n'), ((51611, 51621), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (51619, 51621), True, 'import tensorflow as tf\n'), ((51632, 51660), 'tensorflow.name_scope', 'tf.name_scope', (['"""recog_train"""'], {}), "('recog_train')\n", (51645, 51660), True, 'import tensorflow as tf\n'), ((52389, 52415), 'tensorflow.name_scope', 'tf.name_scope', (['"""gen_train"""'], {}), "('gen_train')\n", (52402, 52415), True, 'import tensorflow as tf\n'), ((52434, 52539), 'tensorflow.cond', 'tf.cond', (['(global_step < self._no_gen_train_steps)', '(lambda : -outputs.elbo)', '(lambda : -outputs.mcmc_log_p)'], {}), '(global_step < self._no_gen_train_steps, lambda : -outputs.elbo, lambda\n : -outputs.mcmc_log_p)\n', (52441, 52539), True, 'import tensorflow as tf\n'), ((52996, 53027), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (53014, 53027), False, 'from neutra import utils\n'), ((53074, 53118), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.recon_means[:64]'], {}), '(outputs.recon_means[:64])\n', (53092, 53118), False, 'from neutra import utils\n'), ((53531, 53548), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (53539, 53548), True, 'import tensorflow as tf\n'), ((54177, 54208), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (54195, 54208), False, 'from neutra import utils\n'), ((54246, 54277), 'neutra.utils.StitchImages', 'utils.StitchImages', (['recons[:64]'], {}), '(recons[:64])\n', (54264, 54277), False, 'from neutra import utils\n'), ((54313, 54349), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ais_outputs.p_accept'], {}), '(ais_outputs.p_accept)\n', (54327, 54349), True, 'import tensorflow as tf\n'), ((55445, 55466), 'tensorflow.minimum', 'tf.minimum', (['frac', '(1.0)'], {}), '(frac, 1.0)\n', (55455, 55466), True, 'import tensorflow as tf\n'), ((55527, 55544), 'tensorflow.constant', 'tf.constant', (['beta'], {}), '(beta)\n', (55538, 55544), True, 'import tensorflow as tf\n'), ((59944, 59984), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.sample_means'], {}), '(outputs.sample_means)\n', 
(59962, 59984), False, 'from neutra import utils\n'), ((60361, 60392), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (60379, 60392), False, 'from neutra import utils\n'), ((60439, 60483), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.recon_means[:64]'], {}), '(outputs.recon_means[:64])\n', (60457, 60483), False, 'from neutra import utils\n'), ((60957, 60974), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (60965, 60974), True, 'import tensorflow as tf\n'), ((61471, 61502), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (61489, 61502), False, 'from neutra import utils\n'), ((61540, 61571), 'neutra.utils.StitchImages', 'utils.StitchImages', (['recons[:64]'], {}), '(recons[:64])\n', (61558, 61571), False, 'from neutra import utils\n'), ((61607, 61643), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ais_outputs.p_accept'], {}), '(ais_outputs.p_accept)\n', (61621, 61643), True, 'import tensorflow as tf\n'), ((62301, 62336), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[train_op]'], {}), '([train_op])\n', (62324, 62336), True, 'import tensorflow as tf\n'), ((62663, 62688), 'neutra.utils.GetLoggingOutputs', 'utils.GetLoggingOutputs', ([], {}), '()\n', (62686, 62688), False, 'from neutra import utils\n'), ((63672, 63697), 'neutra.utils.GetLoggingOutputs', 'utils.GetLoggingOutputs', ([], {}), '()\n', (63695, 63697), False, 'from neutra import utils\n'), ((65179, 65203), 'tensorflow.Session.reset', 'tf.Session.reset', (['master'], {}), '(master)\n', (65195, 65203), True, 'import tensorflow as tf\n'), ((66957, 67002), 'tensorflow.logging.info', 'tf.logging.info', (['"""Submitting shard %d"""', 'shard'], {}), "('Submitting shard %d', shard)\n", (66972, 67002), True, 'import tensorflow as tf\n'), ((67386, 67415), 'numpy.hstack', 'np.hstack', (['[all_log_p, log_p]'], {}), '([all_log_p, log_p])\n', (67395, 67415), True, 'import numpy as np\n'), ((67919, 67988), 'neutra.utils.MNISTDataset', 'utils.MNISTDataset', (['FLAGS.fashion_mnist_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.fashion_mnist_data_dir, FLAGS.test_is_valid)\n', (67937, 67988), False, 'from neutra import utils\n'), ((6849, 6898), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': 'output_init_factor'}), '(factor=output_init_factor)\n', (6871, 6898), False, 'from neutra import utils\n'), ((7070, 7119), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': 'output_init_factor'}), '(factor=output_init_factor)\n', (7092, 7119), False, 'from neutra import utils\n'), ((9106, 9122), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (9114, 9122), True, 'import tensorflow as tf\n'), ((9150, 9164), 'tensorflow.shape', 'tf.shape', (['z[0]'], {}), '(z[0])\n', (9158, 9164), True, 'import tensorflow as tf\n'), ((9484, 9501), 'six.moves.range', 'range', (['num_blocks'], {}), '(num_blocks)\n', (9489, 9501), False, 'from six.moves import range\n'), ((10231, 10266), 'tensorflow.split', 'tf.split', (['h_q', '[z_dims, z_dims]', '(-1)'], {}), '(h_q, [z_dims, z_dims], -1)\n', (10239, 10266), True, 'import tensorflow as tf\n'), ((12138, 12184), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', 'new_shape'], {}), '(x, new_shape)\n', (12170, 12184), True, 'import tensorflow as tf\n'), ((12197, 12243), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['h', 'new_shape'], {}), '(h, 
new_shape)\n', (12229, 12243), True, 'import tensorflow as tf\n'), ((17417, 17445), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (17439, 17445), False, 'from neutra import utils\n'), ((20490, 20518), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (20512, 20518), False, 'from neutra import utils\n'), ((22128, 22156), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (22150, 22156), False, 'from neutra import utils\n'), ((22655, 22679), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (22677, 22679), False, 'from neutra import utils\n'), ((23197, 23221), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (23219, 23221), False, 'from neutra import utils\n'), ((23350, 23385), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (23372, 23385), False, 'from neutra import utils\n'), ((24171, 24210), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(x * 255.0)', '(0.0)', '(255.0)'], {}), '(x * 255.0, 0.0, 255.0)\n', (24187, 24210), True, 'import tensorflow as tf\n'), ((28445, 28480), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (28467, 28480), False, 'from neutra import utils\n'), ((30730, 30765), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (30752, 30765), False, 'from neutra import utils\n'), ((34213, 34248), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (34235, 34248), False, 'from neutra import utils\n'), ((35123, 35140), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (35136, 35140), True, 'import tensorflow as tf\n'), ((35148, 35167), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (35160, 35167), True, 'import tensorflow as tf\n'), ((36376, 36393), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (36389, 36393), True, 'import tensorflow as tf\n'), ((36401, 36420), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (36413, 36420), True, 'import tensorflow as tf\n'), ((37109, 37124), 'tensorflow.zeros', 'tf.zeros', (['shape'], {}), '(shape)\n', (37117, 37124), True, 'import tensorflow as tf\n'), ((37132, 37146), 'tensorflow.ones', 'tf.ones', (['shape'], {}), '(shape)\n', (37139, 37146), True, 'import tensorflow as tf\n'), ((38069, 38090), 'tensorflow.ones_initializer', 'tf.ones_initializer', ([], {}), '()\n', (38088, 38090), True, 'import tensorflow as tf\n'), ((38302, 38324), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['scales'], {}), '(scales)\n', (38316, 38324), True, 'import tensorflow as tf\n'), ((38917, 39055), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH]', '[0.0005, 0.0001, 1e-05]'], {}), '(global_step, [train_size * 500 // TRAIN_BATCH, \n train_size * 800 // TRAIN_BATCH], [0.0005, 0.0001, 1e-05])\n', (38944, 39055), True, 'import tensorflow as tf\n'), ((42057, 42080), 'tensorflow.to_float', 'tf.to_float', (['beta_steps'], {}), '(beta_steps)\n', (42068, 42080), True, 'import tensorflow as tf\n'), ((46830, 46867), 'tensorflow.minimum', 'tf.minimum', (['max_step', 'self._step_size'], {}), '(max_step, self._step_size)\n', (46840, 46867), True, 'import tensorflow as tf\n'), ((47915, 47936), 
'six.moves.zip', 'zip', (['log_q_z', 'log_p_z'], {}), '(log_q_z, log_p_z)\n', (47918, 47936), False, 'from six.moves import zip\n'), ((48171, 48234), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shape here: %s %s"""', 'post_z_e.shape', 'z_e.shape'], {}), "('Shape here: %s %s', post_z_e.shape, z_e.shape)\n", (48186, 48234), True, 'import tensorflow as tf\n'), ((48364, 48381), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (48372, 48381), True, 'import tensorflow as tf\n'), ((49237, 49254), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (49245, 49254), True, 'import tensorflow as tf\n'), ((49470, 49503), 'tensorflow.minimum', 'tf.minimum', (['log_accept_ratio', '(0.0)'], {}), '(log_accept_ratio, 0.0)\n', (49480, 49503), True, 'import tensorflow as tf\n'), ((51001, 51042), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[outputs.post_z]'], {}), '([outputs.post_z])\n', (51024, 51042), True, 'import tensorflow as tf\n'), ((51076, 51138), 'tensorflow.scatter_update', 'tf.scatter_update', (['self._chain_state', 'data_idx', 'outputs.post_z'], {}), '(self._chain_state, data_idx, outputs.post_z)\n', (51093, 51138), True, 'import tensorflow as tf\n'), ((51481, 51559), 'tensorflow.where', 'tf.where', (['(global_step > self._step_size_warmup)', 'new_step_size', 'self._step_size'], {}), '(global_step > self._step_size_warmup, new_step_size, self._step_size)\n', (51489, 51559), True, 'import tensorflow as tf\n'), ((53670, 53687), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (53678, 53687), True, 'import tensorflow as tf\n'), ((55408, 55431), 'tensorflow.to_float', 'tf.to_float', (['beta_steps'], {}), '(beta_steps)\n', (55419, 55431), True, 'import tensorflow as tf\n'), ((61096, 61113), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (61104, 61113), True, 'import tensorflow as tf\n'), ((65215, 65233), 'tensorflow.Session', 'tf.Session', (['master'], {}), '(master)\n', (65225, 65233), True, 'import tensorflow as tf\n'), ((65263, 65276), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (65271, 65276), True, 'import numpy as np\n'), ((68040, 68105), 'neutra.utils.CIFAR10Dataset', 'utils.CIFAR10Dataset', (['FLAGS.cifar10_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.cifar10_data_dir, FLAGS.test_is_valid)\n', (68060, 68105), False, 'from neutra import utils\n'), ((69258, 69280), 'numpy.mean', 'np.mean', (['replica_log_p'], {}), '(replica_log_p)\n', (69265, 69280), True, 'import numpy as np\n'), ((69297, 69318), 'numpy.std', 'np.std', (['replica_log_p'], {}), '(replica_log_p)\n', (69303, 69318), True, 'import numpy as np\n'), ((69429, 69476), 'tensorflow.logging.info', 'tf.logging.info', (['"""All log_p: %s"""', 'replica_log_p'], {}), "('All log_p: %s', replica_log_p)\n", (69444, 69476), True, 'import tensorflow as tf\n'), ((13700, 13724), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (13722, 13724), False, 'from neutra import utils\n'), ((39111, 39205), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 800 // TRAIN_BATCH]', '[0.01, 1e-05]'], {}), '(global_step, [train_size * 800 // TRAIN_BATCH],\n [0.01, 1e-05])\n', (39138, 39205), True, 'import tensorflow as tf\n'), ((39365, 39389), 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), '(global_step)\n', (39376, 39389), True, 'import tensorflow as tf\n'), ((42017, 42053), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), 
'()\n', (42051, 42053), True, 'import tensorflow as tf\n'), ((46670, 46694), 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), '(global_step)\n', (46681, 46694), True, 'import tensorflow as tf\n'), ((47606, 47626), 'six.moves.zip', 'zip', (['z', 'other_z_init'], {}), '(z, other_z_init)\n', (47609, 47626), False, 'from six.moves import zip\n'), ((48981, 49010), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_q_z'], {}), '(layer_log_q_z)\n', (48995, 49010), True, 'import tensorflow as tf\n'), ((55368, 55404), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (55402, 55404), True, 'import tensorflow as tf\n'), ((65305, 65342), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['train_dir'], {}), '(train_dir)\n', (65331, 65342), True, 'import tensorflow as tf\n'), ((68154, 68178), 'neutra.utils.FakeMNISTDataset', 'utils.FakeMNISTDataset', ([], {}), '()\n', (68176, 68178), False, 'from neutra import utils\n'), ((9931, 9958), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['p_raw_scale'], {}), '(p_raw_scale)\n', (9945, 9958), True, 'import tensorflow as tf\n'), ((10672, 10707), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (10694, 10707), False, 'from neutra import utils\n'), ((24800, 24826), 'tensorflow.floor', 'tf.floor', (['(sample / binsize)'], {}), '(sample / binsize)\n', (24808, 24826), True, 'import tensorflow as tf\n'), ((47494, 47520), 'tensorflow.identity', 'tf.identity', (['other_layer_z'], {}), '(other_layer_z)\n', (47505, 47520), True, 'import tensorflow as tf\n'), ((47554, 47574), 'tensorflow.identity', 'tf.identity', (['layer_z'], {}), '(layer_z)\n', (47565, 47574), True, 'import tensorflow as tf\n'), ((52075, 52092), 'tensorflow.identity', 'tf.identity', (['loss'], {}), '(loss)\n', (52086, 52092), True, 'import tensorflow as tf\n'), ((52125, 52151), 'tensorflow.identity', 'tf.identity', (['(-outputs.elbo)'], {}), '(-outputs.elbo)\n', (52136, 52151), True, 'import tensorflow as tf\n'), ((53870, 53886), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (53878, 53886), True, 'import tensorflow as tf\n'), ((54422, 54469), 'tensorflow.reshape', 'tf.reshape', (['ais_outputs.log_p', '[num_chains, -1]'], {}), '(ais_outputs.log_p, [num_chains, -1])\n', (54432, 54469), True, 'import tensorflow as tf\n'), ((61293, 61309), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (61301, 61309), True, 'import tensorflow as tf\n'), ((61716, 61763), 'tensorflow.reshape', 'tf.reshape', (['ais_outputs.log_p', '[num_chains, -1]'], {}), '(ais_outputs.log_p, [num_chains, -1])\n', (61726, 61763), True, 'import tensorflow as tf\n'), ((65698, 65759), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d step %d started."""', 'shard', 'step_num'], {}), "('Shard %d step %d started.', shard, step_num)\n", (65713, 65759), True, 'import tensorflow as tf\n'), ((65808, 65866), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d step %d done."""', 'shard', 'step_num'], {}), "('Shard %d step %d done.', shard, step_num)\n", (65823, 65866), True, 'import tensorflow as tf\n'), ((66074, 66112), 'numpy.hstack', 'np.hstack', (["[all_log_p, fetch['log_p']]"], {}), "([all_log_p, fetch['log_p']])\n", (66083, 66112), True, 'import numpy as np\n'), ((66366, 66406), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d done."""', 'shard'], {}), "('Shard %d done.', shard)\n", (66381, 66406), True, 'import tensorflow as tf\n'), 
((68751, 68780), 'six.moves.range', 'range', (['FLAGS.ais_num_replicas'], {}), '(FLAGS.ais_num_replicas)\n', (68756, 68780), False, 'from six.moves import range\n'), ((69666, 69690), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (69688, 69690), True, 'import tensorflow as tf\n'), ((69874, 69935), 'os.path.join', 'os.path.join', (['eval_dir', "('ais_shard_%d_done' % FLAGS.ais_shard)"], {}), "(eval_dir, 'ais_shard_%d_done' % FLAGS.ais_shard)\n", (69886, 69935), False, 'import os\n'), ((69945, 69979), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['sentinel_filename'], {}), '(sentinel_filename)\n', (69960, 69979), True, 'import tensorflow as tf\n'), ((70120, 70151), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['eval_dir'], {}), '(eval_dir)\n', (70141, 70151), True, 'import tensorflow as tf\n'), ((70353, 70380), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['eval_dir'], {}), '(eval_dir)\n', (70370, 70380), True, 'import tensorflow as tf\n'), ((11474, 11501), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['q_raw_scale'], {}), '(q_raw_scale)\n', (11488, 11501), True, 'import tensorflow as tf\n'), ((11604, 11625), 'tensorflow.zeros_like', 'tf.zeros_like', (['q_mean'], {}), '(q_mean)\n', (11617, 11625), True, 'import tensorflow as tf\n'), ((11633, 11658), 'tensorflow.ones_like', 'tf.ones_like', (['q_raw_scale'], {}), '(q_raw_scale)\n', (11645, 11658), True, 'import tensorflow as tf\n'), ((24914, 24951), 'tensorflow.sigmoid', 'tf.sigmoid', (['(sample + binsize / scales)'], {}), '(sample + binsize / scales)\n', (24924, 24951), True, 'import tensorflow as tf\n'), ((24954, 24972), 'tensorflow.sigmoid', 'tf.sigmoid', (['sample'], {}), '(sample)\n', (24964, 24972), True, 'import tensorflow as tf\n'), ((54496, 54519), 'tensorflow.to_float', 'tf.to_float', (['num_chains'], {}), '(num_chains)\n', (54507, 54519), True, 'import tensorflow as tf\n'), ((61790, 61813), 'tensorflow.to_float', 'tf.to_float', (['num_chains'], {}), '(num_chains)\n', (61801, 61813), True, 'import tensorflow as tf\n'), ((65969, 65992), 'numpy.mean', 'np.mean', (["fetch['log_p']"], {}), "(fetch['log_p'])\n", (65976, 65992), True, 'import numpy as np\n'), ((66022, 66048), 'numpy.mean', 'np.mean', (["fetch['p_accept']"], {}), "(fetch['p_accept'])\n", (66029, 66048), True, 'import numpy as np\n'), ((69606, 69635), 'six.moves.range', 'range', (['FLAGS.ais_num_replicas'], {}), '(FLAGS.ais_num_replicas)\n', (69611, 69635), False, 'from six.moves import range\n'), ((70483, 70503), 'numpy.savetxt', 'np.savetxt', (['f', 'log_p'], {}), '(f, log_p)\n', (70493, 70503), True, 'import numpy as np\n'), ((70515, 70552), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['sentinel_filename', '"""w"""'], {}), "(sentinel_filename, 'w')\n", (70528, 70552), True, 'import tensorflow as tf\n'), ((70406, 70462), 'os.path.join', 'os.path.join', (['eval_dir', "('ais_shard_%d' % FLAGS.ais_shard)"], {}), "(eval_dir, 'ais_shard_%d' % FLAGS.ais_shard)\n", (70418, 70462), False, 'import os\n')]
|
from flask import request, session, url_for
from requests_oauthlib import OAuth2Session
class OAuth2Login(object):
  """Base class for an OAuth2 login flow against a single provider.

  Subclasses are expected to define the attributes this class reads
  (config_prefix, default_scope, default_redirect_path, redirect_endpoint,
  state_session_key, auth_url and token_url) and to implement get_profile().
  """
def __init__(self, app=None):
if app:
self.init_app(app)
self.app = app
def get_config(self, app, name, default_value=None):
return app.config.get(self.config_prefix + name, default_value)
def init_app(self, app):
self.client_id = self.get_config(app, "CLIENT_ID")
self.client_secret = self.get_config(app, "CLIENT_SECRET")
self.scope = self.get_config(app, "SCOPE", self.default_scope).split(",")
self.redirect_scheme = self.get_config(app, "REDIRECT_SCHEME", "https")
app.add_url_rule(
self.get_config(app, "REDIRECT_PATH", self.default_redirect_path),
self.redirect_endpoint,
self.login,
)
@property
def redirect_uri(self):
return url_for(
self.redirect_endpoint,
_external=True,
_scheme=self.redirect_scheme,
)
def session(self):
return OAuth2Session(
self.client_id,
redirect_uri=self.redirect_uri,
scope=self.scope,
)
def authorization_url(self, **kwargs):
sess = self.session()
auth_url, state = sess.authorization_url(self.auth_url, **kwargs)
session[self.state_session_key] = state
return auth_url
def login(self):
sess = self.session()
# Get token
try:
sess.fetch_token(
self.token_url,
code=request.args["code"],
client_secret=self.client_secret,
)
# TODO: Check state
except Warning:
# Ignore warnings
pass
except Exception as e:
return self.login_failure_func(e)
# Get profile
try:
profile = self.get_profile(sess)
except Exception as e:
return self.login_failure_func(e)
return self.login_success_func(sess.token, profile)
def login_success(self, f):
self.login_success_func = f
return f
def login_failure(self, f):
self.login_failure_func = f
return f
def get_profile(self, sess):
raise NotImplementedError
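
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A concrete provider subclass only needs to supply the attributes the base
# class reads, plus get_profile(). The class name, config prefix and endpoint
# URLs below are placeholders, not a real provider's values.
class ExampleOAuth2Login(OAuth2Login):
  config_prefix = "EXAMPLE_LOGIN_"
  state_session_key = "example_login_state"
  redirect_endpoint = "example_login"
  default_redirect_path = "/login/example"
  default_scope = "profile,email"
  auth_url = "https://provider.example/oauth/authorize"
  token_url = "https://provider.example/oauth/token"

  def get_profile(self, sess):
    # assumes the provider exposes a JSON "who am I" endpoint
    return sess.get("https://provider.example/api/me").json()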
|
[
"requests_oauthlib.OAuth2Session",
"flask.url_for"
] |
[((832, 909), 'flask.url_for', 'url_for', (['self.redirect_endpoint'], {'_external': '(True)', '_scheme': 'self.redirect_scheme'}), '(self.redirect_endpoint, _external=True, _scheme=self.redirect_scheme)\n', (839, 909), False, 'from flask import request, session, url_for\n'), ((968, 1047), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', (['self.client_id'], {'redirect_uri': 'self.redirect_uri', 'scope': 'self.scope'}), '(self.client_id, redirect_uri=self.redirect_uri, scope=self.scope)\n', (981, 1047), False, 'from requests_oauthlib import OAuth2Session\n')]
|
# coding: utf-8
import socketserver
import re
import socket
import datetime
import os
import mimetypes as MT
import sys
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
# status codes that can be handled
STATUS_CODE_RESPONSE = {
0: " 0 Surprise!",
200: " 200 OK",
301: " 301 Moved Permanently",
404: " 404 Not Found",
405: " 405 Method Not Allowed"
}
# HTTP request methods that can be handled
HTTP_REQUEST_METHODS = {
"GET": 1,
}
# some hard coded text
END_OF_LINE_RESPONSE = "\r\n"
PROTOCOL_RESPONSE = "HTTP/1.1"
DIRECTORY_TO_SERVE = "www"
# result codes for opening the requested file
GOODFILE = 1
ISADIRECTORY = 2
NOFILE = 3
# response generator class
class MyServerResponse:
def __init__(self, status=0, expire_time="-1", content_type="default", \
accept_ranges="none"):
self.response_header = {
"status_response": PROTOCOL_RESPONSE + STATUS_CODE_RESPONSE[status],
"date_response": "Date: " + datetime.datetime.now().\
strftime('%A, %d %b %Y %X %Z'),
"expires": "Expires: " + expire_time,
"content_type": "Content-Type: " + content_type,
"accept_ranges": "Accept-Ranges: " + accept_ranges,
"redirect_address": "Location: http://",
"allow_header": "ALlow: GET"
}
    # send the response headers appropriate to the given status code
def send_header(self, conn, status_code):
tmp = self.response_header["status_response"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
if status_code == 200:
tmp = self.response_header["expires"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
tmp = self.response_header["content_type"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
elif status_code == 301:
tmp = self.response_header["redirect_address"] + \
END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
elif status_code == 405:
tmp = self.response_header["allow_header"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
def set_status_response(self, status_code):
self.response_header["status_response"] = \
PROTOCOL_RESPONSE + STATUS_CODE_RESPONSE[status_code]
# request object storing the attributes of a received request
class MyServerRequest:
def __init__(self):
self.method = None
self.url = None
def method_is_valid(self):
if self.method in HTTP_REQUEST_METHODS:
return True
else:
return False
# add more implementation here
def url_is_valid(self):
return True
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
rest_protocol_flag = False
standard_rest_cmd = "GET / HTTP/1.1"
# init the socket
self.request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
full_data = b""
with self.request as conn:
            # per-request state declarations
new_request = MyServerRequest()
status_code = 0
open_file = True
file = None
content_type = "void of magic"
file_name = "none"
type_of_file = "default"
open_result = -100
new_response = MyServerResponse()
# recv all data
while True:
data = conn.recv(1024)
if not data: break
full_data += data
if b"\r\n" in data:
break
if b"utf" in full_data:
print(full_data)
pass
str_full_data = full_data.decode("utf-8")
splited_commands = re.split('[\r|\n]+', str_full_data)
whole_request = splited_commands[0].split(' ')
            # if a request line was found in the received data
if len(whole_request) > 0:
new_request.method = whole_request[0] # try to pick methods
new_request.url = whole_request[1] # try to pick url
            # if the request method cannot be handled
if not new_request.method_is_valid():
status_code = 405
open_file = False
content_type = "none"
new_response.set_status_response(status_code)
            # if no errors occurred, try to open the requested url
if open_file:
open_result, file, file_name = openRequestedFile(new_request.url)
# try opening requested file, and return corresponding status_code
status_code = checkErrorsOfOpenedFile\
(status_code, open_result, file, file_name)
# SECURITY: check permission of opened file
status_code = checkPermissionOfRequestedFile\
(status_code, open_result, file, file_name)
new_response.set_status_response(status_code)
if status_code == 200 and file_name != None:
type_of_file = MT.guess_type(file_name, False)[0]
elif status_code == 301:
new_response.response_header["redirect_address"] += \
self.server.server_address[0] + ":" + \
str(self.server.server_address[1]) + \
new_request.url + "/"
new_response.set_status_response(status_code)
if open_result == GOODFILE and type_of_file != None:
new_response.response_header["content_type"] = "Content-Type: "
new_response.response_header["content_type"] += type_of_file
new_response.send_header(conn, status_code)
self.request.sendall(b"\r\n")
# then open file/directory and send it
if file:
self.request.sendfile(file)
#self.request.sendall(b"\r\n")
conn.close()
# argument: requested url
# return value: open file result, opened file object, local path
def openRequestedFile(client_request_url):
cru = client_request_url
if cru[-1] == r'/':
cru += "index.html"
complete_path = DIRECTORY_TO_SERVE + cru
try:
result = open(complete_path, 'rb')
content_type = cru.split(".")
return GOODFILE, result, cru
except IsADirectoryError as e:
return ISADIRECTORY, None, None
except FileNotFoundError as n:
return NOFILE, None, None
# check type and error of opened file
def checkErrorsOfOpenedFile(status_code,open_result, file, file_name):
if open_result == GOODFILE:
status_code = 200
type_of_file = MT.guess_type(file_name, False)[0]
elif open_result == ISADIRECTORY:
status_code = 301
elif open_result == NOFILE:
status_code = 404
return status_code
# SECURITY: check the permission of opened file
def checkPermissionOfRequestedFile(status_code,open_result, file, file_name):
if file_name == None:
return status_code
abs_path_of_serving_dir = os.getcwd()
abs_path_of_serving_dir += "/www/"
length_of_serving_dir = len(abs_path_of_serving_dir)
abs_path_of_request = os.path.abspath(file.name)
length_of_requested_object = len(abs_path_of_request)
if length_of_serving_dir > length_of_requested_object:
status_code = 404
elif abs_path_of_serving_dir != abs_path_of_request[:length_of_serving_dir]:
status_code = 404
return status_code
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# https://stackoverflow.com/questions/15260558/python-tcpserver-address-already-in-use-but-i-close-the-server-and-i-use-allow
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
try:
server.serve_forever()
except KeyboardInterrupt: # exit if ctrl+C
sys.exit(0)
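
# --- Illustrative sketch (added for clarity; not part of the original script) ---
# With the server running, a request can also be issued from Python instead of
# curl, mirroring the "curl -v -X GET http://127.0.0.1:8080/" hint above:
#
#   import socket
#   s = socket.create_connection(("127.0.0.1", 8080))
#   s.sendall(b"GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")
#   print(s.recv(4096).decode("utf-8", "replace"))
#   s.close()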
|
[
"re.split",
"socketserver.TCPServer",
"mimetypes.guess_type",
"os.getcwd",
"datetime.datetime.now",
"sys.exit",
"os.path.abspath"
] |
[((8002, 8013), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8011, 8013), False, 'import os\n'), ((8136, 8162), 'os.path.abspath', 'os.path.abspath', (['file.name'], {}), '(file.name)\n', (8151, 8162), False, 'import os\n'), ((8629, 8678), 'socketserver.TCPServer', 'socketserver.TCPServer', (['(HOST, PORT)', 'MyWebServer'], {}), '((HOST, PORT), MyWebServer)\n', (8651, 8678), False, 'import socketserver\n'), ((4607, 4642), 're.split', 're.split', (["'[\\r|\\n]+'", 'str_full_data'], {}), "('[\\r|\\n]+', str_full_data)\n", (4615, 4642), False, 'import re\n'), ((7609, 7640), 'mimetypes.guess_type', 'MT.guess_type', (['file_name', '(False)'], {}), '(file_name, False)\n', (7622, 7640), True, 'import mimetypes as MT\n'), ((9004, 9015), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9012, 9015), False, 'import sys\n'), ((1756, 1779), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1777, 1779), False, 'import datetime\n'), ((5982, 6013), 'mimetypes.guess_type', 'MT.guess_type', (['file_name', '(False)'], {}), '(file_name, False)\n', (5995, 6013), True, 'import mimetypes as MT\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from cloudkitty import rating
from cloudkitty.rating.hash.controllers import field as field_api
from cloudkitty.rating.hash.controllers import group as group_api
from cloudkitty.rating.hash.controllers import mapping as mapping_api
from cloudkitty.rating.hash.controllers import service as service_api
from cloudkitty.rating.hash.controllers import threshold as threshold_api
from cloudkitty.rating.hash.datamodels import mapping as mapping_models
class HashMapConfigController(rating.RatingRestControllerBase):
"""Controller exposing all management sub controllers."""
_custom_actions = {
'types': ['GET']
}
services = service_api.HashMapServicesController()
fields = field_api.HashMapFieldsController()
groups = group_api.HashMapGroupsController()
mappings = mapping_api.HashMapMappingsController()
thresholds = threshold_api.HashMapThresholdsController()
@wsme_pecan.wsexpose([wtypes.text])
def get_types(self):
"""Return the list of every mapping type available.
"""
return mapping_models.MAP_TYPE.values
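
# --- Note added for clarity (not part of the original module) ---
# Under pecan-style object dispatch, the controller attributes above are
# expected to surface as child paths of the hashmap configuration endpoint
# (services, fields, groups, mappings, thresholds), while the 'types' entry in
# _custom_actions maps a GET on .../types to get_types(). The exact URL prefix
# depends on where cloudkitty mounts this controller.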
|
[
"cloudkitty.rating.hash.controllers.group.HashMapGroupsController",
"cloudkitty.rating.hash.controllers.threshold.HashMapThresholdsController",
"cloudkitty.rating.hash.controllers.field.HashMapFieldsController",
"cloudkitty.rating.hash.controllers.mapping.HashMapMappingsController",
"cloudkitty.rating.hash.controllers.service.HashMapServicesController",
"wsmeext.pecan.wsexpose"
] |
[((1341, 1380), 'cloudkitty.rating.hash.controllers.service.HashMapServicesController', 'service_api.HashMapServicesController', ([], {}), '()\n', (1378, 1380), True, 'from cloudkitty.rating.hash.controllers import service as service_api\n'), ((1394, 1429), 'cloudkitty.rating.hash.controllers.field.HashMapFieldsController', 'field_api.HashMapFieldsController', ([], {}), '()\n', (1427, 1429), True, 'from cloudkitty.rating.hash.controllers import field as field_api\n'), ((1443, 1478), 'cloudkitty.rating.hash.controllers.group.HashMapGroupsController', 'group_api.HashMapGroupsController', ([], {}), '()\n', (1476, 1478), True, 'from cloudkitty.rating.hash.controllers import group as group_api\n'), ((1494, 1533), 'cloudkitty.rating.hash.controllers.mapping.HashMapMappingsController', 'mapping_api.HashMapMappingsController', ([], {}), '()\n', (1531, 1533), True, 'from cloudkitty.rating.hash.controllers import mapping as mapping_api\n'), ((1551, 1594), 'cloudkitty.rating.hash.controllers.threshold.HashMapThresholdsController', 'threshold_api.HashMapThresholdsController', ([], {}), '()\n', (1592, 1594), True, 'from cloudkitty.rating.hash.controllers import threshold as threshold_api\n'), ((1601, 1635), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['[wtypes.text]'], {}), '([wtypes.text])\n', (1620, 1635), True, 'import wsmeext.pecan as wsme_pecan\n')]
|
import os, re, errno
import markdown
import cgi
from cuddlefish import packaging
from cuddlefish.docs import apirenderer
from cuddlefish._version import get_versions
INDEX_PAGE = '/doc/static-files/base.html'
BASE_URL_INSERTION_POINT = '<base '
VERSION_INSERTION_POINT = '<div id="version">'
THIRD_PARTY_PACKAGE_SUMMARIES = '<ul id="third-party-package-summaries">'
HIGH_LEVEL_PACKAGE_SUMMARIES = '<ul id="high-level-package-summaries">'
LOW_LEVEL_PACKAGE_SUMMARIES = '<ul id="low-level-package-summaries">'
CONTENT_ID = '<div id="main-content">'
TITLE_ID = '<title>'
DEFAULT_TITLE = 'Add-on SDK Documentation'
def get_documentation(package_name, modules_json, doc_path):
documented_modules = []
for root, dirs, files in os.walk(doc_path):
subdir_path = root.split(os.sep)[len(doc_path.split(os.sep)):]
for filename in files:
if filename.endswith(".md"):
modname = filename[:-len(".md")]
modpath = subdir_path + [modname]
documented_modules.append(modpath)
return documented_modules
def tag_wrap(text, tag, attributes={}):
result = '\n<' + tag
for name in attributes.keys():
result += ' ' + name + '=' + '"' + attributes[name] + '"'
    result += '>' + text + '</' + tag + '>\n'
return result
def is_third_party(package_json):
return (not is_high_level(package_json)) and \
(not(is_low_level(package_json)))
def is_high_level(package_json):
return 'jetpack-high-level' in package_json.get('keywords', [])
def is_low_level(package_json):
return 'jetpack-low-level' in package_json.get('keywords', [])
def insert_after(target, insertion_point_id, text_to_insert):
insertion_point = target.find(insertion_point_id) + len(insertion_point_id)
return target[:insertion_point] + text_to_insert + target[insertion_point:]
class WebDocs(object):
def __init__(self, root, base_url = None):
self.root = root
self.pkg_cfg = packaging.build_pkg_cfg(root)
self.packages_json = packaging.build_pkg_index(self.pkg_cfg)
self.base_page = self._create_base_page(root, base_url)
def create_guide_page(self, path):
path, ext = os.path.splitext(path)
md_path = path + '.md'
md_content = unicode(open(md_path, 'r').read(), 'utf8')
guide_content = markdown.markdown(md_content)
return self._create_page(guide_content)
def create_module_page(self, path):
path, ext = os.path.splitext(path)
md_path = path + '.md'
module_content = apirenderer.md_to_div(md_path)
return self._create_page(module_content)
def create_package_page(self, package_name):
package_content = self._create_package_detail(package_name)
return self._create_page(package_content)
def _create_page(self, page_content):
page = self._insert_title(self.base_page, page_content)
page = insert_after(page, CONTENT_ID, page_content)
return page.encode('utf8')
def _create_module_list(self, package_json):
package_name = package_json['name']
libs = package_json['files'][1]['lib'][1]
doc_path = package_json.get('doc', None)
if not doc_path:
return ''
modules = get_documentation(package_name, libs, doc_path)
modules.sort()
module_items = ''
relative_doc_path = doc_path[len(self.root) + 1:]
relative_doc_path_pieces = relative_doc_path.split(os.sep)
del relative_doc_path_pieces[-1]
relative_doc_URL = "/".join(relative_doc_path_pieces)
for module in modules:
module_link = tag_wrap('/'.join(module), 'a', \
{'href': relative_doc_URL + '/' + '/'.join(module) + '.html'})
module_items += module_link
return module_items
def _create_package_summaries(self, packages_json, include):
packages = ''
for package_name in packages_json.keys():
package_json = packages_json[package_name]
if not include(package_json):
continue
package_path = self.pkg_cfg["packages"][package_name]["root_dir"]
package_directory = package_path[len(self.root) + 1:]
package_directory = "/".join(package_directory.split(os.sep))
package_link = tag_wrap(package_name, 'a', {'href': \
package_directory + "/" \
+ 'index.html'})
text = tag_wrap(package_link, 'h4')
text += self._create_module_list(package_json)
packages += tag_wrap(text, 'li', {'class':'package-summary', \
'style':'display: block;'})
return packages
def _create_base_page(self, root, base_url):
base_page = unicode(open(root + INDEX_PAGE, 'r').read(), 'utf8')
if base_url:
base_tag = 'href="' + base_url + '"'
base_page = insert_after(base_page, BASE_URL_INSERTION_POINT, base_tag)
sdk_version = get_versions()["version"]
base_page = insert_after(base_page, VERSION_INSERTION_POINT, "Version " + sdk_version)
third_party_summaries = \
self._create_package_summaries(self.packages_json, is_third_party)
base_page = insert_after(base_page, \
THIRD_PARTY_PACKAGE_SUMMARIES, third_party_summaries)
high_level_summaries = \
self._create_package_summaries(self.packages_json, is_high_level)
base_page = insert_after(base_page, \
HIGH_LEVEL_PACKAGE_SUMMARIES, high_level_summaries)
low_level_summaries = \
self._create_package_summaries(self.packages_json, is_low_level)
base_page = insert_after(base_page, \
LOW_LEVEL_PACKAGE_SUMMARIES, low_level_summaries)
return base_page
def _create_package_detail_row(self, field_value, \
field_descriptor, field_name):
meta = tag_wrap(tag_wrap(field_descriptor, 'span', \
{'class':'meta-header'}), 'td')
value = tag_wrap(tag_wrap(field_value, 'span', \
{'class':field_name}), 'td')
return tag_wrap(meta + value, 'tr')
def _create_package_detail_table(self, package_json):
table_contents = ''
if package_json.get('author', None):
table_contents += self._create_package_detail_row(\
cgi.escape(package_json['author']), 'Author', 'author')
if package_json.get('version', None):
table_contents += self._create_package_detail_row(\
package_json['version'], 'Version', 'version')
if package_json.get('license', None):
table_contents += self._create_package_detail_row(\
package_json['license'], 'License', 'license')
if package_json.get('dependencies', None):
table_contents += self._create_package_detail_row(\
', '.join(package_json['dependencies']), \
'Dependencies', 'dependencies')
table_contents += self._create_package_detail_row(\
self._create_module_list(package_json), 'Modules', 'modules')
return tag_wrap(tag_wrap(table_contents, 'tbody'), 'table', \
{'class':'meta-table'})
def _create_package_detail(self, package_name):
package_json = self.packages_json.get(package_name, None)
if not package_json:
raise IOError(errno.ENOENT, 'Package not found')
# pieces of the package detail: 1) title, 2) table, 3) description
package_title = tag_wrap(package_name, 'h1')
table = self._create_package_detail_table(package_json)
description = ''
if package_json.get('readme', None):
description += tag_wrap(tag_wrap(\
markdown.markdown(\
package_json['readme']), 'p'), 'div', {'class':'docs'})
return tag_wrap(package_title + table + description, 'div', \
{'class':'package-detail'})
def _insert_title(self, target, content):
match = re.search('<h1>.*</h1>', content)
if match:
title = match.group(0)[len('<h1>'):-len('</h1>')] + ' - ' + \
DEFAULT_TITLE
else:
title = DEFAULT_TITLE
target = insert_after(target, TITLE_ID, title)
return target
|
[
"markdown.markdown",
"cuddlefish.packaging.build_pkg_index",
"cuddlefish._version.get_versions",
"cuddlefish.packaging.build_pkg_cfg",
"os.path.splitext",
"cuddlefish.docs.apirenderer.md_to_div",
"cgi.escape",
"os.walk",
"re.search"
] |
[((732, 749), 'os.walk', 'os.walk', (['doc_path'], {}), '(doc_path)\n', (739, 749), False, 'import os, re, errno\n'), ((1978, 2007), 'cuddlefish.packaging.build_pkg_cfg', 'packaging.build_pkg_cfg', (['root'], {}), '(root)\n', (2001, 2007), False, 'from cuddlefish import packaging\n'), ((2037, 2076), 'cuddlefish.packaging.build_pkg_index', 'packaging.build_pkg_index', (['self.pkg_cfg'], {}), '(self.pkg_cfg)\n', (2062, 2076), False, 'from cuddlefish import packaging\n'), ((2201, 2223), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2217, 2223), False, 'import os, re, errno\n'), ((2343, 2372), 'markdown.markdown', 'markdown.markdown', (['md_content'], {}), '(md_content)\n', (2360, 2372), False, 'import markdown\n'), ((2482, 2504), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2498, 2504), False, 'import os, re, errno\n'), ((2561, 2591), 'cuddlefish.docs.apirenderer.md_to_div', 'apirenderer.md_to_div', (['md_path'], {}), '(md_path)\n', (2582, 2591), False, 'from cuddlefish.docs import apirenderer\n'), ((8150, 8183), 're.search', 're.search', (['"""<h1>.*</h1>"""', 'content'], {}), "('<h1>.*</h1>', content)\n", (8159, 8183), False, 'import os, re, errno\n'), ((5038, 5052), 'cuddlefish._version.get_versions', 'get_versions', ([], {}), '()\n', (5050, 5052), False, 'from cuddlefish._version import get_versions\n'), ((6471, 6505), 'cgi.escape', 'cgi.escape', (["package_json['author']"], {}), "(package_json['author'])\n", (6481, 6505), False, 'import cgi\n'), ((7869, 7910), 'markdown.markdown', 'markdown.markdown', (["package_json['readme']"], {}), "(package_json['readme'])\n", (7886, 7910), False, 'import markdown\n')]
|
# ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import jax
import numpy as onp
import chex
from ..reward_tracing import TransitionBatch
from ..utils import SumTree
from ._base import BaseReplayBuffer
__all__ = (
'PrioritizedReplayBuffer',
)
class PrioritizedReplayBuffer(BaseReplayBuffer):
r"""
A simple ring buffer for experience replay, with prioritized sampling.
This class uses *proportional* sampling, which means that the transitions are sampled with
relative probability :math:`p_i` defined as:
.. math::
p_i\ =\ \frac
{\left(|\mathcal{A}_i| + \epsilon\right)^\alpha}
{\sum_{j=1}^N \left(|\mathcal{A}_j| + \epsilon\right)^\alpha}
Here :math:`\mathcal{A}_i` are advantages provided at insertion time and :math:`N` is the
capacity of the buffer, which may be quite large. The :math:`\mathcal{A}_i` are typically just
TD errors collected from a value-function updater, e.g. :func:`QLearning.td_error
<coax.td_learning.QLearning.td_error>`.
Since the prioritized samples are biased, the :attr:`sample` method also produces non-trivial
importance weights (stored in the :class:`TransitionBatch.W
<coax.reward_tracing.TransitionBatch>` attribute). The logic for constructing these weights for
a sample of batch size :math:`n` is:
.. math::
w_i\ =\ \frac{\left(Np_i\right)^{-\beta}}{\max_{j=1}^n \left(Np_j\right)^{-\beta}}
See section 3.4 of https://arxiv.org/abs/1511.05952 for more details.
Parameters
----------
capacity : positive int
The capacity of the experience replay buffer.
alpha : positive float, optional
The sampling temperature :math:`\alpha>0`.
beta : positive float, optional
The importance-weight exponent :math:`\beta>0`.
epsilon : positive float, optional
        The small regularizer :math:`\epsilon>0` added to the priorities.
random_seed : int, optional
To get reproducible results.
"""
def __init__(self, capacity, alpha=1.0, beta=1.0, epsilon=1e-4, random_seed=None):
if not (isinstance(capacity, int) and capacity > 0):
raise TypeError(f"capacity must be a positive int, got: {capacity}")
if not (isinstance(alpha, (float, int)) and alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {alpha}")
if not (isinstance(beta, (float, int)) and beta > 0):
raise TypeError(f"beta must be a positive float, got: {beta}")
if not (isinstance(epsilon, (float, int)) and epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {epsilon}")
self._capacity = int(capacity)
self._alpha = float(alpha)
self._beta = float(beta)
self._epsilon = float(epsilon)
self._random_seed = random_seed
self._rnd = onp.random.RandomState(random_seed)
self.clear() # sets: self._deque, self._index
@property
def capacity(self):
return self._capacity
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, new_alpha):
if not (isinstance(new_alpha, (float, int)) and new_alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {new_alpha}")
if onp.isclose(new_alpha, self._alpha, rtol=0.01):
return # noop if new value is too close to old value (not worth the computation cost)
new_values = onp.where(
self._sumtree.values <= 0, 0., # only change exponents for positive values
onp.exp(onp.log(onp.maximum(self._sumtree.values, 1e-15)) * (new_alpha / self._alpha)))
self._sumtree.set_values(..., new_values)
self._alpha = float(new_alpha)
@property
def beta(self):
return self._beta
@beta.setter
def beta(self, new_beta):
if not (isinstance(new_beta, (float, int)) and new_beta > 0):
raise TypeError(f"beta must be a positive float, got: {new_beta}")
self._beta = float(new_beta)
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, new_epsilon):
if not (isinstance(new_epsilon, (float, int)) and new_epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {new_epsilon}")
self._epsilon = float(new_epsilon)
def add(self, transition_batch, Adv):
r"""
Add a transition to the experience replay buffer.
Parameters
----------
transition_batch : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
Adv : ndarray
A batch of advantages, used to construct the priorities :math:`p_i`.
"""
if not isinstance(transition_batch, TransitionBatch):
raise TypeError(
f"transition_batch must be a TransitionBatch, got: {type(transition_batch)}")
transition_batch.idx = self._index + onp.arange(transition_batch.batch_size)
idx = transition_batch.idx % self.capacity # wrap around
chex.assert_equal_shape([idx, Adv])
self._storage[idx] = list(transition_batch.to_singles())
self._sumtree.set_values(idx, onp.power(onp.abs(Adv) + self.epsilon, self.alpha))
self._index += transition_batch.batch_size
def sample(self, batch_size=32):
r"""
Get a batch of transitions to be used for bootstrapped updates.
Parameters
----------
batch_size : positive int, optional
The desired batch size of the sample.
Returns
-------
transitions : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
"""
idx = self._sumtree.sample(n=batch_size)
P = self._sumtree.values[idx] / self._sumtree.root_value # prioritized, biased propensities
W = onp.power(P * len(self), -self.beta) # inverse propensity weights (β≈1)
W /= W.max() # for stability, ensure only down-weighting (see sec. 3.4 of arxiv:1511.05952)
transition_batch = _concatenate_leaves(self._storage[idx])
chex.assert_equal_shape([transition_batch.W, W])
transition_batch.W *= W
return transition_batch
def update(self, idx, Adv):
r"""
Update the priority weights of transitions previously added to the buffer.
Parameters
----------
idx : 1d array of ints
The identifiers of the transitions to be updated.
Adv : ndarray
The corresponding updated advantages.
"""
idx = onp.asarray(idx, dtype='int32')
Adv = onp.asarray(Adv, dtype='float32')
chex.assert_equal_shape([idx, Adv])
chex.assert_rank([idx, Adv], 1)
idx_lookup = idx % self.capacity # wrap around
new_values = onp.where(
_get_transition_batch_idx(self._storage[idx_lookup]) == idx, # only update if ids match
onp.power(onp.abs(Adv) + self.epsilon, self.alpha),
self._sumtree.values[idx_lookup])
self._sumtree.set_values(idx_lookup, new_values)
def clear(self):
r""" Clear the experience replay buffer. """
self._storage = onp.full(shape=(self.capacity,), fill_value=None, dtype='object')
self._sumtree = SumTree(capacity=self.capacity)
self._index = 0
def __len__(self):
return min(self.capacity, self._index)
def __bool__(self):
return bool(len(self))
def __iter__(self):
return iter(self._storage[:len(self)])
def _concatenate_leaves(pytrees):
return jax.tree_multimap(lambda *leaves: onp.concatenate(leaves, axis=0), *pytrees)
@onp.vectorize
def _get_transition_batch_idx(transition):
return transition.idx
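# Hedged sketch (added for illustration, not part of coax): computes the priorities and
# importance weights from the docstring formulas with plain numpy for a toy batch of
# advantages; `_demo_priorities` and its arguments are hypothetical names.
def _demo_priorities(adv=(0.5, -1.0, 2.0), alpha=1.0, beta=1.0, epsilon=1e-4):
    adv = onp.asarray(adv, dtype='float32')
    priorities = onp.power(onp.abs(adv) + epsilon, alpha)  # (|A_i| + eps) ** alpha
    p = priorities / priorities.sum()                     # sampling propensities p_i
    w = onp.power(p * len(p), -beta)                          # (N * p_i) ** -beta
    w /= w.max()                                          # only down-weight (sec. 3.4)
    return p, w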
|
[
"numpy.abs",
"numpy.isclose",
"chex.assert_rank",
"numpy.arange",
"numpy.asarray",
"chex.assert_equal_shape",
"numpy.concatenate",
"numpy.full",
"numpy.maximum",
"numpy.random.RandomState"
] |
[((4815, 4850), 'numpy.random.RandomState', 'onp.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (4837, 4850), True, 'import numpy as onp\n'), ((5253, 5299), 'numpy.isclose', 'onp.isclose', (['new_alpha', 'self._alpha'], {'rtol': '(0.01)'}), '(new_alpha, self._alpha, rtol=0.01)\n', (5264, 5299), True, 'import numpy as onp\n'), ((7078, 7113), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[idx, Adv]'], {}), '([idx, Adv])\n', (7101, 7113), False, 'import chex\n'), ((8175, 8223), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[transition_batch.W, W]'], {}), '([transition_batch.W, W])\n', (8198, 8223), False, 'import chex\n'), ((8652, 8683), 'numpy.asarray', 'onp.asarray', (['idx'], {'dtype': '"""int32"""'}), "(idx, dtype='int32')\n", (8663, 8683), True, 'import numpy as onp\n'), ((8698, 8731), 'numpy.asarray', 'onp.asarray', (['Adv'], {'dtype': '"""float32"""'}), "(Adv, dtype='float32')\n", (8709, 8731), True, 'import numpy as onp\n'), ((8740, 8775), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[idx, Adv]'], {}), '([idx, Adv])\n', (8763, 8775), False, 'import chex\n'), ((8784, 8815), 'chex.assert_rank', 'chex.assert_rank', (['[idx, Adv]', '(1)'], {}), '([idx, Adv], 1)\n', (8800, 8815), False, 'import chex\n'), ((9272, 9337), 'numpy.full', 'onp.full', ([], {'shape': '(self.capacity,)', 'fill_value': 'None', 'dtype': '"""object"""'}), "(shape=(self.capacity,), fill_value=None, dtype='object')\n", (9280, 9337), True, 'import numpy as onp\n'), ((6964, 7003), 'numpy.arange', 'onp.arange', (['transition_batch.batch_size'], {}), '(transition_batch.batch_size)\n', (6974, 7003), True, 'import numpy as onp\n'), ((9698, 9729), 'numpy.concatenate', 'onp.concatenate', (['leaves'], {'axis': '(0)'}), '(leaves, axis=0)\n', (9713, 9729), True, 'import numpy as onp\n'), ((7227, 7239), 'numpy.abs', 'onp.abs', (['Adv'], {}), '(Adv)\n', (7234, 7239), True, 'import numpy as onp\n'), ((9028, 9040), 'numpy.abs', 'onp.abs', (['Adv'], {}), '(Adv)\n', (9035, 9040), True, 'import numpy as onp\n'), ((5548, 5588), 'numpy.maximum', 'onp.maximum', (['self._sumtree.values', '(1e-15)'], {}), '(self._sumtree.values, 1e-15)\n', (5559, 5588), True, 'import numpy as onp\n')]
|
"""
@author: ludvigolsen
"""
from typing import Union
import numpy as np
import pandas as pd
from utipy.utils.check_instance import check_instance
from utipy.utils.convert_to_type import convert_to_type
def blend(x1: Union[list, np.ndarray, pd.Series], x2: Union[list, np.ndarray, pd.Series], amount: float = 0.5) -> Union[list, np.ndarray, pd.Series]:
"""
Blend two arrays
Parameters
----------
x1 : list, np.ndarray, pd.Series
The first array.
x2 : list, np.ndarray, pd.Series
The second array.
amount : float
        Blend rate: a fraction between 0 and 1.
        0: Keep only x1.
        1: Keep only x2.
        0.1: 10% x2 / 90% x1.
        A value between 0 and 1 will result in integers becoming floats.
Returns
-------
list, np.ndarray, pd.Series
Blended array with type of the original (x1)
Examples
--------
Uncomment code to run.
# x1 = [1,2,3,4,5]
# x2 = [4,5,6,7,8]
# blend(x1, x2, amount = 0.5)
returns [2.5,3.5,4.5,5.5,6.5]
"""
# Get instance types (np.ndarray, list, pd.Series)
instance_type = check_instance(x1)
x1_weighted = np.multiply(x1, (1 - amount))
x2_weighted = np.multiply(x2, amount)
blended = x1_weighted + x2_weighted
# Convert to original type (np.ndarray, list, pd.Series)
return convert_to_type(blended, instance_type)
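# Hedged usage sketch (added for illustration, not part of the original module):
# mirrors the docstring example; `_demo_blend` is a hypothetical name.
def _demo_blend():
    x1 = [1, 2, 3, 4, 5]
    x2 = [4, 5, 6, 7, 8]
    return blend(x1, x2, amount=0.5)  # -> [2.5, 3.5, 4.5, 5.5, 6.5]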
|
[
"utipy.utils.check_instance.check_instance",
"numpy.multiply",
"utipy.utils.convert_to_type.convert_to_type"
] |
[((1154, 1172), 'utipy.utils.check_instance.check_instance', 'check_instance', (['x1'], {}), '(x1)\n', (1168, 1172), False, 'from utipy.utils.check_instance import check_instance\n'), ((1192, 1219), 'numpy.multiply', 'np.multiply', (['x1', '(1 - amount)'], {}), '(x1, 1 - amount)\n', (1203, 1219), True, 'import numpy as np\n'), ((1240, 1263), 'numpy.multiply', 'np.multiply', (['x2', 'amount'], {}), '(x2, amount)\n', (1251, 1263), True, 'import numpy as np\n'), ((1378, 1417), 'utipy.utils.convert_to_type.convert_to_type', 'convert_to_type', (['blended', 'instance_type'], {}), '(blended, instance_type)\n', (1393, 1417), False, 'from utipy.utils.convert_to_type import convert_to_type\n')]
|
from typing import List
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer, unroll_conformers
from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
_SR = StepRMSDEnum()
_SDM = StepDataManipulationEnum()
class StepRMSD(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters
if _SR.METHOD not in self.settings.additional.keys():
self.settings.additional[_SR.METHOD] = _SR.METHOD_ALIGNMOL
def _calculate_RMSD(self, conformers: List[Conformer]):
for conf in conformers:
rmsd_matrix = self._calculate_rms_matrix(
conformers=[conf] + conf.get_extra_data()[_SDM.KEY_MATCHED],
rms_method=self._get_rms_method(),
)
# use the specified tag name if it is the first value and append an index in case there are more
for idx, col in enumerate(rmsd_matrix.columns[1:]):
combined_tag = "".join([_SR.RMSD_TAG, "" if idx == 0 else str(idx)])
rmsd_value = rmsd_matrix.iloc[[0]][col][0]
conf.get_molecule().SetProp(combined_tag, str(rmsd_value))
conf.get_extra_data()[_SDM.KEY_MATCHED][idx].get_molecule().SetProp(
combined_tag, str(rmsd_value)
)
def execute(self):
# this assumes that the conformers that are to be matched for the calculation of the RMSD matrix, are attached
# as a list in a generic data field with a specified key
conformers = unroll_conformers(compounds=self.get_compounds())
self._calculate_RMSD(conformers=conformers)
self._logger.log(
f"Annotated {len(conformers)} conformers with RMSD values (tag: {_SR.RMSD_TAG}).",
_LE.INFO,
)
# TODO: add a nice pandas DF with the RMSD values to a generic data field
|
[
"icolos.utils.enums.step_enums.StepRMSDEnum",
"icolos.utils.enums.step_enums.StepDataManipulationEnum"
] |
[((341, 355), 'icolos.utils.enums.step_enums.StepRMSDEnum', 'StepRMSDEnum', ([], {}), '()\n', (353, 355), False, 'from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum\n'), ((363, 389), 'icolos.utils.enums.step_enums.StepDataManipulationEnum', 'StepDataManipulationEnum', ([], {}), '()\n', (387, 389), False, 'from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class IOTHomepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from IOT")
delete_page_cache('iot_home')
|
[
"frappe._",
"frappe.website.utils.delete_page_cache"
] |
[((434, 463), 'frappe.website.utils.delete_page_cache', 'delete_page_cache', (['"""iot_home"""'], {}), "('iot_home')\n", (451, 463), False, 'from frappe.website.utils import delete_page_cache\n'), ((369, 431), 'frappe._', 'frappe._', (['"""This is an example website auto-generated from IOT"""'], {}), "('This is an example website auto-generated from IOT')\n", (377, 431), False, 'import frappe\n')]
|
from __future__ import annotations
from typing import Callable, Sequence, TYPE_CHECKING
import functools
if TYPE_CHECKING:
from .build import BuildStepCallable
def split_step_name(name: str, new=' ', old='_'):
return name.replace(old, new).capitalize()
def print_step_name(formatter=split_step_name, args: Sequence=()):
"""Gets a decorator that formats the name of the build step and prints it"""
fmt_args = args
def format_step_name(func: Callable):
@functools.wraps(func)
def decorated(*args, **kwargs):
print(formatter(func.__name__, *fmt_args))
return func(*args, **kwargs)
return decorated
return format_step_name
def print_step_doc():
def decorate_with(func: Callable):
@functools.wraps(func)
def output_func_doc(*args, **kwargs):
print(func.__doc__)
            return func(*args, **kwargs)
return output_func_doc
return decorate_with
def composed(*decorators: BuildStepCallable) -> BuildStepCallable:
"""
    Used to compose a single decorator out of several. Useful for defining
    specific outputs and progress reports for a build step and reusing them.
"""
def decorated(func: BuildStepCallable):
for decorator in reversed(decorators):
func = decorator(func)
return func
return decorated
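# Hedged usage sketch (added for illustration, not part of the original module):
# composes the two decorators above onto a dummy build step; `demo_step` is a
# hypothetical name.
@composed(print_step_name(), print_step_doc())
def demo_step():
    """Copy build artifacts."""
    return "done"
# Calling demo_step() would print "Demo step", then the docstring, then return "done".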
|
[
"functools.wraps"
] |
[((484, 505), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (499, 505), False, 'import functools\n'), ((766, 787), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (781, 787), False, 'import functools\n')]
|
"""
metrics application instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
import os
from byceps.config import ConfigurationError
from byceps.metrics.application import create_app
ENV_VAR_NAME_DATABASE_URI = 'DATABASE_URI'
database_uri = os.environ.get(ENV_VAR_NAME_DATABASE_URI)
if not database_uri:
raise ConfigurationError(
f"No database URI was specified via the '{ENV_VAR_NAME_DATABASE_URI}' "
"environment variable.",
)
app = create_app(database_uri)
|
[
"byceps.metrics.application.create_app",
"os.environ.get",
"byceps.config.ConfigurationError"
] |
[((320, 361), 'os.environ.get', 'os.environ.get', (['ENV_VAR_NAME_DATABASE_URI'], {}), '(ENV_VAR_NAME_DATABASE_URI)\n', (334, 361), False, 'import os\n'), ((539, 563), 'byceps.metrics.application.create_app', 'create_app', (['database_uri'], {}), '(database_uri)\n', (549, 563), False, 'from byceps.metrics.application import create_app\n'), ((393, 515), 'byceps.config.ConfigurationError', 'ConfigurationError', (['f"""No database URI was specified via the \'{ENV_VAR_NAME_DATABASE_URI}\' environment variable."""'], {}), '(\n f"No database URI was specified via the \'{ENV_VAR_NAME_DATABASE_URI}\' environment variable."\n )\n', (411, 515), False, 'from byceps.config import ConfigurationError\n')]
|
from enum import Enum
import pytest
import gino
from gino.dialects.aiomysql import AsyncEnum
pytestmark = pytest.mark.asyncio
db = gino.Gino()
class MyEnum(Enum):
ONE = "one"
TWO = "two"
class Blog(db.Model):
__tablename__ = "s_blog"
id = db.Column(db.BigInteger(), primary_key=True)
title = db.Column(db.Unicode(255), index=True, comment="Title Comment")
visits = db.Column(db.BigInteger(), default=0)
comment_id = db.Column(db.ForeignKey("s_comment.id"))
number = db.Column(db.Enum(MyEnum), nullable=False, default=MyEnum.TWO)
number2 = db.Column(AsyncEnum(MyEnum), nullable=False, default=MyEnum.TWO)
class Comment(db.Model):
__tablename__ = "s_comment"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id", name="blog_id_fk"))
blog_seq = db.Sequence("blog_seq", metadata=db, schema="schema_test")
async def test(engine, define=True):
async with engine.acquire() as conn:
assert not await engine.dialect.has_table(conn, "non_exist")
Blog.__table__.comment = "Blog Comment"
db.bind = engine
await db.gino.create_all()
await Blog.number.type.create_async(engine, checkfirst=True)
await Blog.number2.type.create_async(engine, checkfirst=True)
await db.gino.create_all(tables=[Blog.__table__], checkfirst=True)
await blog_seq.gino.create(checkfirst=True)
await Blog.__table__.gino.create(checkfirst=True)
await db.gino.drop_all()
await db.gino.drop_all(tables=[Blog.__table__], checkfirst=True)
await Blog.__table__.gino.drop(checkfirst=True)
await blog_seq.gino.drop(checkfirst=True)
if define:
class Comment2(db.Model):
__tablename__ = "s_comment_2"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id"))
await db.gino.create_all()
await db.gino.drop_all()
|
[
"gino.Gino",
"gino.dialects.aiomysql.AsyncEnum"
] |
[((134, 145), 'gino.Gino', 'gino.Gino', ([], {}), '()\n', (143, 145), False, 'import gino\n'), ((593, 610), 'gino.dialects.aiomysql.AsyncEnum', 'AsyncEnum', (['MyEnum'], {}), '(MyEnum)\n', (602, 610), False, 'from gino.dialects.aiomysql import AsyncEnum\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from gdcmdtools.base import BASE_INFO
from gdcmdtools.base import DEBUG_LEVEL
from gdcmdtools.get import GDGet
from gdcmdtools.get import export_format
import argparse
from argparse import RawTextHelpFormatter
from pprint import pprint
import logging
logger = logging.getLogger()
__THIS_APP = 'gdget'
__THIS_DESCRIPTION = 'Tool to download file from Google Drive'
__THIS_VERSION = BASE_INFO["version"]
def test():
assert True
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='%s v%s - %s - %s (%s)' %
(__THIS_APP,
__THIS_VERSION,
__THIS_DESCRIPTION,
BASE_INFO["app"],
BASE_INFO["description"]),
formatter_class=RawTextHelpFormatter)
arg_parser.add_argument(
'file_id',
help='The file id or drive link for the file you\'re going to download')
    help_export_format = "\n".join(
        [re.search(r".*google-apps\.(.*)", k).group(1) + ": " +
         ", ".join(export_format[k])
         for k in export_format.iterkeys()])
arg_parser.add_argument(
'-f',
'--export_format',
metavar='FORMAT',
default='raw',
required=False,
help='specify the export format for downloading,\ngoogle_format: export_format\n%s' %
help_export_format)
arg_parser.add_argument(
'-s',
'--save_as',
metavar='NEW_FILE_NAME',
help='save the downloaded file as ')
arg_parser.add_argument('--debug',
choices=DEBUG_LEVEL,
default=DEBUG_LEVEL[-1],
help='define the debug level')
args = arg_parser.parse_args()
    # set debug level
logger.setLevel(getattr(logging, args.debug.upper()))
logger.debug(args)
get = GDGet(args.file_id, args.export_format, args.save_as)
result = get.run()
sys.exit(0)
|
[
"logging.getLogger",
"gdcmdtools.get.export_format.iterkeys",
"argparse.ArgumentParser",
"gdcmdtools.get.GDGet",
"sys.exit",
"re.search"
] |
[((333, 352), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (350, 352), False, 'import logging\n'), ((552, 754), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('%s v%s - %s - %s (%s)' % (__THIS_APP, __THIS_VERSION, __THIS_DESCRIPTION,\n BASE_INFO['app'], BASE_INFO['description']))", 'formatter_class': 'RawTextHelpFormatter'}), "(description='%s v%s - %s - %s (%s)' % (__THIS_APP,\n __THIS_VERSION, __THIS_DESCRIPTION, BASE_INFO['app'], BASE_INFO[\n 'description']), formatter_class=RawTextHelpFormatter)\n", (575, 754), False, 'import argparse\n'), ((1949, 2002), 'gdcmdtools.get.GDGet', 'GDGet', (['args.file_id', 'args.export_format', 'args.save_as'], {}), '(args.file_id, args.export_format, args.save_as)\n', (1954, 2002), False, 'from gdcmdtools.get import GDGet\n'), ((2031, 2042), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2039, 2042), False, 'import sys\n'), ((1161, 1185), 'gdcmdtools.get.export_format.iterkeys', 'export_format.iterkeys', ([], {}), '()\n', (1183, 1185), False, 'from gdcmdtools.get import export_format\n'), ((996, 1032), 're.search', 're.search', (['""".*google-apps\\\\.(.*)"""', 'k'], {}), "('.*google-apps\\\\.(.*)', k)\n", (1005, 1032), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
from Lotus.app import app
from flask import render_template
@app.route('/')
def index():
return 'welcome'
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html')
@app.errorhandler(405)
def request_method_error(error):
return render_template('405.html')
|
[
"flask.render_template",
"Lotus.app.app.errorhandler",
"Lotus.app.app.route"
] |
[((87, 101), 'Lotus.app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (96, 101), False, 'from Lotus.app import app\n'), ((139, 160), 'Lotus.app.app.errorhandler', 'app.errorhandler', (['(404)'], {}), '(404)\n', (155, 160), False, 'from Lotus.app import app\n'), ((230, 251), 'Lotus.app.app.errorhandler', 'app.errorhandler', (['(405)'], {}), '(405)\n', (246, 251), False, 'from Lotus.app import app\n'), ((199, 226), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (214, 226), False, 'from flask import render_template\n'), ((296, 323), 'flask.render_template', 'render_template', (['"""405.html"""'], {}), "('405.html')\n", (311, 323), False, 'from flask import render_template\n')]
|
"""Git interface."""
from __future__ import annotations
import contextlib
import functools
import operator
import re
import subprocess # noqa: S404
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import Any
from typing import cast
from typing import Iterator
from typing import List
from typing import Optional
import pygit2
from retrocookie.utils import removeprefix
def git(
*args: str, check: bool = True, **kwargs: Any
) -> subprocess.CompletedProcess[str]:
"""Invoke git."""
return subprocess.run( # noqa: S603,S607
["git", *args], check=check, text=True, capture_output=True, **kwargs
)
VERSION_PATTERN = re.compile(
r"""
(?P<major>\d+)\.
(?P<minor>\d+)
(\.(?P<patch>\d+))?
""",
re.VERBOSE,
)
@dataclass(frozen=True, order=True)
class Version:
"""Simplistic representation of git versions."""
major: int
minor: int
patch: int
_text: Optional[str] = field(default=None, compare=False)
@classmethod
def parse(cls, text: str) -> Version:
"""Extract major.minor[.patch] from the start of the text."""
match = VERSION_PATTERN.match(text)
if match is None:
raise ValueError(f"invalid version {text!r}")
parts = match.groupdict(default="0")
return cls(
int(parts["major"]), int(parts["minor"]), int(parts["patch"]), _text=text
)
def __str__(self) -> str:
"""Return the original representation."""
return (
self._text
if self._text is not None
else f"{self.major}.{self.minor}.{self.patch}"
)
def version() -> Version:
"""Return the git version."""
text = git("version").stdout.strip()
text = removeprefix(text, "git version ")
return Version.parse(text)
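# Hedged sketch (added for illustration, not part of retrocookie): demonstrates the
# parsing above; `_demo_parse_version` is a hypothetical name.
def _demo_parse_version() -> Version:
    v = Version.parse("2.34.1.windows.1")
    # v compares equal to Version(2, 34, 1); str(v) returns the original text.
    return v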
def get_default_branch() -> str:
"""Return the default branch for new repositories."""
get_configs = [
pygit2.Config.get_global_config,
pygit2.Config.get_system_config,
]
for get_config in get_configs:
with contextlib.suppress(IOError, KeyError):
config = get_config()
branch = config["init.defaultBranch"]
assert isinstance(branch, str) # noqa: S101
return branch
return "master"
class Repository:
"""Git repository."""
def __init__(
self, path: Optional[Path] = None, *, repo: Optional[pygit2.Repository] = None
) -> None:
"""Initialize."""
if repo is None:
self.path = path or Path.cwd()
self.repo = pygit2.Repository(self.path)
else:
self.path = Path(repo.workdir or repo.path)
self.repo = repo
def git(self, *args: str, **kwargs: Any) -> subprocess.CompletedProcess[str]:
"""Invoke git."""
return git(*args, cwd=self.path, **kwargs)
@classmethod
def init(cls, path: Path, *, bare: bool = False) -> Repository:
"""Create a repository."""
# https://github.com/libgit2/libgit2/issues/2849
path.parent.mkdir(exist_ok=True, parents=True)
repo = pygit2.init_repository(path, bare=bare)
return cls(path, repo=repo)
@classmethod
def clone(cls, url: str, path: Path, *, mirror: bool = False) -> Repository:
"""Clone a repository."""
options = ["--mirror"] if mirror else []
git("clone", *options, url, str(path))
return cls(path)
def create_branch(self, branch: str, ref: str = "HEAD") -> None:
"""Create a branch."""
commit = self.repo.revparse_single(ref)
self.repo.branches.create(branch, commit)
def get_current_branch(self) -> str:
"""Return the current branch."""
return self.repo.head.shorthand # type: ignore[no-any-return]
def exists_branch(self, branch: str) -> bool:
"""Return True if the branch exists."""
return branch in self.repo.branches
def switch_branch(self, branch: str) -> None:
"""Switch the current branch."""
self.repo.checkout(self.repo.branches[branch])
def update_remote(self) -> None:
"""Update the remotes."""
self.git("remote", "update")
def fetch_commits(self, source: Repository, *commits: str) -> None:
"""Fetch the given commits and their immediate parents."""
path = source.path.resolve()
self.git("fetch", "--no-tags", "--depth=2", str(path), *commits)
def push(self, remote: str, *refs: str, force: bool = False) -> None:
"""Update remote refs."""
options = ["--force-with-lease"] if force else []
self.git("push", *options, remote, *refs)
def parse_revisions(self, *revisions: str) -> List[str]:
"""Parse revisions using the format specified in gitrevisions(7)."""
process = self.git("rev-list", "--no-walk", *revisions)
result = process.stdout.split()
result.reverse()
return result
def lookup_replacement(self, commit: str) -> str:
"""Lookup the replace ref for the given commit."""
refname = f"refs/replace/{commit}"
ref = self.repo.lookup_reference(refname)
return cast(str, ref.target.hex)
def _ensure_relative(self, path: Path) -> Path:
"""Interpret the path relative to the repository root."""
return path.relative_to(self.path) if path.is_absolute() else path
def read_text(self, path: Path, *, ref: str = "HEAD") -> str:
"""Return the contents of the blob at the given path."""
commit = self.repo.revparse_single(ref)
path = self._ensure_relative(path)
blob = functools.reduce(operator.truediv, path.parts, commit.tree)
return cast(str, blob.data.decode())
def exists(self, path: Path, *, ref: str = "HEAD") -> bool:
"""Return True if a blob exists at the given path."""
commit = self.repo.revparse_single(ref)
path = self._ensure_relative(path)
try:
functools.reduce(operator.truediv, path.parts, commit.tree)
return True
except KeyError:
return False
    def add(self, *paths: Path) -> None:
        """Add paths to the index (all files if no paths are given)."""
        if paths:
            for path in paths:
                path = self._ensure_relative(path)
                self.repo.index.add(path)
        else:
            self.repo.index.add_all()
        self.repo.index.write()
def commit(self, message: str) -> None:
"""Create a commit."""
try:
head = self.repo.head
refname = head.name
parents = [head.target]
except pygit2.GitError:
branch = get_default_branch()
refname = f"refs/heads/{branch}"
parents = []
tree = self.repo.index.write_tree()
author = committer = self.repo.default_signature
self.repo.create_commit(refname, author, committer, message, tree, parents)
def cherrypick(self, *refs: str) -> None:
"""Cherry-pick the given commits."""
self.git("cherry-pick", *refs)
@contextlib.contextmanager
def worktree(
self,
branch: str,
path: Path,
*,
base: str = "HEAD",
force: bool = False,
force_remove: bool = False,
) -> Iterator[Repository]:
"""Context manager to add and remove a worktree."""
repository = self.add_worktree(branch, path, base=base, force=force)
try:
yield repository
finally:
self.remove_worktree(path, force=force_remove)
def add_worktree(
self,
branch: str,
path: Path,
*,
base: str = "HEAD",
force: bool = False,
) -> Repository:
"""Add a worktree."""
self.git(
"worktree",
"add",
str(path),
"--no-track",
"-B" if force else "-b",
branch,
base,
)
return Repository(path)
def remove_worktree(self, path: Path, *, force: bool = False) -> None:
"""Remove a worktree."""
if force:
self.git("worktree", "remove", "--force", str(path))
else:
self.git("worktree", "remove", str(path))
|
[
"re.compile",
"functools.reduce",
"pathlib.Path",
"retrocookie.utils.removeprefix",
"subprocess.run",
"dataclasses.dataclass",
"pathlib.Path.cwd",
"typing.cast",
"pygit2.init_repository",
"contextlib.suppress",
"pygit2.Repository",
"dataclasses.field"
] |
[((697, 811), 're.compile', 're.compile', (['"""\n (?P<major>\\\\d+)\\\\.\n (?P<minor>\\\\d+)\n (\\\\.(?P<patch>\\\\d+))?\n """', 're.VERBOSE'], {}), '(\n """\n (?P<major>\\\\d+)\\\\.\n (?P<minor>\\\\d+)\n (\\\\.(?P<patch>\\\\d+))?\n """\n , re.VERBOSE)\n', (707, 811), False, 'import re\n'), ((812, 846), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'order': '(True)'}), '(frozen=True, order=True)\n', (821, 846), False, 'from dataclasses import dataclass\n'), ((558, 647), 'subprocess.run', 'subprocess.run', (["['git', *args]"], {'check': 'check', 'text': '(True)', 'capture_output': '(True)'}), "(['git', *args], check=check, text=True, capture_output=True,\n **kwargs)\n", (572, 647), False, 'import subprocess\n'), ((988, 1022), 'dataclasses.field', 'field', ([], {'default': 'None', 'compare': '(False)'}), '(default=None, compare=False)\n', (993, 1022), False, 'from dataclasses import field\n'), ((1787, 1821), 'retrocookie.utils.removeprefix', 'removeprefix', (['text', '"""git version """'], {}), "(text, 'git version ')\n", (1799, 1821), False, 'from retrocookie.utils import removeprefix\n'), ((3151, 3190), 'pygit2.init_repository', 'pygit2.init_repository', (['path'], {'bare': 'bare'}), '(path, bare=bare)\n', (3173, 3190), False, 'import pygit2\n'), ((5212, 5237), 'typing.cast', 'cast', (['str', 'ref.target.hex'], {}), '(str, ref.target.hex)\n', (5216, 5237), False, 'from typing import cast\n'), ((5670, 5729), 'functools.reduce', 'functools.reduce', (['operator.truediv', 'path.parts', 'commit.tree'], {}), '(operator.truediv, path.parts, commit.tree)\n', (5686, 5729), False, 'import functools\n'), ((2102, 2140), 'contextlib.suppress', 'contextlib.suppress', (['IOError', 'KeyError'], {}), '(IOError, KeyError)\n', (2121, 2140), False, 'import contextlib\n'), ((2615, 2643), 'pygit2.Repository', 'pygit2.Repository', (['self.path'], {}), '(self.path)\n', (2632, 2643), False, 'import pygit2\n'), ((2682, 2713), 'pathlib.Path', 'Path', (['(repo.workdir or repo.path)'], {}), '(repo.workdir or repo.path)\n', (2686, 2713), False, 'from pathlib import Path\n'), ((6018, 6077), 'functools.reduce', 'functools.reduce', (['operator.truediv', 'path.parts', 'commit.tree'], {}), '(operator.truediv, path.parts, commit.tree)\n', (6034, 6077), False, 'import functools\n'), ((2580, 2590), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (2588, 2590), False, 'from pathlib import Path\n')]
|
from datetime import datetime
from enum import Enum
from json.encoder import ESCAPE_ASCII, ESCAPE_DCT # type: ignore
from typing import List, Optional, Tuple, TypedDict
class JsonTruncText:
def __init__(self, text="", truncated=False, added_bytes=0):
self.text = text
self.truncated = truncated
self._added_bytes = max(0, added_bytes)
def __eq__(self, other):
if not isinstance(other, JsonTruncText):
return False
return (self.text, self.truncated) == (other.text, other.truncated)
def __repr__(self):
return f'JsonTruncText(text="{self.text}", truncated={self.truncated})'
@property
def byte_size(self) -> int:
return len(self.text) + self._added_bytes
@staticmethod
def json_char_len(char: str) -> int:
try:
return len(ESCAPE_DCT[char])
except KeyError:
return 6 if ord(char) < 0x10000 else 12
@classmethod
def truncate(cls, s: str, limit: int):
limit = max(limit, 0)
s_init_len = len(s)
s = s[:limit]
added_bytes = 0
for match in ESCAPE_ASCII.finditer(s):
start, end = match.span(0)
markup = cls.json_char_len(match.group(0)) - 1
added_bytes += markup
if end + added_bytes > limit:
return cls(
text=s[:start],
truncated=True,
added_bytes=added_bytes - markup,
)
if end + added_bytes == limit:
s = s[:end]
return cls(
text=s,
truncated=len(s) < s_init_len,
added_bytes=added_bytes,
)
return cls(
text=s,
truncated=len(s) < s_init_len,
added_bytes=added_bytes,
)
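# Hedged sketch (added for illustration, not part of the original module): shows how
# JsonTruncText.truncate counts the JSON-escaped size of each character, so '\n'
# costs two bytes of the limit; `_demo_truncate` is a hypothetical name.
def _demo_truncate():
    t = JsonTruncText.truncate("ab\ncd", limit=4)
    # 'ab' uses 2 bytes and '\n' escapes to '\\n' (2 more), so 'cd' is dropped.
    return t.text, t.truncated, t.byte_size  # ('ab\n', True, 4)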
class ObservabilityEventTypes(str, Enum):
API_CALL = "api_call"
EVENT_DELIVERY_ATTEMPT = "event_delivery_attempt"
HttpHeaders = List[Tuple[str, str]]
class App(TypedDict):
id: str
name: str
class Webhook(TypedDict):
id: str
name: str
target_url: str
subscription_query: Optional[JsonTruncText]
class ObservabilityEventBase(TypedDict):
event_type: ObservabilityEventTypes
class GraphQLOperation(TypedDict):
name: Optional[JsonTruncText]
operation_type: Optional[str]
query: Optional[JsonTruncText]
result: Optional[JsonTruncText]
result_invalid: bool
class ApiCallRequest(TypedDict):
id: str
method: str
url: str
time: float
headers: HttpHeaders
content_length: int
class ApiCallResponse(TypedDict):
headers: HttpHeaders
status_code: Optional[int]
content_length: int
class ApiCallPayload(ObservabilityEventBase):
request: ApiCallRequest
response: ApiCallResponse
app: Optional[App]
gql_operations: List[GraphQLOperation]
class EventDeliveryPayload(TypedDict):
content_length: int
body: JsonTruncText
class EventDelivery(TypedDict):
id: str
status: str
event_type: str
event_sync: bool
payload: EventDeliveryPayload
class EventDeliveryAttemptRequest(TypedDict):
headers: HttpHeaders
class EventDeliveryAttemptResponse(TypedDict):
headers: HttpHeaders
status_code: Optional[int]
content_length: int
body: JsonTruncText
class EventDeliveryAttemptPayload(ObservabilityEventBase):
id: str
time: datetime
duration: Optional[float]
status: str
next_retry: Optional[datetime]
request: EventDeliveryAttemptRequest
response: EventDeliveryAttemptResponse
event_delivery: EventDelivery
webhook: Webhook
app: App
|
[
"json.encoder.ESCAPE_ASCII.finditer"
] |
[((1126, 1150), 'json.encoder.ESCAPE_ASCII.finditer', 'ESCAPE_ASCII.finditer', (['s'], {}), '(s)\n', (1147, 1150), False, 'from json.encoder import ESCAPE_ASCII, ESCAPE_DCT\n')]
|
# (c) 2013 <NAME> and contributors; written to work with Django and Paste (http://pythonpaste.org)
# Paste CGI "middleware" for Django by <NAME> <<EMAIL>>
# Open Technology Group, Inc <http://www.otg-nc.com>
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import sys
import subprocess
import urllib
try:
import select
except ImportError:
select = None
from paste.util import converters
from paste.cgiapp import *
from paste.cgiapp import StdinReader, proc_communicate
from paste.cgiapp import CGIApplication as PasteCGIApplication
from django.http import HttpResponse
# Taken from http://plumberjack.blogspot.com/2009/09/how-to-treat-logger-like-output-stream.html
import logging
mod_logger=logging.getLogger(__name__)
class LoggerWriter:
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message.strip() and message != '\n':
self.logger.log(self.level, message)
class CGIApplication(PasteCGIApplication):
def __call__(self, request, environ, logger=None):
if not logger:
self.logger=LoggerWriter(logging.getLogger(__name__), logging.ERROR)
else:
self.logger=logger
if 'REQUEST_URI' not in environ:
environ['REQUEST_URI'] = (
urllib.quote(environ.get('SCRIPT_NAME', ''))
+ urllib.quote(environ.get('PATH_INFO', '')))
if self.include_os_environ:
cgi_environ = os.environ.copy()
else:
cgi_environ = {}
for name in environ:
# Should unicode values be encoded?
if (name.upper() == name
and isinstance(environ[name], str)):
cgi_environ[name] = environ[name]
if self.query_string is not None:
old = cgi_environ.get('QUERY_STRING', '')
if old:
old += '&'
cgi_environ['QUERY_STRING'] = old + self.query_string
cgi_environ['SCRIPT_FILENAME'] = self.script
proc = subprocess.Popen(
[self.script],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=cgi_environ,
cwd=os.path.dirname(self.script),
)
writer = CGIWriter()
if select and sys.platform != 'win32':
proc_communicate(
proc,
stdin=request,
stdout=writer,
stderr=self.logger)
else:
stdout, stderr = proc.communicate(request.read())
if stderr:
self.logger.write(stderr)
writer.write(stdout)
if not writer.headers_finished:
return HttpResponse(status=400)
return writer.response
class CGIWriter(object):
def __init__(self):
self.status = '200 OK'
self.headers = []
self.headers_finished = False
self.writer = None
self.buffer = ''
def write(self, data):
if self.headers_finished:
self.response.write(data)
return
self.buffer += data
while '\n' in self.buffer:
if '\r\n' in self.buffer and self.buffer.find('\r\n') < self.buffer.find('\n'):
line1, self.buffer = self.buffer.split('\r\n', 1)
else:
line1, self.buffer = self.buffer.split('\n', 1)
if not line1:
self.headers_finished = True
self.response=HttpResponse(status=int(self.status.split(' ')[0]))
for name, value in self.headers:
self.response[name]=value
self.response.write(self.buffer)
del self.buffer
del self.headers
del self.status
break
elif ':' not in line1:
raise CGIError(
"Bad header line: %r" % line1)
else:
name, value = line1.split(':', 1)
value = value.lstrip()
name = name.strip()
if name.lower() == 'status':
if ' ' not in value:
# WSGI requires this space, sometimes CGI scripts don't set it:
value = '%s General' % value
self.status = value
else:
self.headers.append((name, value))
|
[
"logging.getLogger",
"django.http.HttpResponse",
"os.environ.copy",
"os.path.dirname",
"paste.cgiapp.proc_communicate"
] |
[((761, 788), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (778, 788), False, 'import logging\n'), ((1546, 1563), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1561, 1563), False, 'import os\n'), ((2430, 2502), 'paste.cgiapp.proc_communicate', 'proc_communicate', (['proc'], {'stdin': 'request', 'stdout': 'writer', 'stderr': 'self.logger'}), '(proc, stdin=request, stdout=writer, stderr=self.logger)\n', (2446, 2502), False, 'from paste.cgiapp import StdinReader, proc_communicate\n'), ((2802, 2826), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(400)'}), '(status=400)\n', (2814, 2826), False, 'from django.http import HttpResponse\n'), ((1192, 1219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1209, 1219), False, 'import logging\n'), ((2298, 2326), 'os.path.dirname', 'os.path.dirname', (['self.script'], {}), '(self.script)\n', (2313, 2326), False, 'import os\n')]
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
import socket
import errno
import sys
from pysnmp.carrier.asyncore.base import AbstractSocketTransport
from pysnmp.carrier import sockfix, sockmsg, error
from pysnmp import debug
# Ignore these socket errors
sockErrors = {errno.ESHUTDOWN: True,
errno.ENOTCONN: True,
errno.ECONNRESET: False,
errno.ECONNREFUSED: False,
errno.EAGAIN: False,
errno.EWOULDBLOCK: False}
if hasattr(errno, 'EBADFD'):
# bad FD may happen upon FD closure on n-1 select() event
sockErrors[errno.EBADFD] = True
class DgramSocketTransport(AbstractSocketTransport):
sockType = socket.SOCK_DGRAM
retryCount = 3
retryInterval = 1
addressType = lambda x: x
def __init__(self, sock=None, sockMap=None):
self.__outQueue = []
self._sendto = lambda s, b, a: s.sendto(b, a)
def __recvfrom(s, sz):
d, a = s.recvfrom(sz)
return d, self.addressType(a)
self._recvfrom = __recvfrom
AbstractSocketTransport.__init__(self, sock, sockMap)
def openClientMode(self, iface=None):
if iface is not None:
try:
self.socket.bind(iface)
except socket.error:
raise error.CarrierError(
'bind() for %s failed: %s' % (iface is None and "<all local>" or iface, sys.exc_info()[1]))
return self
def openServerMode(self, iface):
try:
self.socket.bind(iface)
except socket.error:
raise error.CarrierError('bind() for %s failed: %s' % (iface, sys.exc_info()[1],))
return self
def enableBroadcast(self, flag=1):
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, flag
)
except socket.error:
raise error.CarrierError('setsockopt() for SO_BROADCAST failed: %s' % (sys.exc_info()[1],))
debug.logger & debug.flagIO and debug.logger('enableBroadcast: %s option SO_BROADCAST on socket %s' % (flag and "enabled" or "disabled", self.socket.fileno()))
return self
def enablePktInfo(self, flag=1):
if (not hasattr(self.socket, 'sendmsg') or
not hasattr(self.socket, 'recvmsg')):
raise error.CarrierError('sendmsg()/recvmsg() interface is not supported by this OS and/or Python version')
try:
if self.socket.family == socket.AF_INET:
self.socket.setsockopt(socket.SOL_IP, socket.IP_PKTINFO, flag)
if self.socket.family == socket.AF_INET6:
self.socket.setsockopt(socket.SOL_IPV6, socket.IPV6_RECVPKTINFO, flag)
except socket.error:
raise error.CarrierError('setsockopt() for %s failed: %s' % (self.socket.family == socket.AF_INET6 and "IPV6_RECVPKTINFO" or "IP_PKTINFO", sys.exc_info()[1]))
self._sendto = sockmsg.getSendTo(self.addressType)
self._recvfrom = sockmsg.getRecvFrom(self.addressType)
debug.logger & debug.flagIO and debug.logger('enablePktInfo: %s option %s on socket %s' % (self.socket.family == socket.AF_INET6 and "IPV6_RECVPKTINFO" or "IP_PKTINFO", flag and "enabled" or "disabled", self.socket.fileno()))
return self
def enableTransparent(self, flag=1):
try:
if self.socket.family == socket.AF_INET:
self.socket.setsockopt(
socket.SOL_IP, socket.IP_TRANSPARENT, flag
)
if self.socket.family == socket.AF_INET6:
self.socket.setsockopt(
socket.SOL_IPV6, socket.IPV6_TRANSPARENT, flag
)
except socket.error:
raise error.CarrierError('setsockopt() for IP_TRANSPARENT failed: %s' % sys.exc_info()[1])
except OSError:
            raise error.CarrierError('IP_TRANSPARENT socket option requires superuser privileges')
debug.logger & debug.flagIO and debug.logger('enableTransparent: %s option IP_TRANSPARENT on socket %s' % (flag and "enabled" or "disabled", self.socket.fileno()))
return self
def sendMessage(self, outgoingMessage, transportAddress):
self.__outQueue.append(
(outgoingMessage, self.normalizeAddress(transportAddress))
)
debug.logger & debug.flagIO and debug.logger('sendMessage: outgoingMessage queued (%d octets) %s' % (len(outgoingMessage), debug.hexdump(outgoingMessage)))
def normalizeAddress(self, transportAddress):
if not isinstance(transportAddress, self.addressType):
transportAddress = self.addressType(transportAddress)
if not transportAddress.getLocalAddress():
transportAddress.setLocalAddress(self.getLocalAddress())
return transportAddress
def getLocalAddress(self):
# one evil OS does not seem to support getsockname() for DGRAM sockets
try:
return self.socket.getsockname()
except Exception:
return '0.0.0.0', 0
# asyncore API
def handle_connect(self):
pass
def writable(self):
return self.__outQueue
def handle_write(self):
outgoingMessage, transportAddress = self.__outQueue.pop(0)
debug.logger & debug.flagIO and debug.logger('handle_write: transportAddress %r -> %r outgoingMessage (%d octets) %s' % (transportAddress.getLocalAddress(), transportAddress, len(outgoingMessage), debug.hexdump(outgoingMessage)))
if not transportAddress:
            debug.logger & debug.flagIO and debug.logger('handle_write: missing dst address, losing outgoing msg')
return
try:
self._sendto(
self.socket, outgoingMessage, transportAddress
)
except socket.error:
if sys.exc_info()[1].args[0] in sockErrors:
debug.logger & debug.flagIO and debug.logger('handle_write: ignoring socket error %s' % (sys.exc_info()[1],))
else:
raise error.CarrierError('sendto() failed for %s: %s' % (transportAddress, sys.exc_info()[1]))
def readable(self):
return 1
def handle_read(self):
try:
incomingMessage, transportAddress = self._recvfrom(self.socket, 65535)
transportAddress = self.normalizeAddress(transportAddress)
debug.logger & debug.flagIO and debug.logger(
'handle_read: transportAddress %r -> %r incomingMessage (%d octets) %s' % (transportAddress, transportAddress.getLocalAddress(), len(incomingMessage), debug.hexdump(incomingMessage)))
if not incomingMessage:
self.handle_close()
return
else:
self._cbFun(self, transportAddress, incomingMessage)
return
except socket.error:
if sys.exc_info()[1].args[0] in sockErrors:
debug.logger & debug.flagIO and debug.logger('handle_read: known socket error %s' % (sys.exc_info()[1],))
sockErrors[sys.exc_info()[1].args[0]] and self.handle_close()
return
else:
raise error.CarrierError('recvfrom() failed: %s' % (sys.exc_info()[1],))
def handle_close(self):
pass # no datagram connection
|
[
"pysnmp.carrier.error.CarrierError",
"pysnmp.debug.logger",
"pysnmp.carrier.asyncore.base.AbstractSocketTransport.__init__",
"pysnmp.carrier.sockmsg.getSendTo",
"sys.exc_info",
"pysnmp.carrier.sockmsg.getRecvFrom",
"pysnmp.debug.hexdump"
] |
[((1151, 1204), 'pysnmp.carrier.asyncore.base.AbstractSocketTransport.__init__', 'AbstractSocketTransport.__init__', (['self', 'sock', 'sockMap'], {}), '(self, sock, sockMap)\n', (1183, 1204), False, 'from pysnmp.carrier.asyncore.base import AbstractSocketTransport\n'), ((3034, 3069), 'pysnmp.carrier.sockmsg.getSendTo', 'sockmsg.getSendTo', (['self.addressType'], {}), '(self.addressType)\n', (3051, 3069), False, 'from pysnmp.carrier import sockfix, sockmsg, error\n'), ((3095, 3132), 'pysnmp.carrier.sockmsg.getRecvFrom', 'sockmsg.getRecvFrom', (['self.addressType'], {}), '(self.addressType)\n', (3114, 3132), False, 'from pysnmp.carrier import sockfix, sockmsg, error\n'), ((2419, 2530), 'pysnmp.carrier.error.CarrierError', 'error.CarrierError', (['"""sendmsg()/recvmsg() interface is not supported by this OS and/or Python version"""'], {}), "(\n 'sendmsg()/recvmsg() interface is not supported by this OS and/or Python version'\n )\n", (2437, 2530), False, 'from pysnmp.carrier import sockfix, sockmsg, error\n'), ((3972, 4057), 'pysnmp.carrier.error.CarrierError', 'error.CarrierError', (['"""IP_TRANSPARENT socket option requires superusre previleges"""'], {}), "('IP_TRANSPARENT socket option requires superusre previleges'\n )\n", (3990, 4057), False, 'from pysnmp.carrier import sockfix, sockmsg, error\n'), ((5678, 5749), 'pysnmp.debug.logger', 'debug.logger', (['"""handle_write: missing dst address, loosing outgoing msg"""'], {}), "('handle_write: missing dst address, loosing outgoing msg')\n", (5690, 5749), False, 'from pysnmp import debug\n'), ((4553, 4583), 'pysnmp.debug.hexdump', 'debug.hexdump', (['outgoingMessage'], {}), '(outgoingMessage)\n', (4566, 4583), False, 'from pysnmp import debug\n'), ((5568, 5598), 'pysnmp.debug.hexdump', 'debug.hexdump', (['outgoingMessage'], {}), '(outgoingMessage)\n', (5581, 5598), False, 'from pysnmp import debug\n'), ((3910, 3924), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3922, 3924), False, 'import sys\n'), ((6687, 6717), 'pysnmp.debug.hexdump', 'debug.hexdump', (['incomingMessage'], {}), '(incomingMessage)\n', (6700, 6717), False, 'from pysnmp import debug\n'), ((1732, 1746), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1744, 1746), False, 'import sys\n'), ((2049, 2063), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2061, 2063), False, 'import sys\n'), ((2990, 3004), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3002, 3004), False, 'import sys\n'), ((5929, 5943), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5941, 5943), False, 'import sys\n'), ((6969, 6983), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6981, 6983), False, 'import sys\n'), ((1502, 1516), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1514, 1516), False, 'import sys\n'), ((6205, 6219), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6217, 6219), False, 'import sys\n'), ((7319, 7333), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7331, 7333), False, 'import sys\n'), ((6075, 6089), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6087, 6089), False, 'import sys\n'), ((7111, 7125), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7123, 7125), False, 'import sys\n'), ((7159, 7173), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7171, 7173), False, 'import sys\n')]
|
#!/usr/bin/env python3
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title : superpmi_setup.py
#
# Notes:
#
# Script to run "superpmi replay" for various collections under various COMPlus_JitStressRegs values.
################################################################################
################################################################################
import argparse
from os import path
import os
from os import listdir
from coreclr_arguments import *
from superpmi_setup import run_command
parser = argparse.ArgumentParser(description="Run 'superpmi replay' over the downloaded collections under various COMPlus_JitStressRegs modes")
parser.add_argument("-arch", help="Architecture")
parser.add_argument("-platform", help="OS platform")
parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries")
parser.add_argument("-log_directory", help="path to the directory containing superpmi log files")
jit_flags = [
"JitStressRegs=0",
"JitStressRegs=1",
"JitStressRegs=2",
"JitStressRegs=3",
"JitStressRegs=4",
"JitStressRegs=8",
"JitStressRegs=0x10",
"JitStressRegs=0x80",
"JitStressRegs=0x1000",
]
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"platform",
lambda unused: True,
"Unable to set platform")
coreclr_args.verify(args,
"jit_directory",
lambda jit_directory: os.path.isdir(jit_directory),
"jit_directory doesn't exist")
coreclr_args.verify(args,
"log_directory",
lambda log_directory: True,
"log_directory doesn't exist")
return coreclr_args
def main(main_args):
"""Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
python_path = sys.executable
cwd = os.path.dirname(os.path.realpath(__file__))
coreclr_args = setup_args(main_args)
spmi_location = path.join(cwd, "artifacts", "spmi")
log_directory = coreclr_args.log_directory
platform_name = coreclr_args.platform
os_name = "win" if platform_name.lower() == "windows" else "unix"
arch_name = coreclr_args.arch
host_arch_name = "x64" if arch_name.endswith("64") else "x86"
jit_path = path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
print("Running superpmi.py download")
run_command([python_path, path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name,
"-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location], _exit_on_fail=True)
failed_runs = []
for jit_flag in jit_flags:
log_file = path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
print("Running superpmi.py replay for {}".format(jit_flag))
_, _, return_code = run_command([
python_path, path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd,
"-jitoption", jit_flag, "-jitoption", "TieredCompilation=0",
"-target_os", platform_name, "-target_arch", arch_name,
"-arch", host_arch_name,
"-jit_path", jit_path, "-spmi_location", spmi_location,
"-log_level", "debug", "-log_file", log_file])
if return_code != 0:
failed_runs.append("Failure in {}".format(log_file))
# Consolidate all superpmi_*.logs in superpmi_platform_architecture.log
final_log_name = path.join(log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
print("Consolidating final {}".format(final_log_name))
with open(final_log_name, "a") as final_superpmi_log:
for superpmi_log in listdir(log_directory):
if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"):
continue
print("Appending {}".format(superpmi_log))
final_superpmi_log.write("======================================================={}".format(os.linesep))
final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep))
final_superpmi_log.write("======================================================={}".format(os.linesep))
with open(path.join(log_directory, superpmi_log), "r") as current_superpmi_log:
contents = current_superpmi_log.read()
final_superpmi_log.write(contents)
# Log failures summary
if len(failed_runs) > 0:
final_superpmi_log.write(os.linesep)
final_superpmi_log.write(os.linesep)
final_superpmi_log.write("========Failed runs summary========".format(os.linesep))
final_superpmi_log.write(os.linesep.join(failed_runs))
return 0 if len(failed_runs) == 0 else 1
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
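# A rough invocation sketch (comments only; the architecture, OS and paths below
# are illustrative assumptions, not values taken from this script):
#
#   python <this_script>.py -arch x64 -platform windows \
#       -jit_directory artifacts/bin/coreclr/windows.x64.Checked \
#       -log_directory artifacts/spmi_logs
#
# Each entry in jit_flags then gets its own "superpmi replay" pass and log file,
# and the per-flag logs are appended into one consolidated log at the end.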
|
[
"os.linesep.join",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.realpath",
"os.path.isdir"
] |
[((635, 685), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""description"""'}), "(description='description')\n", (658, 685), False, 'import argparse\n'), ((2594, 2629), 'os.path.join', 'path.join', (['cwd', '"""artifacts"""', '"""spmi"""'], {}), "(cwd, 'artifacts', 'spmi')\n", (2603, 2629), False, 'from os import path\n'), ((2505, 2531), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2521, 2531), False, 'import os\n'), ((4357, 4379), 'os.listdir', 'listdir', (['log_directory'], {}), '(log_directory)\n', (4364, 4379), False, 'from os import listdir\n'), ((2039, 2067), 'os.path.isdir', 'os.path.isdir', (['jit_directory'], {}), '(jit_directory)\n', (2052, 2067), False, 'import os\n'), ((3081, 3110), 'os.path.join', 'path.join', (['cwd', '"""superpmi.py"""'], {}), "(cwd, 'superpmi.py')\n", (3090, 3110), False, 'from os import path\n'), ((3574, 3603), 'os.path.join', 'path.join', (['cwd', '"""superpmi.py"""'], {}), "(cwd, 'superpmi.py')\n", (3583, 3603), False, 'from os import path\n'), ((5378, 5406), 'os.linesep.join', 'os.linesep.join', (['failed_runs'], {}), '(failed_runs)\n', (5393, 5406), False, 'import os\n'), ((4907, 4945), 'os.path.join', 'path.join', (['log_directory', 'superpmi_log'], {}), '(log_directory, superpmi_log)\n', (4916, 4945), False, 'from os import path\n')]
|
from django.conf.urls import url
from .views import serve_all
urlpatterns = (
url(r'^.*$', serve_all, name="localsrv:serve_all"),
)
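# `django.conf.urls.url` was removed in Django 4.0; on newer releases the same
# catch-all route would be written with `re_path` from `django.urls`, e.g.
# `re_path(r'^.*$', serve_all, name="localsrv:serve_all")`.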
|
[
"django.conf.urls.url"
] |
[((83, 132), 'django.conf.urls.url', 'url', (['"""^.*$"""', 'serve_all'], {'name': '"""localsrv:serve_all"""'}), "('^.*$', serve_all, name='localsrv:serve_all')\n", (86, 132), False, 'from django.conf.urls import url\n')]
|
from transformer import Encoder
from torch import nn,optim
from torch.nn.functional import cross_entropy,softmax, relu
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch
import utils
import os
import pickle
class GPT(nn.Module):
def __init__(self, model_dim, max_len, num_layer, num_head, n_vocab, lr, max_seg=3, drop_rate=0.2,padding_idx=0):
super().__init__()
self.padding_idx = padding_idx
self.n_vocab = n_vocab
self.max_len = max_len
self.word_emb = nn.Embedding(n_vocab,model_dim)
self.word_emb.weight.data.normal_(0,0.1)
self.segment_emb = nn.Embedding(num_embeddings= max_seg, embedding_dim=model_dim)
self.segment_emb.weight.data.normal_(0,0.1)
self.position_emb = torch.empty(1,max_len,model_dim)
nn.init.kaiming_normal_(self.position_emb,mode='fan_out', nonlinearity='relu')
self.position_emb = nn.Parameter(self.position_emb)
self.encoder = Encoder(n_head=num_head, emb_dim=model_dim, drop_rate=drop_rate, n_layer=num_layer)
self.task_mlm = nn.Linear(in_features=model_dim, out_features=n_vocab)
self.task_nsp = nn.Linear(in_features=model_dim*self.max_len, out_features=2)
self.opt = optim.Adam(self.parameters(),lr)
def forward(self,seqs, segs, training=False):
embed = self.input_emb(seqs, segs)
z = self.encoder(embed, training, mask = self.mask(seqs)) # [n, step, model_dim]
mlm_logits = self.task_mlm(z) # [n, step, n_vocab]
nsp_logits = self.task_nsp(z.reshape(z.shape[0],-1)) # [n, n_cls]
return mlm_logits, nsp_logits
def step(self, seqs, segs, seqs_, nsp_labels):
self.opt.zero_grad()
mlm_logits, nsp_logits = self(seqs, segs, training=True)
pred_loss = cross_entropy(mlm_logits.reshape(-1,self.n_vocab),seqs_.reshape(-1))
nsp_loss = cross_entropy(nsp_logits,nsp_labels.reshape(-1))
loss = pred_loss + 0.2 * nsp_loss
loss.backward()
self.opt.step()
return loss.cpu().data.numpy(), mlm_logits
def input_emb(self,seqs, segs):
# device = next(self.parameters()).device
# self.position_emb = self.position_emb.to(device)
return self.word_emb(seqs) + self.segment_emb(segs) + self.position_emb
def mask(self, seqs):
device = next(self.parameters()).device
batch_size, seq_len = seqs.shape
mask = torch.triu(torch.ones((seq_len,seq_len), dtype=torch.long), diagonal=1).to(device) # [seq_len ,seq_len]
pad = torch.eq(seqs,self.padding_idx) # [n, seq_len]
mask = torch.where(pad[:,None,None,:],1,mask[None,None,:,:]).to(device) # [n, 1, seq_len, seq_len]
return mask>0 # [n, 1, seq_len, seq_len]
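        # Worked illustration of the mask built above (comments only; a single
        # sequence of length 4 whose last token is padding is assumed):
        #   seqs = [[5, 7, 2, 0]]                  # 0 == padding_idx
        #   causal part, triu(ones, diagonal=1):
        #       [[0, 1, 1, 1],
        #        [0, 0, 1, 1],
        #        [0, 0, 0, 1],
        #        [0, 0, 0, 0]]
        #   torch.where(pad, 1, causal) additionally sets every key column that is
        #   padding to 1, so the boolean result hides both future tokens and padding.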
@property
def attentions(self):
attentions = {
"encoder": [l.mh.attention.cpu().data.numpy() for l in self.encoder.encoder_layers]
}
return attentions
def train():
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
dataset = utils.MRPCData("./MRPC",2000)
print("num word: ",dataset.num_word)
model = GPT(
model_dim=MODEL_DIM, max_len=dataset.max_len-1, num_layer=N_LAYER, num_head=4, n_vocab=dataset.num_word,
lr=LEARNING_RATE, max_seg=dataset.num_seg, drop_rate=0.2, padding_idx=dataset.pad_id
)
if torch.cuda.is_available():
print("GPU train avaliable")
device =torch.device("cuda")
model = model.cuda()
else:
device = torch.device("cpu")
model = model.cpu()
loader = DataLoader(dataset,batch_size=32,shuffle=True)
for epoch in range(100):
for batch_idx, batch in enumerate(loader):
seqs, segs,xlen,nsp_labels = batch
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
# pred: [n, step, n_vocab]
loss,pred = model.step(seqs=seqs[:,:-1], segs= segs[:,:-1], seqs_=seqs[:,1:], nsp_labels=nsp_labels)
if batch_idx %100 == 0:
pred = pred[0].cpu().data.numpy().argmax(axis = 1) # [step]
print(
"Epoch: ",epoch,
"|batch: ", batch_idx,
"| loss: %.3f" % loss,
"\n| tgt: ", " ".join([dataset.i2v[i] for i in seqs[0, 1:].cpu().data.numpy()[:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([dataset.i2v[i] for i in pred[:xlen[0].sum()+1]]),
)
os.makedirs("./visual/models/gpt",exist_ok=True)
torch.save(model.state_dict(),"./visual/models/gpt/model.pth")
export_attention(model,device,dataset)
def export_attention(model,device,data,name="gpt"):
model.load_state_dict(torch.load("./visual/models/gpt/model.pth",map_location=device))
seqs, segs,xlen,nsp_labels = data[:32]
seqs, segs,xlen,nsp_labels = torch.from_numpy(seqs),torch.from_numpy(segs),torch.from_numpy(xlen),torch.from_numpy(nsp_labels)
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
model(seqs[:,:-1],segs[:,:-1],False)
seqs = seqs.cpu().data.numpy()
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
path = "./visual/tmp/%s_attention_matrix.pkl" % name
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
pickle.dump(data, f)
if __name__ == "__main__":
train()
|
[
"torch.ones",
"pickle.dump",
"os.makedirs",
"torch.load",
"torch.nn.init.kaiming_normal_",
"torch.from_numpy",
"torch.eq",
"utils.MRPCData",
"os.path.dirname",
"torch.cuda.is_available",
"torch.nn.Parameter",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"transformer.Encoder",
"torch.empty",
"torch.where",
"torch.nn.Embedding",
"torch.device"
] |
[((3123, 3153), 'utils.MRPCData', 'utils.MRPCData', (['"""./MRPC"""', '(2000)'], {}), "('./MRPC', 2000)\n", (3137, 3153), False, 'import utils\n'), ((3430, 3455), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3453, 3455), False, 'import torch\n'), ((3653, 3701), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(32)', 'shuffle': '(True)'}), '(dataset, batch_size=32, shuffle=True)\n', (3663, 3701), False, 'from torch.utils.data import DataLoader\n'), ((4593, 4642), 'os.makedirs', 'os.makedirs', (['"""./visual/models/gpt"""'], {'exist_ok': '(True)'}), "('./visual/models/gpt', exist_ok=True)\n", (4604, 4642), False, 'import os\n'), ((569, 601), 'torch.nn.Embedding', 'nn.Embedding', (['n_vocab', 'model_dim'], {}), '(n_vocab, model_dim)\n', (581, 601), False, 'from torch import nn, optim\n'), ((678, 739), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'max_seg', 'embedding_dim': 'model_dim'}), '(num_embeddings=max_seg, embedding_dim=model_dim)\n', (690, 739), False, 'from torch import nn, optim\n'), ((821, 855), 'torch.empty', 'torch.empty', (['(1)', 'max_len', 'model_dim'], {}), '(1, max_len, model_dim)\n', (832, 855), False, 'import torch\n'), ((862, 941), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.position_emb'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(self.position_emb, mode='fan_out', nonlinearity='relu')\n", (885, 941), False, 'from torch import nn, optim\n'), ((969, 1000), 'torch.nn.Parameter', 'nn.Parameter', (['self.position_emb'], {}), '(self.position_emb)\n', (981, 1000), False, 'from torch import nn, optim\n'), ((1026, 1114), 'transformer.Encoder', 'Encoder', ([], {'n_head': 'num_head', 'emb_dim': 'model_dim', 'drop_rate': 'drop_rate', 'n_layer': 'num_layer'}), '(n_head=num_head, emb_dim=model_dim, drop_rate=drop_rate, n_layer=\n num_layer)\n', (1033, 1114), False, 'from transformer import Encoder\n'), ((1134, 1188), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'model_dim', 'out_features': 'n_vocab'}), '(in_features=model_dim, out_features=n_vocab)\n', (1143, 1188), False, 'from torch import nn, optim\n'), ((1213, 1276), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(model_dim * self.max_len)', 'out_features': '(2)'}), '(in_features=model_dim * self.max_len, out_features=2)\n', (1222, 1276), False, 'from torch import nn, optim\n'), ((2625, 2657), 'torch.eq', 'torch.eq', (['seqs', 'self.padding_idx'], {}), '(seqs, self.padding_idx)\n', (2633, 2657), False, 'import torch\n'), ((3510, 3530), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3522, 3530), False, 'import torch\n'), ((3587, 3606), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3599, 3606), False, 'import torch\n'), ((4831, 4895), 'torch.load', 'torch.load', (['"""./visual/models/gpt/model.pth"""'], {'map_location': 'device'}), "('./visual/models/gpt/model.pth', map_location=device)\n", (4841, 4895), False, 'import torch\n'), ((4972, 4994), 'torch.from_numpy', 'torch.from_numpy', (['seqs'], {}), '(seqs)\n', (4988, 4994), False, 'import torch\n'), ((4995, 5017), 'torch.from_numpy', 'torch.from_numpy', (['segs'], {}), '(segs)\n', (5011, 5017), False, 'import torch\n'), ((5018, 5040), 'torch.from_numpy', 'torch.from_numpy', (['xlen'], {}), '(xlen)\n', (5034, 5040), False, 'import torch\n'), ((5041, 5069), 'torch.from_numpy', 'torch.from_numpy', (['nsp_labels'], {}), '(nsp_labels)\n', (5057, 5069), False, 'import torch\n'), ((5459, 5480), 'os.path.dirname', 
'os.path.dirname', (['path'], {}), '(path)\n', (5474, 5480), False, 'import os\n'), ((5537, 5557), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5548, 5557), False, 'import pickle\n'), ((2689, 2750), 'torch.where', 'torch.where', (['pad[:, None, None, :]', '(1)', 'mask[None, None, :, :]'], {}), '(pad[:, None, None, :], 1, mask[None, None, :, :])\n', (2700, 2750), False, 'import torch\n'), ((2517, 2565), 'torch.ones', 'torch.ones', (['(seq_len, seq_len)'], {'dtype': 'torch.long'}), '((seq_len, seq_len), dtype=torch.long)\n', (2527, 2565), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
from bokeh.core.json_encoder import serialize_json
from bokeh.core.properties import List, String
from bokeh.document import Document
from bokeh.layouts import row, column
from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button
from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs
from bokeh.palettes import viridis
from bokeh.plotting import figure, ColumnDataSource
from bokeh.util.compiler import bundle_all_models
from bokeh.util.serialization import make_id
from matplotlib import cm
from matplotlib.colors import rgb2hex
import os
from skyportal.models import (
DBSession,
Obj,
Photometry,
Group,
Instrument,
Telescope,
PHOT_ZP,
)
import sncosmo
DETECT_THRESH = 5 # sigma
SPEC_LINES = {
'H': ([3970, 4102, 4341, 4861, 6563], '#ff0000'),
'He': ([3886, 4472, 5876, 6678, 7065], '#002157'),
'He II': ([3203, 4686], '#003b99'),
'C II': ([3919, 4267, 6580, 7234, 9234], '#570199'),
'C III': ([4650, 5696], '#a30198'),
'C IV': ([5801], '#ff0073'),
'O': ([7772, 7774, 7775, 8447, 9266], '#007236'),
'O II': ([3727], '#00a64d'),
'O III': ([4959, 5007], '#00bf59'),
'Na': ([5890, 5896, 8183, 8195], '#aba000'),
'Mg': ([2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184], '#8c6239'),
'Mg II': ([2791, 2796, 2803, 4481], '#bf874e'),
'Si II': ([3856, 5041, 5056, 5670, 6347, 6371], '#5674b9'),
'S II': ([5433, 5454, 5606, 5640, 5647, 6715], '#a38409'),
'Ca II': ([3934, 3969, 7292, 7324, 8498, 8542, 8662], '#005050'),
'Fe II': ([5018, 5169], '#f26c4f'),
'Fe III': ([4397, 4421, 4432, 5129, 5158], '#f9917b'),
}
# TODO add groups
# Galaxy lines
#
# 'H': '4341, 4861, 6563;
# 'N II': '6548, 6583;
# 'O I': '6300;'
# 'O II': '3727;
# 'O III': '4959, 5007;
# 'Mg II': '2798;
# 'S II': '6717, 6731'
# 'H': '3970, 4102, 4341, 4861, 6563'
# 'Na': '5890, 5896, 8183, 8195'
# 'He': '3886, 4472, 5876, 6678, 7065'
# 'Mg': '2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184'
# 'He II': '3203, 4686'
# 'Mg II': '2791, 2796, 2803, 4481'
# 'O': '7772, 7774, 7775, 8447, 9266'
# 'Si II': '3856, 5041, 5056, 5670 6347, 6371'
# 'O II': '3727'
# 'Ca II': '3934, 3969, 7292, 7324, 8498, 8542, 8662'
# 'O III': '4959, 5007'
# 'Fe II': '5018, 5169'
# 'S II': '5433, 5454, 5606, 5640, 5647, 6715'
# 'Fe III': '4397, 4421, 4432, 5129, 5158'
#
# Other
#
# 'Tel: 6867-6884, 7594-7621'
# 'Tel': '#b7b7b7',
# 'H: 4341, 4861, 6563;
# 'N II': 6548, 6583;
# 'O I': 6300;
# 'O II': 3727;
# 'O III': 4959, 5007;
# 'Mg II': 2798;
# 'S II': 6717, 6731'
class CheckboxWithLegendGroup(CheckboxGroup):
colors = List(String, help="List of legend colors")
__implementation__ = """
import {empty, input, label, div} from "core/dom"
import * as p from "core/properties"
import {CheckboxGroup, CheckboxGroupView} from "models/widgets/checkbox_group"
export class CheckboxWithLegendGroupView extends CheckboxGroupView
render: () ->
super()
empty(@el)
active = @model.active
colors = @model.colors
for text, i in @model.labels
inputEl = input({type: "checkbox", value: "#{i}"})
inputEl.addEventListener("change", () => @change_input())
if @model.disabled then inputEl.disabled = true
if i in active then inputEl.checked = true
attrs = {
style: "border-left: 12px solid #{colors[i]}; padding-left: 0.3em;"
}
labelEl = label(attrs, inputEl, text)
if @model.inline
labelEl.classList.add("bk-bs-checkbox-inline")
@el.appendChild(labelEl)
else
divEl = div({class: "bk-bs-checkbox"}, labelEl)
@el.appendChild(divEl)
return @
export class CheckboxWithLegendGroup extends CheckboxGroup
type: "CheckboxWithLegendGroup"
default_view: CheckboxWithLegendGroupView
@define {
colors: [ p.Array, [] ]
}
"""
# TODO replace with (script, div) method
def _plot_to_json(plot):
"""Convert plot to JSON objects necessary for rendering with `bokehJS`.
Parameters
----------
plot : bokeh.plotting.figure.Figure
Bokeh plot object to be rendered.
Returns
-------
(str, str)
Returns (docs_json, render_items) json for the desired plot.
"""
render_items = [{'docid': plot._id, 'elementid': make_id()}]
doc = Document()
doc.add_root(plot)
docs_json_inner = doc.to_json()
docs_json = {render_items[0]['docid']: docs_json_inner}
docs_json = serialize_json(docs_json)
render_items = serialize_json(render_items)
custom_model_js = bundle_all_models()
return docs_json, render_items, custom_model_js
tooltip_format = [
('mjd', '@mjd{0.000000}'),
('flux', '@flux'),
('filter', '@filter'),
('fluxerr', '@fluxerr'),
('mag', '@mag'),
('magerr', '@magerr'),
('lim_mag', '@lim_mag'),
('instrument', '@instrument'),
('stacked', '@stacked'),
]
cmap = cm.get_cmap('jet_r')
def get_color(bandpass_name, cmap_limits=(3000.0, 10000.0)):
if bandpass_name.startswith('ztf'):
return {'ztfg': 'green', 'ztfi': 'orange', 'ztfr': 'red'}[bandpass_name]
else:
bandpass = sncosmo.get_bandpass(bandpass_name)
wave = bandpass.wave_eff
rgb = cmap((cmap_limits[1] - wave) / (cmap_limits[1] - cmap_limits[0]))[:3]
bandcolor = rgb2hex(rgb)
return bandcolor
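# Illustrative behaviour of get_color (comments only; exact hex values depend on
# matplotlib's jet_r table and each band's effective wavelength in sncosmo):
#   get_color('ztfg')  -> 'green'        (ZTF filters are hard-coded above)
#   get_color('sdssg') -> an '#rrggbb' string sampled from jet_r, so bluer bands
#                         come out blue and redder bands come out red, because the
#                         sampled fraction is (10000 - wave_eff) / 7000.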
# TODO make async so that thread isn't blocked
def photometry_plot(obj_id, user, width=600, height=300):
"""Create scatter plot of photometry for object.
Parameters
----------
obj_id : str
ID of Obj to be plotted.
Returns
-------
(str, str)
Returns (docs_json, render_items) json for the desired plot.
"""
data = pd.read_sql(
DBSession()
.query(
Photometry,
Telescope.nickname.label("telescope"),
Instrument.name.label("instrument"),
)
.join(Instrument, Instrument.id == Photometry.instrument_id)
.join(Telescope, Telescope.id == Instrument.telescope_id)
.filter(Photometry.obj_id == obj_id)
.filter(
Photometry.groups.any(Group.id.in_([g.id for g in user.accessible_groups]))
)
.statement,
DBSession().bind,
)
if data.empty:
return None, None, None
data['color'] = [get_color(f) for f in data['filter']]
data['label'] = [
f'{i} {f}-band' for i, f in zip(data['instrument'], data['filter'])
]
data['zp'] = PHOT_ZP
data['magsys'] = 'ab'
data['alpha'] = 1.0
data['lim_mag'] = -2.5 * np.log10(data['fluxerr'] * DETECT_THRESH) + data['zp']
# Passing a dictionary to a bokeh datasource causes the frontend to die,
# deleting the dictionary column fixes that
del data['original_user_data']
# keep track of things that are only upper limits
data['hasflux'] = ~data['flux'].isna()
# calculate the magnitudes - a photometry point is considered "significant"
# or "detected" (and thus can be represented by a magnitude) if its snr
# is above DETECT_THRESH
obsind = data['hasflux'] & (
data['flux'].fillna(0.0) / data['fluxerr'] >= DETECT_THRESH
)
data.loc[~obsind, 'mag'] = None
data.loc[obsind, 'mag'] = -2.5 * np.log10(data[obsind]['flux']) + PHOT_ZP
# calculate the magnitude errors using standard error propagation formulae
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
data.loc[~obsind, 'magerr'] = None
coeff = 2.5 / np.log(10)
magerrs = np.abs(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])
data.loc[obsind, 'magerr'] = magerrs
data['obs'] = obsind
data['stacked'] = False
split = data.groupby('label', sort=False)
finite = np.isfinite(data['flux'])
fdata = data[finite]
lower = np.min(fdata['flux']) * 0.95
upper = np.max(fdata['flux']) * 1.05
plot = figure(
plot_width=width,
plot_height=height,
active_drag='box_zoom',
tools='box_zoom,wheel_zoom,pan,reset,save',
y_range=(lower, upper),
)
imhover = HoverTool(tooltips=tooltip_format)
plot.add_tools(imhover)
model_dict = {}
for i, (label, sdf) in enumerate(split):
# for the flux plot, we only show things that have a flux value
df = sdf[sdf['hasflux']]
key = f'obs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='flux',
color='color',
marker='circle',
fill_color='color',
alpha='alpha',
source=ColumnDataSource(df),
)
imhover.renderers.append(model_dict[key])
key = f'bin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='flux',
color='color',
marker='circle',
fill_color='color',
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
stacked=[],
instrument=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = 'obserr' + str(i)
y_err_x = []
y_err_y = []
for d, ro in df.iterrows():
px = ro['mjd']
py = ro['flux']
err = ro['fluxerr']
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
alpha='alpha',
source=ColumnDataSource(
data=dict(
xs=y_err_x, ys=y_err_y, color=df['color'], alpha=[1.0] * len(df)
)
),
)
key = f'binerr{i}'
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
)
plot.xaxis.axis_label = 'MJD'
plot.yaxis.axis_label = 'Flux (μJy)'
plot.toolbar.logo = None
toggle = CheckboxWithLegendGroup(
labels=list(data.label.unique()),
active=list(range(len(data.label.unique()))),
colors=list(data.color.unique()),
)
# TODO replace `eval` with Namespaces
# https://github.com/bokeh/bokeh/pull/6340
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglef.js')
).read(),
)
slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackf.js')
)
.read()
.replace('default_zp', str(PHOT_ZP))
.replace('detect_thresh', str(DETECT_THRESH)),
)
slider.js_on_change('value', callback)
# Mark the first and last detections
detection_dates = data[data['hasflux']]['mjd']
if len(detection_dates) > 0:
first = round(detection_dates.min(), 6)
last = round(detection_dates.max(), 6)
first_color = "#34b4eb"
last_color = "#8992f5"
midpoint = (upper + lower) / 2
line_top = 5 * upper - 4 * midpoint
line_bottom = 5 * lower - 4 * midpoint
first_x = np.full(5000, first)
last_x = np.full(5000, last)
y = np.linspace(line_bottom, line_top, num=5000)
first_r = plot.line(
x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
)
plot.add_tools(
HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
)
last_r = plot.line(
x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
)
plot.add_tools(
HoverTool(tooltips=[("Last detection", f'{last}')], renderers=[last_r],)
)
layout = row(plot, toggle)
layout = column(slider, layout)
p1 = Panel(child=layout, title='Flux')
# now make the mag light curve
ymax = np.nanmax(data['mag']) + 0.1
ymin = np.nanmin(data['mag']) - 0.1
plot = figure(
plot_width=width,
plot_height=height,
active_drag='box_zoom',
tools='box_zoom,wheel_zoom,pan,reset,save',
y_range=(ymax, ymin),
toolbar_location='above',
)
# Mark the first and last detections again
if len(detection_dates) > 0:
midpoint = (ymax + ymin) / 2
line_top = 5 * ymax - 4 * midpoint
line_bottom = 5 * ymin - 4 * midpoint
y = np.linspace(line_bottom, line_top, num=5000)
first_r = plot.line(
x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
)
plot.add_tools(
HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
)
last_r = plot.line(
x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
)
plot.add_tools(
HoverTool(
tooltips=[("Last detection", f'{last}')],
renderers=[last_r],
point_policy='follow_mouse',
)
)
imhover = HoverTool(tooltips=tooltip_format)
plot.add_tools(imhover)
model_dict = {}
for i, (label, df) in enumerate(split):
key = f'obs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='mag',
color='color',
marker='circle',
fill_color='color',
alpha='alpha',
source=ColumnDataSource(df[df['obs']]),
)
imhover.renderers.append(model_dict[key])
unobs_source = df[~df['obs']].copy()
unobs_source.loc[:, 'alpha'] = 0.8
key = f'unobs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='lim_mag',
color='color',
marker='inverted_triangle',
fill_color='white',
line_color='color',
alpha='alpha',
source=ColumnDataSource(unobs_source),
)
imhover.renderers.append(model_dict[key])
key = f'bin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='mag',
color='color',
marker='circle',
fill_color='color',
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
instrument=[],
stacked=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = 'obserr' + str(i)
y_err_x = []
y_err_y = []
for d, ro in df[df['obs']].iterrows():
px = ro['mjd']
py = ro['mag']
err = ro['magerr']
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
alpha='alpha',
source=ColumnDataSource(
data=dict(
xs=y_err_x,
ys=y_err_y,
color=df[df['obs']]['color'],
alpha=[1.0] * len(df[df['obs']]),
)
),
)
key = f'binerr{i}'
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
)
key = f'unobsbin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='lim_mag',
color='color',
marker='inverted_triangle',
fill_color='white',
line_color='color',
alpha=0.8,
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
instrument=[],
stacked=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = f'all{i}'
model_dict[key] = ColumnDataSource(df)
key = f'bold{i}'
model_dict[key] = ColumnDataSource(
df[
[
'mjd',
'flux',
'fluxerr',
'mag',
'magerr',
'filter',
'zp',
'magsys',
'lim_mag',
'stacked',
]
]
)
plot.xaxis.axis_label = 'MJD'
plot.yaxis.axis_label = 'AB mag'
plot.toolbar.logo = None
toggle = CheckboxWithLegendGroup(
labels=list(data.label.unique()),
active=list(range(len(data.label.unique()))),
colors=list(data.color.unique()),
)
# TODO replace `eval` with Namespaces
# https://github.com/bokeh/bokeh/pull/6340
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglem.js')
).read(),
)
slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
button = Button(label="Export Bold Light Curve to CSV")
button.callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(
os.path.dirname(__file__), '../static/js/plotjs', "download.js"
)
)
.read()
.replace('objname', obj_id)
.replace('default_zp', str(PHOT_ZP)),
)
toplay = row(slider, button)
callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackm.js')
)
.read()
.replace('default_zp', str(PHOT_ZP))
.replace('detect_thresh', str(DETECT_THRESH)),
)
slider.js_on_change('value', callback)
layout = row(plot, toggle)
layout = column(toplay, layout)
p2 = Panel(child=layout, title='Mag')
tabs = Tabs(tabs=[p2, p1])
return _plot_to_json(tabs)
# TODO make async so that thread isn't blocked
def spectroscopy_plot(obj_id, spec_id=None):
"""TODO normalization? should this be handled at data ingestion or plot-time?"""
obj = Obj.query.get(obj_id)
spectra = Obj.query.get(obj_id).spectra
if spec_id is not None:
spectra = [spec for spec in spectra if spec.id == int(spec_id)]
if len(spectra) == 0:
return None, None, None
color_map = dict(zip([s.id for s in spectra], viridis(len(spectra))))
data = pd.concat(
[
pd.DataFrame(
{
'wavelength': s.wavelengths,
'flux': s.fluxes,
'id': s.id,
'instrument': s.instrument.telescope.nickname,
}
)
for i, s in enumerate(spectra)
]
)
split = data.groupby('id')
hover = HoverTool(
tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]
)
plot = figure(
plot_width=600,
plot_height=300,
sizing_mode='scale_both',
tools='box_zoom,wheel_zoom,pan,reset',
active_drag='box_zoom',
)
plot.add_tools(hover)
model_dict = {}
for i, (key, df) in enumerate(split):
model_dict['s' + str(i)] = plot.line(
x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(df)
)
plot.xaxis.axis_label = 'Wavelength (Å)'
plot.yaxis.axis_label = 'Flux'
plot.toolbar.logo = None
# TODO how to choose a good default?
plot.y_range = Range1d(0, 1.03 * data.flux.max())
toggle = CheckboxWithLegendGroup(
labels=[s.instrument.telescope.nickname for s in spectra],
active=list(range(len(spectra))),
width=100,
colors=[color_map[k] for k, df in split],
)
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code="""
for (let i = 0; i < toggle.labels.length; i++) {
eval("s" + i).visible = (toggle.active.includes(i))
}
""",
)
elements = CheckboxWithLegendGroup(
labels=list(SPEC_LINES.keys()),
active=[],
width=80,
colors=[c for w, c in SPEC_LINES.values()],
)
z = TextInput(value=str(obj.redshift), title="z:")
v_exp = TextInput(value='0', title="v_exp:")
for i, (wavelengths, color) in enumerate(SPEC_LINES.values()):
el_data = pd.DataFrame({'wavelength': wavelengths})
el_data['x'] = el_data['wavelength'] * (1 + obj.redshift)
model_dict[f'el{i}'] = plot.segment(
x0='x',
x1='x',
# TODO change limits
y0=0,
y1=1e-13,
color=color,
source=ColumnDataSource(el_data),
)
model_dict[f'el{i}'].visible = False
# TODO callback policy: don't require submit for text changes?
elements.callback = CustomJS(
args={'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict},
code="""
let c = 299792.458; // speed of light in km / s
for (let i = 0; i < elements.labels.length; i++) {
let el = eval("el" + i);
el.visible = (elements.active.includes(i))
el.data_source.data.x = el.data_source.data.wavelength.map(
x_i => (x_i * (1 + parseFloat(z.value)) /
(1 + parseFloat(v_exp.value) / c))
);
el.data_source.change.emit();
}
""",
)
z.callback = elements.callback
v_exp.callback = elements.callback
layout = row(plot, toggle, elements, column(z, v_exp))
return _plot_to_json(layout)
|
[
"bokeh.layouts.column",
"bokeh.models.widgets.TextInput",
"numpy.log10",
"bokeh.util.compiler.bundle_all_models",
"bokeh.plotting.figure",
"bokeh.layouts.row",
"skyportal.models.Group.id.in_",
"skyportal.models.Telescope.nickname.label",
"numpy.log",
"bokeh.util.serialization.make_id",
"numpy.isfinite",
"bokeh.models.Slider",
"matplotlib.colors.rgb2hex",
"numpy.nanmin",
"bokeh.models.CustomJS",
"numpy.max",
"numpy.linspace",
"numpy.nanmax",
"numpy.min",
"pandas.DataFrame",
"skyportal.models.DBSession",
"skyportal.models.Instrument.name.label",
"matplotlib.cm.get_cmap",
"numpy.abs",
"bokeh.core.properties.List",
"os.path.dirname",
"skyportal.models.Obj.query.get",
"bokeh.models.Button",
"bokeh.models.widgets.Tabs",
"bokeh.models.HoverTool",
"bokeh.core.json_encoder.serialize_json",
"bokeh.plotting.ColumnDataSource",
"sncosmo.get_bandpass",
"bokeh.models.widgets.Panel",
"numpy.full",
"bokeh.document.Document"
] |
[((4916, 4936), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet_r"""'], {}), "('jet_r')\n", (4927, 4936), False, 'from matplotlib import cm\n'), ((2656, 2698), 'bokeh.core.properties.List', 'List', (['String'], {'help': '"""List of legend colors"""'}), "(String, help='List of legend colors')\n", (2660, 2698), False, 'from bokeh.core.properties import List, String\n'), ((4319, 4329), 'bokeh.document.Document', 'Document', ([], {}), '()\n', (4327, 4329), False, 'from bokeh.document import Document\n'), ((4466, 4491), 'bokeh.core.json_encoder.serialize_json', 'serialize_json', (['docs_json'], {}), '(docs_json)\n', (4480, 4491), False, 'from bokeh.core.json_encoder import serialize_json\n'), ((4511, 4539), 'bokeh.core.json_encoder.serialize_json', 'serialize_json', (['render_items'], {}), '(render_items)\n', (4525, 4539), False, 'from bokeh.core.json_encoder import serialize_json\n'), ((4562, 4581), 'bokeh.util.compiler.bundle_all_models', 'bundle_all_models', ([], {}), '()\n', (4579, 4581), False, 'from bokeh.util.compiler import bundle_all_models\n'), ((7544, 7606), 'numpy.abs', 'np.abs', (["(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])"], {}), "(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])\n", (7550, 7606), True, 'import numpy as np\n'), ((7762, 7787), 'numpy.isfinite', 'np.isfinite', (["data['flux']"], {}), "(data['flux'])\n", (7773, 7787), True, 'import numpy as np\n'), ((7907, 8048), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'width', 'plot_height': 'height', 'active_drag': '"""box_zoom"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset,save"""', 'y_range': '(lower, upper)'}), "(plot_width=width, plot_height=height, active_drag='box_zoom', tools=\n 'box_zoom,wheel_zoom,pan,reset,save', y_range=(lower, upper))\n", (7913, 8048), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((8106, 8140), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltip_format'}), '(tooltips=tooltip_format)\n', (8115, 8140), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((10772, 10844), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0.0)', 'end': '(15.0)', 'value': '(0.0)', 'step': '(1.0)', 'title': '"""Binsize (days)"""'}), "(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')\n", (10778, 10844), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((12259, 12276), 'bokeh.layouts.row', 'row', (['plot', 'toggle'], {}), '(plot, toggle)\n', (12262, 12276), False, 'from bokeh.layouts import row, column\n'), ((12290, 12312), 'bokeh.layouts.column', 'column', (['slider', 'layout'], {}), '(slider, layout)\n', (12296, 12312), False, 'from bokeh.layouts import row, column\n'), ((12323, 12356), 'bokeh.models.widgets.Panel', 'Panel', ([], {'child': 'layout', 'title': '"""Flux"""'}), "(child=layout, title='Flux')\n", (12328, 12356), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((12485, 12654), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'width', 'plot_height': 'height', 'active_drag': '"""box_zoom"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset,save"""', 'y_range': '(ymax, ymin)', 'toolbar_location': '"""above"""'}), "(plot_width=width, plot_height=height, active_drag='box_zoom', tools=\n 'box_zoom,wheel_zoom,pan,reset,save', y_range=(ymax, ymin),\n toolbar_location='above')\n", (12491, 12654), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((13550, 13584), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 
'tooltip_format'}), '(tooltips=tooltip_format)\n', (13559, 13584), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((17922, 17994), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0.0)', 'end': '(15.0)', 'value': '(0.0)', 'step': '(1.0)', 'title': '"""Binsize (days)"""'}), "(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')\n", (17928, 17994), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((18009, 18055), 'bokeh.models.Button', 'Button', ([], {'label': '"""Export Bold Light Curve to CSV"""'}), "(label='Export Bold Light Curve to CSV')\n", (18015, 18055), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((18420, 18439), 'bokeh.layouts.row', 'row', (['slider', 'button'], {}), '(slider, button)\n', (18423, 18439), False, 'from bokeh.layouts import row, column\n'), ((18826, 18843), 'bokeh.layouts.row', 'row', (['plot', 'toggle'], {}), '(plot, toggle)\n', (18829, 18843), False, 'from bokeh.layouts import row, column\n'), ((18857, 18879), 'bokeh.layouts.column', 'column', (['toplay', 'layout'], {}), '(toplay, layout)\n', (18863, 18879), False, 'from bokeh.layouts import row, column\n'), ((18890, 18922), 'bokeh.models.widgets.Panel', 'Panel', ([], {'child': 'layout', 'title': '"""Mag"""'}), "(child=layout, title='Mag')\n", (18895, 18922), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((18935, 18954), 'bokeh.models.widgets.Tabs', 'Tabs', ([], {'tabs': '[p2, p1]'}), '(tabs=[p2, p1])\n', (18939, 18954), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((19175, 19196), 'skyportal.models.Obj.query.get', 'Obj.query.get', (['obj_id'], {}), '(obj_id)\n', (19188, 19196), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((19870, 19963), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]"}), "(tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument',\n '@instrument')])\n", (19879, 19963), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((19985, 20118), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(600)', 'plot_height': '(300)', 'sizing_mode': '"""scale_both"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset"""', 'active_drag': '"""box_zoom"""'}), "(plot_width=600, plot_height=300, sizing_mode='scale_both', tools=\n 'box_zoom,wheel_zoom,pan,reset', active_drag='box_zoom')\n", (19991, 20118), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((20843, 21055), 'bokeh.models.CustomJS', 'CustomJS', ([], {'args': "{'toggle': toggle, **model_dict}", 'code': '"""\n for (let i = 0; i < toggle.labels.length; i++) {\n eval("s" + i).visible = (toggle.active.includes(i))\n }\n """'}), '(args={\'toggle\': toggle, **model_dict}, code=\n """\n for (let i = 0; i < toggle.labels.length; i++) {\n eval("s" + i).visible = (toggle.active.includes(i))\n }\n """\n )\n', (20851, 21055), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((21312, 21348), 'bokeh.models.widgets.TextInput', 'TextInput', ([], {'value': '"""0"""', 'title': '"""v_exp:"""'}), "(value='0', title='v_exp:')\n", (21321, 21348), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((21918, 22514), 'bokeh.models.CustomJS', 'CustomJS', ([], {'args': "{'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict}", 
'code': '"""\n let c = 299792.458; // speed of light in km / s\n for (let i = 0; i < elements.labels.length; i++) {\n let el = eval("el" + i);\n el.visible = (elements.active.includes(i))\n el.data_source.data.x = el.data_source.data.wavelength.map(\n x_i => (x_i * (1 + parseFloat(z.value)) /\n (1 + parseFloat(v_exp.value) / c))\n );\n el.data_source.change.emit();\n }\n """'}), '(args={\'elements\': elements, \'z\': z, \'v_exp\': v_exp, **model_dict},\n code=\n """\n let c = 299792.458; // speed of light in km / s\n for (let i = 0; i < elements.labels.length; i++) {\n let el = eval("el" + i);\n el.visible = (elements.active.includes(i))\n el.data_source.data.x = el.data_source.data.wavelength.map(\n x_i => (x_i * (1 + parseFloat(z.value)) /\n (1 + parseFloat(v_exp.value) / c))\n );\n el.data_source.change.emit();\n }\n """\n )\n', (21926, 22514), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((5150, 5185), 'sncosmo.get_bandpass', 'sncosmo.get_bandpass', (['bandpass_name'], {}), '(bandpass_name)\n', (5170, 5185), False, 'import sncosmo\n'), ((5323, 5335), 'matplotlib.colors.rgb2hex', 'rgb2hex', (['rgb'], {}), '(rgb)\n', (5330, 5335), False, 'from matplotlib.colors import rgb2hex\n'), ((7519, 7529), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (7525, 7529), True, 'import numpy as np\n'), ((7825, 7846), 'numpy.min', 'np.min', (["fdata['flux']"], {}), "(fdata['flux'])\n", (7831, 7846), True, 'import numpy as np\n'), ((7866, 7887), 'numpy.max', 'np.max', (["fdata['flux']"], {}), "(fdata['flux'])\n", (7872, 7887), True, 'import numpy as np\n'), ((11651, 11671), 'numpy.full', 'np.full', (['(5000)', 'first'], {}), '(5000, first)\n', (11658, 11671), True, 'import numpy as np\n'), ((11689, 11708), 'numpy.full', 'np.full', (['(5000)', 'last'], {}), '(5000, last)\n', (11696, 11708), True, 'import numpy as np\n'), ((11721, 11765), 'numpy.linspace', 'np.linspace', (['line_bottom', 'line_top'], {'num': '(5000)'}), '(line_bottom, line_top, num=5000)\n', (11732, 11765), True, 'import numpy as np\n'), ((12404, 12426), 'numpy.nanmax', 'np.nanmax', (["data['mag']"], {}), "(data['mag'])\n", (12413, 12426), True, 'import numpy as np\n'), ((12444, 12466), 'numpy.nanmin', 'np.nanmin', (["data['mag']"], {}), "(data['mag'])\n", (12453, 12466), True, 'import numpy as np\n'), ((12920, 12964), 'numpy.linspace', 'np.linspace', (['line_bottom', 'line_top'], {'num': '(5000)'}), '(line_bottom, line_top, num=5000)\n', (12931, 12964), True, 'import numpy as np\n'), ((16865, 16885), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (16881, 16885), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((16938, 17055), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (["df[['mjd', 'flux', 'fluxerr', 'mag', 'magerr', 'filter', 'zp', 'magsys',\n 'lim_mag', 'stacked']]"], {}), "(df[['mjd', 'flux', 'fluxerr', 'mag', 'magerr', 'filter',\n 'zp', 'magsys', 'lim_mag', 'stacked']])\n", (16954, 17055), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((19211, 19232), 'skyportal.models.Obj.query.get', 'Obj.query.get', (['obj_id'], {}), '(obj_id)\n', (19224, 19232), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((21434, 21475), 'pandas.DataFrame', 'pd.DataFrame', (["{'wavelength': wavelengths}"], {}), "({'wavelength': wavelengths})\n", (21446, 21475), True, 'import pandas as pd\n'), ((22640, 22656), 'bokeh.layouts.column', 'column', (['z', 'v_exp'], {}), '(z, v_exp)\n', 
(22646, 22656), False, 'from bokeh.layouts import row, column\n'), ((4296, 4305), 'bokeh.util.serialization.make_id', 'make_id', ([], {}), '()\n', (4303, 4305), False, 'from bokeh.util.serialization import make_id\n'), ((6236, 6247), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (6245, 6247), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((6581, 6622), 'numpy.log10', 'np.log10', (["(data['fluxerr'] * DETECT_THRESH)"], {}), "(data['fluxerr'] * DETECT_THRESH)\n", (6589, 6622), True, 'import numpy as np\n'), ((7261, 7291), 'numpy.log10', 'np.log10', (["data[obsind]['flux']"], {}), "(data[obsind]['flux'])\n", (7269, 7291), True, 'import numpy as np\n'), ((11923, 11997), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('First detection', f'{first}')]", 'renderers': '[first_r]'}), "(tooltips=[('First detection', f'{first}')], renderers=[first_r])\n", (11932, 11997), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((12162, 12233), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('Last detection', f'{last}')]", 'renderers': '[last_r]'}), "(tooltips=[('Last detection', f'{last}')], renderers=[last_r])\n", (12171, 12233), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((13122, 13196), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('First detection', f'{first}')]", 'renderers': '[first_r]'}), "(tooltips=[('First detection', f'{first}')], renderers=[first_r])\n", (13131, 13196), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((13361, 13465), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('Last detection', f'{last}')]", 'renderers': '[last_r]', 'point_policy': '"""follow_mouse"""'}), "(tooltips=[('Last detection', f'{last}')], renderers=[last_r],\n point_policy='follow_mouse')\n", (13370, 13465), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((19518, 19642), 'pandas.DataFrame', 'pd.DataFrame', (["{'wavelength': s.wavelengths, 'flux': s.fluxes, 'id': s.id, 'instrument': s\n .instrument.telescope.nickname}"], {}), "({'wavelength': s.wavelengths, 'flux': s.fluxes, 'id': s.id,\n 'instrument': s.instrument.telescope.nickname})\n", (19530, 19642), True, 'import pandas as pd\n'), ((8584, 8604), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (8600, 8604), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((13920, 13951), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (["df[df['obs']]"], {}), "(df[df['obs']])\n", (13936, 13951), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((14393, 14423), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['unobs_source'], {}), '(unobs_source)\n', (14409, 14423), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((20362, 20382), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (20378, 20382), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((21744, 21769), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['el_data'], {}), '(el_data)\n', (21760, 21769), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((6144, 6196), 'skyportal.models.Group.id.in_', 'Group.id.in_', (['[g.id for g in user.accessible_groups]'], {}), '([g.id for g in user.accessible_groups])\n', (6156, 6196), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, 
Instrument, Telescope, PHOT_ZP\n'), ((10670, 10695), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10685, 10695), False, 'import os\n'), ((17820, 17845), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (17835, 17845), False, 'import os\n'), ((10980, 11005), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10995, 11005), False, 'import os\n'), ((18214, 18239), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18229, 18239), False, 'import os\n'), ((18574, 18599), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18589, 18599), False, 'import os\n'), ((5815, 5852), 'skyportal.models.Telescope.nickname.label', 'Telescope.nickname.label', (['"""telescope"""'], {}), "('telescope')\n", (5839, 5852), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((5866, 5901), 'skyportal.models.Instrument.name.label', 'Instrument.name.label', (['"""instrument"""'], {}), "('instrument')\n", (5887, 5901), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((5751, 5762), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (5760, 5762), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n')]
|
# coding:utf-8
import uuid
import string
import hashlib
import logging
from lib.errors import SessionExpiredError, SessionConsumedError
from datetime import datetime as dt
from random import SystemRandom
LOG = logging.getLogger("errbot.plugin.st2.session")
def generate_password(length=8):
rnd = SystemRandom()
if length > 255:
length = 255
return "".join([rnd.choice(string.hexdigits) for _ in range(length)])
class Session(object):
def __init__(self, user_id, user_secret, session_ttl=3600):
self.bot_secret = None
self.user_id = user_id
self._is_sealed = True
self.session_id = uuid.uuid4()
self.create_date = int(dt.now().timestamp())
self.modified_date = self.create_date
self.ttl_in_seconds = session_ttl
self._hashed_secret = self.hash_secret(user_secret)
del user_secret
def is_expired(self):
"""
Returns False if both create and modified timestamps have exceeded the ttl.
"""
now = int(dt.now().timestamp())
modified_expiry = self.modified_date + self.ttl_in_seconds
if modified_expiry < now:
raise SessionExpiredError
return False
def attributes(self):
return {
"UserID": self.user_id,
"IsSealed": self._is_sealed,
"SessionID": self.session_id,
"CreationDate": str(dt.fromtimestamp(self.create_date)),
"ModifiedDate": str(dt.fromtimestamp(self.modified_date)),
"ExpiryDate": str(dt.fromtimestamp(self.modified_date + self.ttl_in_seconds)),
}
def __repr__(self):
return " ".join(
[
"UserID: {},".format(str(self.user_id)),
"Is Sealed: {},".format(str(self._is_sealed)),
"SessionID: {},".format(str(self.session_id)),
"Creation Date: {},".format(str(dt.fromtimestamp(self.create_date))),
"Modified Date: {},".format(str(dt.fromtimestamp(self.modified_date))),
"Expiry Date: {}".format(
str(dt.fromtimestamp(self.modified_date + self.ttl_in_seconds))
),
]
)
def unseal(self):
"""
Mark the session as being consumed. Returns true if the session was available to be
consumed or raises SessionConsumedError if the session has already been marked as consumed.
"""
self.is_expired()
if self._is_sealed:
self._is_sealed = False
else:
raise SessionConsumedError
return True
def is_sealed(self):
"""
Query the state of the one time use flag.
Returns True if the session has not been consumed or False if the session has already been
consumed.
"""
self.is_expired()
return self._is_sealed
def id(self):
"""
Return the UUID for the session.
"""
return str(self.session_id)
def ttl(self, ttl=None):
"""
Get/Set the time to live for the session.
param: ttl[int] The number of seconds the session should remain valid since creation or
modification.
        Returns the number of seconds the ttl has been set to if no argument is provided, otherwise
the ttl is set to the number of seconds provided to the ttl argument.
"""
self.is_expired()
if ttl is None:
return self.ttl_in_seconds
if isinstance(ttl, int):
self.ttl_in_seconds = ttl
self.modified_date = int(dt.now().timestamp())
else:
LOG.warning("session ttl must be an integer type, got '{}'".format(ttl))
def hash_secret(self, user_secret):
"""
Generate a unique token by hashing a random bot secret with the user secrets.
param: user_secret[string] - The users secret provided in the chat backend.
"""
self.is_expired()
if self.bot_secret is None:
self.bot_secret = generate_password(8)
h = hashlib.sha256()
h.update(bytes(user_secret, "utf-8"))
del user_secret
h.update(bytes(self.bot_secret, "utf-8"))
return h.hexdigest()
def match_secret(self, user_secret):
"""
Compare a secret with the session's hashed secret.
param: user_secret[string] the secret to compare.
Return True if the user_secret hash has matches the session hash or False if it does not.
"""
self.is_expired()
return self._hashed_secret == self.hash_secret(user_secret)
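# Minimal usage sketch; "alice" and the secrets below are illustrative values only:
if __name__ == "__main__":
    session = Session(user_id="alice", user_secret="correct horse", session_ttl=60)
    print(session)
    assert session.is_sealed() is True
    session.unseal()  # one-time use: a second unseal() raises SessionConsumedError
    assert session.match_secret("correct horse") is True
    assert session.match_secret("wrong secret") is False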
|
[
"logging.getLogger",
"hashlib.sha256",
"datetime.datetime.fromtimestamp",
"uuid.uuid4",
"datetime.datetime.now",
"random.SystemRandom"
] |
[((211, 257), 'logging.getLogger', 'logging.getLogger', (['"""errbot.plugin.st2.session"""'], {}), "('errbot.plugin.st2.session')\n", (228, 257), False, 'import logging\n'), ((303, 317), 'random.SystemRandom', 'SystemRandom', ([], {}), '()\n', (315, 317), False, 'from random import SystemRandom\n'), ((642, 654), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (652, 654), False, 'import uuid\n'), ((4084, 4100), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (4098, 4100), False, 'import hashlib\n'), ((1410, 1444), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['self.create_date'], {}), '(self.create_date)\n', (1426, 1444), True, 'from datetime import datetime as dt\n'), ((1479, 1515), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['self.modified_date'], {}), '(self.modified_date)\n', (1495, 1515), True, 'from datetime import datetime as dt\n'), ((1548, 1606), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['(self.modified_date + self.ttl_in_seconds)'], {}), '(self.modified_date + self.ttl_in_seconds)\n', (1564, 1606), True, 'from datetime import datetime as dt\n'), ((686, 694), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (692, 694), True, 'from datetime import datetime as dt\n'), ((1033, 1041), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1039, 1041), True, 'from datetime import datetime as dt\n'), ((1914, 1948), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['self.create_date'], {}), '(self.create_date)\n', (1930, 1948), True, 'from datetime import datetime as dt\n'), ((2000, 2036), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['self.modified_date'], {}), '(self.modified_date)\n', (2016, 2036), True, 'from datetime import datetime as dt\n'), ((2106, 2164), 'datetime.datetime.fromtimestamp', 'dt.fromtimestamp', (['(self.modified_date + self.ttl_in_seconds)'], {}), '(self.modified_date + self.ttl_in_seconds)\n', (2122, 2164), True, 'from datetime import datetime as dt\n'), ((3603, 3611), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (3609, 3611), True, 'from datetime import datetime as dt\n')]
|
# _*_ coding:utf-8 _*_
# author:ls
# time:2020/3/19 0019
import sys
from PyQt5.QtWidgets import QApplication,QAction,QMainWindow
from PyQt5.QtGui import QIcon
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.setui()
def setui(self):
self.statusbar = self.statusBar()
self.statusbar.showMessage('default show')
act = QAction('check',self,checkable=True)
act.setCheckable(True)
act.setStatusTip('view changed')
        # Not entirely sure how `triggered` ends up invoking the toggle function
act.triggered.connect(self.toggle)
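        # QAction.triggered is emitted with a single bool argument (the action's
        # checked state), so this connection calls self.toggle with that bool as
        # `state` each time the menu entry is clicked.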
menubar = self.menuBar()
menu = menubar.addMenu('checkable')
menu.addAction(act)
self.setGeometry(300,300,400,150)
self.setWindowTitle('this is a checkable menu')
self.show()
def toggle(self,state):
if state:
self.statusbar.show()
else:
self.statusbar.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QAction",
"PyQt5.QtWidgets.QApplication"
] |
[((972, 994), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (984, 994), False, 'from PyQt5.QtWidgets import QApplication, QAction, QMainWindow\n'), ((391, 429), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""check"""', 'self'], {'checkable': '(True)'}), "('check', self, checkable=True)\n", (398, 429), False, 'from PyQt5.QtWidgets import QApplication, QAction, QMainWindow\n')]
|
# Standard library imports
import logging
import os
# Third party imports
import dash
import dash_bootstrap_components as dbc
from flask_caching import Cache
import plotly.io as pio
# Local application imports
from modules.gitlab import GitLab
import settings
# Initialize logging mechanism
logging.basicConfig(level=settings.LOGLEVEL, format=settings.LOGFORMAT)
logger = logging.getLogger(__name__)
gl = GitLab()
logger.info("Current GitLab version: {}".format(GitLab.version))
# App instance
app = dash.Dash(__name__,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = settings.APP_NAME
# App caching
# CACHE_CONFIG = {
# # Note that filesystem cache doesn't work on systems with ephemeral
# # filesystems like Heroku.
# 'CACHE_TYPE': 'filesystem',
# 'CACHE_DIR': 'cache-directory',
# # should be equal to maximum number of users on the app at a single time
# # higher numbers will store more data in the filesystem / redis cache
# 'CACHE_THRESHOLD': 200
# }
CACHE_CONFIG = {
# try 'filesystem' if you don't want to setup redis
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': settings.REDIS_URL
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)
pio.templates.default = "plotly_dark"
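# Example of how the cache would typically be consumed downstream. A sketch
# only: `load_project_data` and `gl.get_project` are hypothetical helpers,
# while `@cache.memoize` is flask_caching's standard decorator.
#
#   @cache.memoize(timeout=300)
#   def load_project_data(project_id):
#       return gl.get_project(project_id)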
|
[
"logging.basicConfig",
"logging.getLogger",
"modules.gitlab.GitLab",
"flask_caching.Cache",
"dash.Dash"
] |
[((309, 380), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'settings.LOGLEVEL', 'format': 'settings.LOGFORMAT'}), '(level=settings.LOGLEVEL, format=settings.LOGFORMAT)\n', (328, 380), False, 'import logging\n'), ((391, 418), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (408, 418), False, 'import logging\n'), ((427, 435), 'modules.gitlab.GitLab', 'GitLab', ([], {}), '()\n', (433, 435), False, 'from modules.gitlab import GitLab\n'), ((527, 631), 'dash.Dash', 'dash.Dash', (['__name__'], {'suppress_callback_exceptions': '(True)', 'external_stylesheets': '[dbc.themes.BOOTSTRAP]'}), '(__name__, suppress_callback_exceptions=True, external_stylesheets\n =[dbc.themes.BOOTSTRAP])\n', (536, 631), False, 'import dash\n'), ((1225, 1232), 'flask_caching.Cache', 'Cache', ([], {}), '()\n', (1230, 1232), False, 'from flask_caching import Cache\n')]
|
import socket
import requests
import json
import xml.etree.ElementTree as ET
class Camera(object):
def __init__(self):
"""
create camera object
"""
self.xml_url = self.discover()
self.name, self.api_version, self.services = self.connect(self.xml_url)
self.camera_endpoint_url = self.services["camera"] + "/camera"
self.available_apis = self.do("getAvailableApiList")["result"]
# prepare camera for rec mode
if "startRecMode" in self.available_apis[0]:
self.do("startRecMode")
self.available_apis = self.do("getAvailableApiList")["result"]
self.connected = False
def discover(self):
"""
discover camera using upnp ssdp method, return url for device xml
"""
msg = (
"M-SEARCH * HTTP/1.1\r\n"
"HOST: 172.16.58.3:1900\r\n"
'MAN: "ssdp:discover" \r\n'
"MX: 2\r\n"
"ST: urn:schemas-sony-com:service:ScalarWebAPI:1\r\n"
"\r\n"
).encode()
# Set up UDP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.settimeout(2)
s.sendto(msg, ("172.16.58.3", 1900))
try:
while True:
data, addr = s.recvfrom(65507)
decoded_data = data.decode()
# get xml url from ssdp response
for item in decoded_data.split("\n"):
if "LOCATION" in item:
return item.strip().split(" ")[
1
] # get location url from ssdp response
self.connected = True
except socket.timeout:
raise ConnectionError("you are not connected to the camera's wifi")
def connect(self, xml_url):
"""
returns name, api_version, api_service_urls on success
"""
device_xml_request = requests.get(xml_url)
xml_file = str(device_xml_request.content.decode())
xml = ET.fromstring(xml_file)
name = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-upnp-org:device-1-0}friendlyName"
).text
api_version = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_Version"
).text
service_list = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceList"
)
api_service_urls = {}
for service in service_list:
service_type = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType"
).text
action_url = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL"
).text
api_service_urls[service_type] = action_url
return name, api_version, api_service_urls
def info(self):
"""
returns camera info(name, api version, supported services, available apis) in a dictionary
"""
return {
"name": self.name,
"api version": self.api_version,
"supported services": list(self.services.keys()),
"available apis": self.available_apis,
}
def post_request(self, url, method, param=[]):
"""
sends post request to url with method and param as json
"""
if type(param) is not list:
param = [param]
json_request = {"method": method, "params": param, "id": 1, "version": "1.0"}
request = requests.post(url, json.dumps(json_request))
response = json.loads(request.content)
if "error" in list(response.keys()):
print("Error: ")
print(response)
else:
return response
def do(self, method, param=[]):
"""
this calls to camera service api, require method and param args
"""
# TODO: response handler, return result of do, etc
response = self.post_request(self.camera_endpoint_url, method, param)
return response
class ConnectionError(Exception):
pass
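if __name__ == "__main__":
    # Minimal usage sketch: requires being connected to the camera's Wi-Fi
    # access point; only methods already defined/used above are called here.
    cam = Camera()
    print(cam.info())
    print(cam.do("getAvailableApiList"))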
|
[
"json.loads",
"socket.socket",
"json.dumps",
"requests.get",
"xml.etree.ElementTree.fromstring"
] |
[((1094, 1162), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM', 'socket.IPPROTO_UDP'], {}), '(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n', (1107, 1162), False, 'import socket\n'), ((1952, 1973), 'requests.get', 'requests.get', (['xml_url'], {}), '(xml_url)\n', (1964, 1973), False, 'import requests\n'), ((2048, 2071), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['xml_file'], {}), '(xml_file)\n', (2061, 2071), True, 'import xml.etree.ElementTree as ET\n'), ((3779, 3806), 'json.loads', 'json.loads', (['request.content'], {}), '(request.content)\n', (3789, 3806), False, 'import json\n'), ((3734, 3758), 'json.dumps', 'json.dumps', (['json_request'], {}), '(json_request)\n', (3744, 3758), False, 'import json\n')]
|
from asyncio import get_event_loop
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from aiohttp import ClientSession
from pydantic import BaseModel
from sgqlc.endpoint.base import BaseEndpoint
from sgqlc.operation import Operation
from sgqlc_schemas.github.schema import (
AddLabelsToLabelableInput,
AddLabelsToLabelablePayload,
MergePullRequestInput,
Mutation,
Query,
Repository,
)
class Shared(BaseModel):
class Config:
arbitrary_types_allowed = True
class Location(Shared):
column: int
line: int
class Error(Shared):
locations: List[Location]
message: str
path: Optional[List[str]] = None
class DataWithErrors(Shared):
data: Union[Query, Mutation]
errors: List[Error]
@dataclass
class AsyncHttpEndpoint(BaseEndpoint):
url: str
headers: Dict[str, str] = field(default_factory=dict)
async def __call__(self, query) -> DataWithErrors:
async with ClientSession() as session:
res = await session.post(
self.url,
headers={**self.headers, 'Content-Type': 'application/json'},
json={'query': bytes(query).decode()},
)
try:
data = await res.json()
except Exception as e:
                # keep the handler's error payload so `data` is defined below
                # (sgqlc's stock endpoints return an error dict here)
                data = self._log_json_error(await res.text(), e)
data.setdefault('errors', [])
if data['errors']:
self._log_graphql_error(query, data)
if not (data['errors'] or data.get('data')):
data['errors'] = [{'message': data['message'], 'locations': []}]
return DataWithErrors(data=query + data, errors=data['errors'])
async def add_labels_to_labelable(
endpoint: BaseEndpoint, repository_id: str, labelable_id: str, label: str
) -> AddLabelsToLabelablePayload:
query = Operation(Query)
query.node(id=repository_id).__as__(Repository).labels(first=50).nodes().__fields__(
'name', 'id'
)
labels = {
repo_label.name: repo_label.id
for repo_label in (await endpoint(query)).node.labels.nodes
}
mutation = Operation(Mutation)
mutation.add_labels_to_labelable(
input=AddLabelsToLabelableInput(
labelable_id=labelable_id, label_ids=[labels[label]]
)
)
return (await endpoint(mutation)).add_labels_to_labelable
async def build_endpoint(token: str) -> AsyncHttpEndpoint:
return AsyncHttpEndpoint(
'https://api.github.com/graphql',
{'Authorization': 'Bearer ' + token},
)
async def main():
endpoint = await build_endpoint(open('token.txt').read())
qu = Operation(Query)
repo = qu.repository(owner='Mause', name='media')
repo.id()
repo.pull_requests(first=1).nodes().__fields__('title', 'id')
res = (await endpoint(qu)).repository
await add_labels_to_labelable(
endpoint, res.id, res.pull_requests.nodes[0].id, 'automerge'
)
op = Operation(Mutation)
op = build_merge([res.pull_requests.nodes[0].id])
res = await endpoint(op)
print(res)
def build_merge(ids: List[str]):
op = Operation(Mutation)
for i, ident in enumerate(ids):
op.merge_pull_request(
input=MergePullRequestInput(pull_request_id=ident), __alias__=f'merge_{i}'
).pull_request.title()
return op
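# (the __alias__ values merge_0, merge_1, ... give each mergePullRequest
#  selection a distinct name, so several merges can share one GraphQL operation)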
if __name__ == "__main__":
get_event_loop().run_until_complete(main())
|
[
"aiohttp.ClientSession",
"sgqlc_schemas.github.schema.MergePullRequestInput",
"sgqlc_schemas.github.schema.AddLabelsToLabelableInput",
"asyncio.get_event_loop",
"dataclasses.field",
"sgqlc.operation.Operation"
] |
[((879, 906), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (884, 906), False, 'from dataclasses import dataclass, field\n'), ((1874, 1890), 'sgqlc.operation.Operation', 'Operation', (['Query'], {}), '(Query)\n', (1883, 1890), False, 'from sgqlc.operation import Operation\n'), ((2151, 2170), 'sgqlc.operation.Operation', 'Operation', (['Mutation'], {}), '(Mutation)\n', (2160, 2170), False, 'from sgqlc.operation import Operation\n'), ((2670, 2686), 'sgqlc.operation.Operation', 'Operation', (['Query'], {}), '(Query)\n', (2679, 2686), False, 'from sgqlc.operation import Operation\n'), ((2983, 3002), 'sgqlc.operation.Operation', 'Operation', (['Mutation'], {}), '(Mutation)\n', (2992, 3002), False, 'from sgqlc.operation import Operation\n'), ((3145, 3164), 'sgqlc.operation.Operation', 'Operation', (['Mutation'], {}), '(Mutation)\n', (3154, 3164), False, 'from sgqlc.operation import Operation\n'), ((982, 997), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (995, 997), False, 'from aiohttp import ClientSession\n'), ((2223, 2302), 'sgqlc_schemas.github.schema.AddLabelsToLabelableInput', 'AddLabelsToLabelableInput', ([], {'labelable_id': 'labelable_id', 'label_ids': '[labels[label]]'}), '(labelable_id=labelable_id, label_ids=[labels[label]])\n', (2248, 2302), False, 'from sgqlc_schemas.github.schema import AddLabelsToLabelableInput, AddLabelsToLabelablePayload, MergePullRequestInput, Mutation, Query, Repository\n'), ((3399, 3415), 'asyncio.get_event_loop', 'get_event_loop', ([], {}), '()\n', (3413, 3415), False, 'from asyncio import get_event_loop\n'), ((3251, 3295), 'sgqlc_schemas.github.schema.MergePullRequestInput', 'MergePullRequestInput', ([], {'pull_request_id': 'ident'}), '(pull_request_id=ident)\n', (3272, 3295), False, 'from sgqlc_schemas.github.schema import AddLabelsToLabelableInput, AddLabelsToLabelablePayload, MergePullRequestInput, Mutation, Query, Repository\n')]
|
# Exercise 046
from time import sleep
import emoji
print('\033[32mCONTAGEM REGRESSIVA PARA O ANO NOVO:\033[m')
sleep(1)
for c in range(10, 0 - 1, -1):  # counts the numbers from 10 down to 0
print(c)
sleep(1)
print(emoji.emojize("\033[31m:boom::boom::boom:KABUM:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[32m:boom::boom::boom:FOGUETE:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:", use_aliases=True))
print('\033[32mxD')
|
[
"emoji.emojize",
"time.sleep"
] |
[((110, 118), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (115, 118), False, 'from time import sleep\n'), ((199, 207), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (204, 207), False, 'from time import sleep\n'), ((214, 302), 'emoji.emojize', 'emoji.emojize', (['"""\x1b[31m:boom::boom::boom:KABUM:boom::boom::boom:"""'], {'use_aliases': '(True)'}), "('\\x1b[31m:boom::boom::boom:KABUM:boom::boom::boom:',\n use_aliases=True)\n", (227, 302), False, 'import emoji\n'), ((306, 396), 'emoji.emojize', 'emoji.emojize', (['"""\x1b[32m:boom::boom::boom:FOGUETE:boom::boom::boom:"""'], {'use_aliases': '(True)'}), "('\\x1b[32m:boom::boom::boom:FOGUETE:boom::boom::boom:',\n use_aliases=True)\n", (319, 396), False, 'import emoji\n'), ((400, 501), 'emoji.emojize', 'emoji.emojize', (['"""\x1b[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:"""'], {'use_aliases': '(True)'}), "('\\x1b[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:',\n use_aliases=True)\n", (413, 501), False, 'import emoji\n'), ((505, 603), 'emoji.emojize', 'emoji.emojize', (['"""\x1b[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:"""'], {'use_aliases': '(True)'}), "('\\x1b[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:',\n use_aliases=True)\n", (518, 603), False, 'import emoji\n')]
|
import abc
import math
from ... import constants
class Rule(abc.ABC):
def __init__(self, failure_bin, **kwargs):
self.failure_bin = failure_bin
self.enabled = kwargs.get("enabled", True)
self.threshold = kwargs.get("threshold", float("nan"))
self.rule_name = kwargs.get("rule_name", self.default_rule_name)
self.kwargs = kwargs
def __unicode__(self):
enabled = "✓" if self.enabled else "✕"
threshold = "" if math.isnan(self.threshold) else ", threshold={}".format(self.threshold)
return "{0} {1} [bin={2}{3}]".format(enabled, self.rule_name, self.binmoji, threshold)
def check(self, dataset, output):
if self.enabled:
return self.apply_rule(dataset, output)
else:
return self.return_pass()
@property
def binmoji(self):
return constants.BIN_ICON[self.failure_bin]
@property
def bin_text(self):
return constants.BIN_TEXT[self.failure_bin]
def as_row(self):
return [self.rule_name, self.enabled, self.bin_text, self.threshold]
def return_pass(self):
return constants.BIN_NO_CHANGE, None
@abc.abstractmethod
def apply_rule(self, dataset, output):
"""return tuple of (bin, notes) associated with rule or None"""
...
def get_failure_message(self, *args) -> str:
return "An error occurred"
def _is_valid_number(self, val):
# Ensure number is an int or float, not equal to special case -999.
return val is not None and val != -999 and (isinstance(val, int) or isinstance(val, float))
class NumericValueExists(Rule):
# Test succeeds if value is numeric and not -999
field_name = None
field_name_verbose = None
def apply_rule(self, dataset, output):
val = output.get(self.field_name)
if self._is_valid_number(val):
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message()
def get_failure_message(self):
name = getattr(self, "field_name_verbose")
if name is None:
name = self.field_name
return "{} does not exist".format(name)
class BmdExists(NumericValueExists):
default_rule_name = "BMD exists"
field_name = "BMD"
class BmdlExists(NumericValueExists):
default_rule_name = "BMDL exists"
field_name = "BMDL"
class BmduExists(NumericValueExists):
default_rule_name = "BMDU exists"
field_name = "BMDU"
class AicExists(NumericValueExists):
default_rule_name = "AIC exists"
field_name = "AIC"
class RoiExists(NumericValueExists):
default_rule_name = "Residual of interest exists"
field_name = "residual_of_interest"
field_name_verbose = "Residual of Interest"
class ShouldBeGreaterThan(Rule):
# Test fails if value is less-than threshold.
field_name = ""
field_name_verbose = ""
def apply_rule(self, dataset, output):
val = output.get(self.field_name)
threshold = self.threshold
if not self._is_valid_number(val) or val >= threshold:
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message(val, threshold)
def get_failure_message(self, val, threshold):
name = self.field_name_verbose
return "{} is less than threshold ({:.3} < {})".format(name, float(val), threshold)
class GlobalFit(ShouldBeGreaterThan):
default_rule_name = "GGOF"
field_name = "p_value4"
field_name_verbose = "Goodness of fit p-value"
class ShouldBeLessThan(Rule, abc.ABC):
# Test fails if value is greater-than threshold.
msg = "" # w/ arguments for value and threshold
@abc.abstractmethod
def get_value(self, dataset, output):
...
def apply_rule(self, dataset, output):
val = self.get_value(dataset, output)
threshold = self.threshold
if not self._is_valid_number(val) or val <= threshold:
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message(val, threshold)
def get_failure_message(self, val, threshold):
name = self.field_name_verbose
return "{} is greater than threshold ({:.3} > {})".format(name, float(val), threshold)
class BmdBmdlRatio(ShouldBeLessThan):
default_rule_name = "BMD to BMDL ratio"
field_name_verbose = "BMD/BMDL ratio"
def get_value(self, dataset, output):
bmd = output.get("BMD")
bmdl = output.get("BMDL")
if self._is_valid_number(bmd) and self._is_valid_number(bmdl) and bmdl != 0:
return bmd / bmdl
class RoiFit(ShouldBeLessThan):
default_rule_name = "Residual of interest"
field_name_verbose = "Residual of interest"
def get_value(self, dataset, output):
return output.get("residual_of_interest")
class HighBmd(ShouldBeLessThan):
default_rule_name = "High BMD"
field_name_verbose = "BMD/high dose ratio"
def get_value(self, dataset, output):
max_dose = max(dataset.doses)
bmd = output.get("BMD")
if self._is_valid_number(max_dose) and self._is_valid_number(bmd) and bmd != 0:
return bmd / float(max_dose)
class HighBmdl(ShouldBeLessThan):
default_rule_name = "High BMDL"
field_name_verbose = "BMDL/high dose ratio"
def get_value(self, dataset, output):
max_dose = max(dataset.doses)
bmdl = output.get("BMDL")
if self._is_valid_number(max_dose) and self._is_valid_number(bmdl) and max_dose > 0:
return bmdl / float(max_dose)
class LowBmd(ShouldBeLessThan):
default_rule_name = "Low BMD"
field_name_verbose = "minimum dose/BMD ratio"
def get_value(self, dataset, output):
min_dose = min([d for d in dataset.doses if d > 0])
bmd = output.get("BMD")
if self._is_valid_number(min_dose) and self._is_valid_number(bmd) and bmd > 0:
return min_dose / float(bmd)
class LowBmdl(ShouldBeLessThan):
default_rule_name = "Low BMDL"
field_name_verbose = "minimum dose/BMDL ratio"
def get_value(self, dataset, output):
min_dose = min([d for d in dataset.doses if d > 0])
bmdl = output.get("BMDL")
if self._is_valid_number(min_dose) and self._is_valid_number(bmdl) and bmdl > 0:
return min_dose / float(bmdl)
class ControlResidual(ShouldBeLessThan):
default_rule_name = "Control residual"
field_name_verbose = "Residual at lowest dose"
def get_value(self, dataset, output):
if output.get("fit_residuals") and len(output["fit_residuals"]) > 0:
try:
return abs(output["fit_residuals"][0])
except TypeError:
return float("nan")
class ControlStdevResiduals(ShouldBeLessThan):
default_rule_name = "Control stdev"
field_name_verbose = "Ratio of modeled to actual stdev. at control"
def get_value(self, dataset, output):
if (
output.get("fit_est_stdev")
and output.get("fit_stdev")
and len(output["fit_est_stdev"]) > 0
and len(output["fit_stdev"]) > 0
):
try:
modeled = abs(output["fit_est_stdev"][0])
actual = abs(output["fit_stdev"][0])
except TypeError:
return float("nan")
if (
self._is_valid_number(modeled)
and self._is_valid_number(actual)
and modeled > 0
and actual > 0
):
return abs(modeled / actual)
class CorrectVarianceModel(Rule):
# Check variance model (continuous datasets-only)
default_rule_name = "Variance type"
def apply_rule(self, dataset, output):
if "parameters" not in output:
return self.return_pass()
# 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
# 1 = constant variance => Var(i) = alpha*mean(i)
# if rho is a parameter, then variance model 0 is applied
rho = output["parameters"].get("rho")
constant_variance = 0 if rho else 1
p_value2 = output.get("p_value2")
if p_value2 == "<0.0001":
p_value2 = 0.0001
msg = None
if self._is_valid_number(p_value2):
if constant_variance == 1 and p_value2 < 0.1:
msg = "Incorrect variance model (p-value 2 = {}), constant variance selected".format(
p_value2
)
elif constant_variance == 0 and p_value2 > 0.1:
msg = "Incorrect variance model (p-value 2 = {}), modeled variance selected".format(
p_value2
)
else:
msg = "Correct variance model cannot be determined (p-value 2 = {})".format(p_value2)
if msg:
return self.failure_bin, msg
else:
return self.return_pass()
class VarianceModelFit(Rule):
default_rule_name = "Variance fit"
def apply_rule(self, dataset, output):
if "parameters" not in output:
return self.return_pass()
# 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
# 1 = constant variance => Var(i) = alpha*mean(i)
# if rho is a parameter, then variance model 0 is applied
rho = output["parameters"].get("rho")
constant_variance = 0 if rho else 1
p_value2 = output.get("p_value2")
if p_value2 == "<0.0001":
p_value2 = 0.0001
p_value3 = output.get("p_value3")
if p_value3 == "<0.0001":
p_value3 = 0.0001
msg = None
if self._is_valid_number(p_value2) and constant_variance == 1 and p_value2 < 0.1:
msg = "Variance model poorly fits dataset (p-value 2 = {})".format(p_value2)
if self._is_valid_number(p_value3) and constant_variance == 0 and p_value3 < 0.1:
msg = "Variance model poorly fits dataset (p-value 3 = {})".format(p_value3)
if msg:
return self.failure_bin, msg
else:
return self.return_pass()
class NoDegreesOfFreedom(Rule):
"""
Check to ensure at least one degree of freedom exist to prevent recommendation of an
overfit model.
"""
default_rule_name = "Degrees of freedom"
def apply_rule(self, dataset, output):
df = output.get("df", 1)
if df == 0:
return self.failure_bin, "Zero degrees of freedom; saturated model"
return self.return_pass()
class Warnings(Rule):
# Test fails if any warnings exist.
default_rule_name = "Warnings"
def get_failure_message(self, warnings):
return "Warning(s): {}".format("; ".join(warnings))
def apply_rule(self, dataset, output):
warnings = output.get("warnings", [])
if len(warnings) > 0:
return self.failure_bin, self.get_failure_message(warnings)
else:
return self.return_pass()
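# Usage sketch (the failure-bin constant and the output dict below are
# placeholders for illustration; real callers build `output` from a fitted
# model's results):
#
#   rule = BmdExists(failure_bin=some_bin_constant)
#   result_bin, notes = rule.check(dataset=None, output={"BMD": 1.23})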
|
[
"math.isnan"
] |
[((476, 502), 'math.isnan', 'math.isnan', (['self.threshold'], {}), '(self.threshold)\n', (486, 502), False, 'import math\n')]
|
import pandas as pd
import smartplots3_setup
def createSetup(name,expansion_factor,percapita_factor,plot_size,settings):
plt_setup_smart={
'name': name,
'expansion_factor':expansion_factor,
'percapita_factor':percapita_factor,
'scenarios_itr': [],
'scenarios_id':[],
'scenarios_year':[],
'plot_size': plot_size,
'bottom_labels': [],
'top_labels': [],
'plots_folder': "makeplots3"
}
plt_setup_smart['name']=name
plt_setup_smart['expansion_factor']=expansion_factor
plt_setup_smart['plot_size']=plot_size
plt_setup_smart['scenarios_year']=[]
plt_setup_smart['scenarios_id']=[]
plt_setup_smart['scenarios_itr']=[]
plt_setup_smart['top_labels']=[]
for (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label) in settings:
plt_setup_smart['scenarios_year'].append(scenarios_year)
plt_setup_smart['scenarios_id'].append(scenarios_id)
plt_setup_smart['scenarios_itr'].append(scenarios_itr)
plt_setup_smart['top_labels'].append(top_label)
plt_setup_smart['bottom_labels'].append(bottom_label)
return plt_setup_smart
def createSettingRow(scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label):
return (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label)
scenarios_lables = {
"Base_CL_CT": "Base0",
"Base_STL_STT_BAU": "Base2",
"Base_STL_STT_VTO": "Base3",
"Base_LTL_LTT_BAU": "Base5",
"Base_LTL_LTT_VTO": "Base6",
"A_STL_STT_BAU": "A2",
"A_STL_STT_VTO": "A3",
"B_LTL_LTT_BAU": "B5",
"B_LTL_LTT_VTO": "B6",
"C_LTL_LTT_BAU": "C5",
"C_LTL_LTT_VTO": "C6"
}
output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"
# Base_CL_CT
# A_STL_STT_BAU
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)
#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)
smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)
#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder)
|
[
"smartplots3_setup.pltRealizedModeSplitByTrips",
"smartplots3_setup.pltAveragePersonSpeed_allModes",
"smartplots3_setup.pltAveragePersonSpeed_car",
"smartplots3_setup.pltModeSplitInVMT",
"smartplots3_setup.pltRHEmptyPooled",
"smartplots3_setup.tableSummary",
"smartplots3_setup.pltLdvTechnologySplitInVMT",
"smartplots3_setup.pltModeSplitInPMTPerCapita",
"smartplots3_setup.pltEnergyPerCapita",
"smartplots3_setup.pltRHWaitTime"
] |
[((4138, 4212), 'smartplots3_setup.pltEnergyPerCapita', 'smartplots3_setup.pltEnergyPerCapita', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base, output_folder)\n', (4174, 4212), False, 'import smartplots3_setup\n'), ((4213, 4300), 'smartplots3_setup.pltRealizedModeSplitByTrips', 'smartplots3_setup.pltRealizedModeSplitByTrips', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base,\n output_folder)\n', (4258, 4300), False, 'import smartplots3_setup\n'), ((4297, 4383), 'smartplots3_setup.pltModeSplitInPMTPerCapita', 'smartplots3_setup.pltModeSplitInPMTPerCapita', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base,\n output_folder)\n', (4341, 4383), False, 'import smartplots3_setup\n'), ((4380, 4470), 'smartplots3_setup.pltAveragePersonSpeed_allModes', 'smartplots3_setup.pltAveragePersonSpeed_allModes', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base,\n output_folder)\n', (4428, 4470), False, 'import smartplots3_setup\n'), ((4467, 4552), 'smartplots3_setup.pltAveragePersonSpeed_car', 'smartplots3_setup.pltAveragePersonSpeed_car', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base,\n output_folder)\n', (4510, 4552), False, 'import smartplots3_setup\n'), ((4549, 4622), 'smartplots3_setup.pltModeSplitInVMT', 'smartplots3_setup.pltModeSplitInVMT', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base, output_folder)\n', (4584, 4622), False, 'import smartplots3_setup\n'), ((4623, 4695), 'smartplots3_setup.pltRHEmptyPooled', 'smartplots3_setup.pltRHEmptyPooled', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base, output_folder)\n', (4657, 4695), False, 'import smartplots3_setup\n'), ((4696, 4765), 'smartplots3_setup.pltRHWaitTime', 'smartplots3_setup.pltRHWaitTime', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base, output_folder)\n', (4727, 4765), False, 'import smartplots3_setup\n'), ((4766, 4852), 'smartplots3_setup.pltLdvTechnologySplitInVMT', 'smartplots3_setup.pltLdvTechnologySplitInVMT', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base,\n output_folder)\n', (4810, 4852), False, 'import smartplots3_setup\n'), ((4954, 5022), 'smartplots3_setup.tableSummary', 'smartplots3_setup.tableSummary', (['plt_setup_smart3_base', 'output_folder'], {}), '(plt_setup_smart3_base, output_folder)\n', (4984, 5022), False, 'import smartplots3_setup\n')]
|
# python
# import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class Vertices(Initial):
"""
Samples vertices if n_evals >= 2 ** len(bounds).
Else low discrepancy sequences are sampled.
"""
def __init__(self, sampling_method):
"""
Parameters
----------
sampling_method : grAdapt.sampling.equidistributed Object
Sample low discrepancy sequences when initial point method is not feasible
"""
super().__init__(sampling_method)
def sample(self, bounds, n_evals):
"""Returns a numpy array of sampled points.
Does not include corner points of the hypercube/search space.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n_evals : int
number of initial points sampled by method
Returns
-------
(self.n_evals, len(self.bounds)) numpy array
"""
super().sample(bounds, n_evals)
if 2 ** len(self.bounds) > self.n_evals:
return self.sampling_method.sample(bounds=bounds, n=n_evals)
else:
corner_points = sample_corner_bounds(self.bounds)
num_corner_points = corner_points.shape[0]
if self.n_evals > 2 ** len(self.bounds):
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - num_corner_points),
x_history=corner_points)
return np.vstack((corner_points, random_points))
else:
return corner_points
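# Usage sketch (the sampling_method argument is a grAdapt equidistributed
# sampler instance; the bounds and n_evals values below are arbitrary):
#
#   vertices = Vertices(sampling_method)
#   # 2-d bounds -> 4 corner points, plus 2 low-discrepancy points to reach 6
#   points = vertices.sample(bounds=[(0, 1), (-1, 1)], n_evals=6)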
|
[
"numpy.vstack",
"grAdapt.utils.sampling.sample_corner_bounds"
] |
[((1391, 1424), 'grAdapt.utils.sampling.sample_corner_bounds', 'sample_corner_bounds', (['self.bounds'], {}), '(self.bounds)\n', (1411, 1424), False, 'from grAdapt.utils.sampling import sample_corner_bounds\n'), ((1819, 1860), 'numpy.vstack', 'np.vstack', (['(corner_points, random_points)'], {}), '((corner_points, random_points))\n', (1828, 1860), True, 'import numpy as np\n')]
|
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains utility code for reading packed data files.
"""
import os
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import h5py
import tqdm
# Atom typing
#
# Atom typing is the process of figuring out which layer each atom should be
# written to. For ease of testing, the packed data file contains a lot of
# potentially useful atomic information which can be distilled during the
# data loading process.
#
# Atom typing is implemented by map functions of the type:
# (atom descriptor) -> (layer index)
#
# If the layer index is -1, the atom is ignored.
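# For example (an illustrative sketch, not one of the typers defined below):
# a two-layer typer that writes carbons to layer 0 and all other heavy atoms
# to layer 1 could be expressed with CondAtomTyper as:
#
#   CondAtomTyper([
#       lambda num, aro, hdon, hacc, pcharge: num == 6,
#       lambda num, aro, hdon, hacc, pcharge: num not in [0, 1, 6],
#   ])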
class AtomTyper(object):
def __init__(self, fn, num_layers):
"""Initialize an atom typer.
Args:
fn: a function of type:
(atomic_num, aro, hdon, hacc, pcharge) -> (mask)
num_layers: number of output layers (<=32)
"""
self._fn = fn
self._num_layers = num_layers
def size(self):
return self._num_layers
def apply(self, *args):
return self._fn(*args)
class CondAtomTyper(AtomTyper):
def __init__(self, cond_func):
assert len(cond_func) <= 16
def _fn(*args):
v = 0
for k in range(len(cond_func)):
if cond_func[k](*args):
v |= 1 << k
return v
super(CondAtomTyper, self).__init__(_fn, len(cond_func))
REC_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num != 0
]),
# (C,N,O,S,*)
'simple': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (H,C,N,O,S,*)
'simple_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 1,
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta_mix': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
lambda num, aro, hdon, hacc, pcharge: num == 1, # hydrogen
lambda num, aro, hdon, hacc, pcharge: num == 6, # carbon
lambda num, aro, hdon, hacc, pcharge: num == 7, # nitrogen
lambda num, aro, hdon, hacc, pcharge: num == 8, # oxygen
lambda num, aro, hdon, hacc, pcharge: num == 16, # sulfur
])
}
LIG_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num: num != 0
]),
'simple': CondAtomTyper([
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
]),
'simple_h': CondAtomTyper([
lambda num: num == 1, # hydrogen
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
])
}
class FragmentDataset(Dataset):
"""Utility class to work with the packed fragments.h5 format."""
def __init__(self, fragment_file, rec_typer=REC_TYPER['simple'],
lig_typer=LIG_TYPER['simple'], filter_rec=None, filter_smi=None,
fdist_min=None, fdist_max=None, fmass_min=None, fmass_max=None,
verbose=False, lazy_loading=True):
"""Initializes the fragment dataset.
Args:
fragment_file: path to fragments.h5
rec_typer: AtomTyper for receptor
lig_typer: AtomTyper for ligand
            filter_rec: list of receptor ids to use (or None to use all)
            filter_smi: list of ligand smiles to use (or None to use all)
            verbose: if True, show progress bars while loading
            lazy_loading: if True, defer atom typing until examples are accessed
(filtering options):
fdist_min: minimum fragment distance
fdist_max: maximum fragment distance
fmass_min: minimum fragment mass (Da)
fmass_max: maximum fragment mass (Da)
"""
self._rec_typer = rec_typer
self._lig_typer = lig_typer
self.verbose = verbose
self._lazy_loading = lazy_loading
self.rec = self._load_rec(fragment_file, rec_typer)
self.frag = self._load_fragments(fragment_file, lig_typer)
self.valid_idx = self._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose)
def _load_rec(self, fragment_file, rec_typer):
"""Loads receptor information."""
f = h5py.File(fragment_file, 'r')
rec_coords = f['rec_coords'][()]
rec_types = f['rec_types'][()]
rec_lookup = f['rec_lookup'][()]
r = range(len(rec_types))
if self.verbose:
r = tqdm.tqdm(r, desc='Remap receptor atoms')
rec_remapped = np.zeros(len(rec_types), dtype=np.uint16)
if not self._lazy_loading:
for i in r:
rec_remapped[i] = rec_typer.apply(*rec_types[i])
rec_loaded = np.zeros(len(rec_lookup)).astype(np.bool)
# create rec mapping
rec_mapping = {}
for i in range(len(rec_lookup)):
rec_mapping[rec_lookup[i][0].decode('ascii')] = i
rec = {
'rec_coords': rec_coords,
'rec_types': rec_types,
'rec_remapped': rec_remapped,
'rec_lookup': rec_lookup,
'rec_mapping': rec_mapping,
'rec_loaded': rec_loaded
}
f.close()
return rec
def _load_fragments(self, fragment_file, lig_typer):
"""Loads fragment information."""
f = h5py.File(fragment_file, 'r')
frag_data = f['frag_data'][()]
frag_lookup = f['frag_lookup'][()]
frag_smiles = f['frag_smiles'][()]
frag_mass = f['frag_mass'][()]
frag_dist = f['frag_dist'][()]
frag_lig_smi = None
frag_lig_idx = None
if 'frag_lig_smi' in f.keys():
frag_lig_smi = f['frag_lig_smi'][()]
frag_lig_idx = f['frag_lig_idx'][()]
# unpack frag data into separate structures
frag_coords = frag_data[:,:3].astype(np.float32)
frag_types = frag_data[:,3].astype(np.uint8)
frag_remapped = np.zeros(len(frag_types), dtype=np.uint16)
if not self._lazy_loading:
for i in range(len(frag_types)):
frag_remapped[i] = lig_typer.apply(frag_types[i])
frag_loaded = np.zeros(len(frag_lookup)).astype(np.bool)
# find and save connection point
r = range(len(frag_lookup))
if self.verbose:
r = tqdm.tqdm(r, desc='Frag connection point')
frag_conn = np.zeros((len(frag_lookup), 3))
for i in r:
_,f_start,f_end,_,_ = frag_lookup[i]
fdat = frag_data[f_start:f_end]
found = False
for j in range(len(fdat)):
if fdat[j][3] == 0:
frag_conn[i,:] = tuple(fdat[j])[:3]
found = True
break
assert found, "missing fragment connection point at %d" % i
frag = {
'frag_coords': frag_coords, # d_idx -> (x,y,z)
'frag_types': frag_types, # d_idx -> (type)
'frag_remapped': frag_remapped, # d_idx -> (layer)
'frag_lookup': frag_lookup, # f_idx -> (rec_id, fstart, fend, pstart, pend)
'frag_conn': frag_conn, # f_idx -> (x,y,z)
'frag_smiles': frag_smiles, # f_idx -> smiles
'frag_mass': frag_mass, # f_idx -> mass
'frag_dist': frag_dist, # f_idx -> dist
'frag_lig_smi': frag_lig_smi,
'frag_lig_idx': frag_lig_idx,
'frag_loaded': frag_loaded
}
f.close()
return frag
def _get_valid_examples(self, filter_rec, filter_smi, fdist_min, fdist_max, fmass_min,
fmass_max, verbose):
"""Returns an array of valid fragment indexes.
"Valid" in this context means the fragment belongs to a receptor in
filter_rec and the fragment abides by the optional mass/distance
constraints.
"""
# keep track of valid examples
valid_mask = np.ones(self.frag['frag_lookup'].shape[0]).astype(np.bool)
num_frags = self.frag['frag_lookup'].shape[0]
# filter by receptor id
if filter_rec is not None:
valid_rec = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter rec')
for i in r:
rec = self.frag['frag_lookup'][i][0].decode('ascii')
if rec in filter_rec:
valid_rec[i] = 1
valid_mask *= valid_rec
# filter by ligand smiles string
if filter_smi is not None:
valid_lig = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter lig')
for i in r:
smi = self.frag['frag_lig_smi'][self.frag['frag_lig_idx'][i]]
smi = smi.decode('ascii')
if smi in filter_smi:
valid_lig[i] = 1
valid_mask *= valid_lig
# filter by fragment distance
if fdist_min is not None:
valid_mask[self.frag['frag_dist'] < fdist_min] = 0
if fdist_max is not None:
valid_mask[self.frag['frag_dist'] > fdist_max] = 0
# filter by fragment mass
if fmass_min is not None:
valid_mask[self.frag['frag_mass'] < fmass_min] = 0
if fmass_max is not None:
valid_mask[self.frag['frag_mass'] > fmass_max] = 0
# convert to a list of indexes
valid_idx = np.where(valid_mask)[0]
return valid_idx
def __len__(self):
"""Returns the number of valid fragment examples."""
return self.valid_idx.shape[0]
def __getitem__(self, idx):
"""Returns the Nth example.
Returns a dict with:
f_coords: fragment coordinates (Fx3)
f_types: fragment layers (Fx1)
p_coords: parent coordinates (Px3)
p_types: parent layers (Px1)
r_coords: receptor coordinates (Rx3)
r_types: receptor layers (Rx1)
conn: fragment connection point in the parent molecule (x,y,z)
smiles: fragment smiles string
"""
# convert to fragment index
frag_idx = self.valid_idx[idx]
return self.get_raw(frag_idx)
def get_raw(self, frag_idx):
# lookup fragment
rec_id, f_start, f_end, p_start, p_end = self.frag['frag_lookup'][frag_idx]
smiles = self.frag['frag_smiles'][frag_idx].decode('ascii')
conn = self.frag['frag_conn'][frag_idx]
# lookup receptor
rec_idx = self.rec['rec_mapping'][rec_id.decode('ascii')]
_, r_start, r_end = self.rec['rec_lookup'][rec_idx]
# fetch data
# f_coords = self.frag['frag_coords'][f_start:f_end]
# f_types = self.frag['frag_types'][f_start:f_end]
p_coords = self.frag['frag_coords'][p_start:p_end]
r_coords = self.rec['rec_coords'][r_start:r_end]
if self._lazy_loading and self.frag['frag_loaded'][frag_idx] == 0:
frag_types = self.frag['frag_types']
frag_remapped = self.frag['frag_remapped']
# load parent
for i in range(p_start, p_end):
frag_remapped[i] = self._lig_typer.apply(frag_types[i])
self.frag['frag_loaded'][frag_idx] = 1
if self._lazy_loading and self.rec['rec_loaded'][rec_idx] == 0:
rec_types = self.rec['rec_types']
rec_remapped = self.rec['rec_remapped']
# load receptor
for i in range(r_start, r_end):
rec_remapped[i] = self._rec_typer.apply(*rec_types[i])
self.rec['rec_loaded'][rec_idx] = 1
p_mask = self.frag['frag_remapped'][p_start:p_end]
r_mask = self.rec['rec_remapped'][r_start:r_end]
return {
# 'f_coords': f_coords,
# 'f_types': f_types,
'p_coords': p_coords,
'p_types': p_mask,
'r_coords': r_coords,
'r_types': r_mask,
'conn': conn,
'smiles': smiles
}
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._lig_typer.size()
def rec_layers(self):
return self._rec_typer.size()
class SharedFragmentDataset(object):
def __init__(self, dat, filter_rec=None, filter_smi=None, fdist_min=None,
fdist_max=None, fmass_min=None, fmass_max=None):
self._dat = dat
self.valid_idx = self._dat._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose=True)
def __len__(self):
return self.valid_idx.shape[0]
def __getitem__(self, idx):
frag_idx = self.valid_idx[idx]
return self._dat.get_raw(frag_idx)
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self._dat.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._dat.lig_layers()
def rec_layers(self):
return self._dat.rec_layers()
class FingerprintDataset(Dataset):
def __init__(self, fingerprint_file):
"""Initializes a fingerprint dataset.
Args:
fingerprint_file: path to a fingerprint .h5 file
"""
self.fingerprints = self._load_fingerprints(fingerprint_file)
def _load_fingerprints(self, fingerprint_file):
"""Loads fingerprint information."""
f = h5py.File(fingerprint_file, 'r')
fingerprint_data = f['fingerprints'][()]
fingerprint_smiles = f['smiles'][()]
# create smiles->idx mapping
fingerprint_mapping = {}
for i in range(len(fingerprint_smiles)):
sm = fingerprint_smiles[i].decode('ascii')
fingerprint_mapping[sm] = i
fingerprints = {
'fingerprint_data': fingerprint_data,
'fingerprint_mapping': fingerprint_mapping,
'fingerprint_smiles': fingerprint_smiles,
}
f.close()
return fingerprints
def for_smiles(self, smiles):
"""Return a Tensor of fingerprints for a list of smiles.
Args:
smiles: size N list of smiles strings (as str not bytes)
"""
fp = np.zeros((len(smiles), self.fingerprints['fingerprint_data'].shape[1]))
for i in range(len(smiles)):
fp_idx = self.fingerprints['fingerprint_mapping'][smiles[i]]
fp[i] = self.fingerprints['fingerprint_data'][fp_idx]
return torch.Tensor(fp)
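# Usage sketch (file paths and the fdist_max cutoff are placeholders):
#
#   dataset = FragmentDataset('fragments.h5', rec_typer=REC_TYPER['simple'],
#                             lig_typer=LIG_TYPER['simple'], fdist_max=4.0)
#   example = dataset[0]   # dict with p_coords, p_types, r_coords, r_types, ...
#   fps = FingerprintDataset('fingerprints.h5').for_smiles([example['smiles']])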
|
[
"numpy.ones",
"numpy.where",
"tqdm.tqdm",
"torch.Tensor",
"h5py.File",
"numpy.zeros"
] |
[((6574, 6603), 'h5py.File', 'h5py.File', (['fragment_file', '"""r"""'], {}), "(fragment_file, 'r')\n", (6583, 6603), False, 'import h5py\n'), ((7665, 7694), 'h5py.File', 'h5py.File', (['fragment_file', '"""r"""'], {}), "(fragment_file, 'r')\n", (7674, 7694), False, 'import h5py\n'), ((16271, 16303), 'h5py.File', 'h5py.File', (['fingerprint_file', '"""r"""'], {}), "(fingerprint_file, 'r')\n", (16280, 16303), False, 'import h5py\n'), ((17332, 17348), 'torch.Tensor', 'torch.Tensor', (['fp'], {}), '(fp)\n', (17344, 17348), False, 'import torch\n'), ((6802, 6843), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""Remap receptor atoms"""'}), "(r, desc='Remap receptor atoms')\n", (6811, 6843), False, 'import tqdm\n'), ((8655, 8697), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""Frag connection point"""'}), "(r, desc='Frag connection point')\n", (8664, 8697), False, 'import tqdm\n'), ((10515, 10549), 'numpy.zeros', 'np.zeros', (['num_frags'], {'dtype': 'np.bool'}), '(num_frags, dtype=np.bool)\n', (10523, 10549), True, 'import numpy as np\n'), ((10966, 11000), 'numpy.zeros', 'np.zeros', (['num_frags'], {'dtype': 'np.bool'}), '(num_frags, dtype=np.bool)\n', (10974, 11000), True, 'import numpy as np\n'), ((11892, 11912), 'numpy.where', 'np.where', (['valid_mask'], {}), '(valid_mask)\n', (11900, 11912), True, 'import numpy as np\n'), ((10309, 10351), 'numpy.ones', 'np.ones', (["self.frag['frag_lookup'].shape[0]"], {}), "(self.frag['frag_lookup'].shape[0])\n", (10316, 10351), True, 'import numpy as np\n'), ((10628, 10659), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""filter rec"""'}), "(r, desc='filter rec')\n", (10637, 10659), False, 'import tqdm\n'), ((11079, 11110), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""filter lig"""'}), "(r, desc='filter lig')\n", (11088, 11110), False, 'import tqdm\n')]
|
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from dataset_pothole import pothole
from keras.models import model_from_json
# 4. Load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = pothole.load_data()
print(X_train.shape)
print()
print (y_train.shape)
print()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 200, 200, 1)
X_test = X_test.reshape(X_test.shape[0], 200, 200, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 3380
X_test /= 3380
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 4)
Y_test = np_utils.to_categorical(y_test, 4)
# 7. Define model architecture
nb_classes = 4
# number of epochs to train
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(200, 200, 1)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# track accuracy so evaluate() returns [loss, accuracy] for the printout below
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
batch_size=32, nb_epoch=2, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
|
[
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"keras.layers.Dense",
"keras.utils.np_utils.to_categorical",
"numpy.random.seed",
"keras.layers.Activation",
"dataset_pothole.pothole.load_data",
"keras.layers.Dropout"
] |
[((19, 38), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (33, 38), True, 'import numpy as np\n'), ((423, 442), 'dataset_pothole.pothole.load_data', 'pothole.load_data', ([], {}), '()\n', (440, 442), False, 'from dataset_pothole import pothole\n'), ((785, 820), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', '(4)'], {}), '(y_train, 4)\n', (808, 820), False, 'from keras.utils import np_utils\n'), ((830, 864), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', '(4)'], {}), '(y_test, 4)\n', (853, 864), False, 'from keras.utils import np_utils\n'), ((1099, 1111), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1109, 1111), False, 'from keras.models import Sequential\n'), ((1275, 1293), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1285, 1293), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1380, 1398), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1390, 1398), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1123, 1218), 'keras.layers.Convolution2D', 'Convolution2D', (['nb_filters', 'nb_conv', 'nb_conv'], {'border_mode': '"""valid"""', 'input_shape': '(200, 200, 1)'}), "(nb_filters, nb_conv, nb_conv, border_mode='valid',\n input_shape=(200, 200, 1))\n", (1136, 1218), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1324, 1367), 'keras.layers.Convolution2D', 'Convolution2D', (['nb_filters', 'nb_conv', 'nb_conv'], {}), '(nb_filters, nb_conv, nb_conv)\n', (1337, 1367), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1429, 1471), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(nb_pool, nb_pool)'}), '(pool_size=(nb_pool, nb_pool))\n', (1441, 1471), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1483, 1495), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1490, 1495), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1508, 1517), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1515, 1517), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1529, 1539), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (1534, 1539), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1551, 1569), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1561, 1569), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1581, 1593), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1588, 1593), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1605, 1622), 'keras.layers.Dense', 'Dense', (['nb_classes'], {}), '(nb_classes)\n', (1610, 1622), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1634, 1655), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1644, 1655), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n')]
|
import hashlib
import random
import string
import logging
from django.db import models
LOG = logging.getLogger(__name__)
class Device(models.Model):
name = models.CharField(max_length=50, unique=True)
customer = models.CharField(max_length=50)
agent_status = models.CharField(max_length=10, default="offline")
program_status = models.CharField(max_length=10, default="down")
last_updated = models.DateTimeField(auto_now=True)
def delete_mqtt_credentials(self):
self.auth.all().delete()
self.acl.all().delete()
class MqttAuth(models.Model):
username = models.CharField(max_length=100, unique=True)
password = models.CharField(max_length=100)
salt = models.CharField(max_length=10)
activated = models.BooleanField(default=False)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="auth", related_query_name="auth", null=True
)
def __str__(self):
return "activated" if self.activated else "not activated"
@classmethod
def create(cls, username, password, activated, device=None):
salt = "".join(random.choice(string.ascii_letters) for _ in range(10))
password = hashlib.sha256((password + salt).encode("utf-8")).hexdigest()
return MqttAuth(username=username, password=password, salt=salt, activated=activated, device=device)
class MqttAcl(models.Model):
allow = models.SmallIntegerField()
ipaddr = models.CharField(max_length=60, null=True)
username = models.CharField(max_length=100, null=True)
clientid = models.CharField(max_length=100, null=True)
access = models.SmallIntegerField()
topic = models.CharField(max_length=100)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="acl", related_query_name="acl", null=True
)
class Telemetry(models.Model):
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="telemetry", related_query_name="telemetry"
)
created_on = models.DateTimeField(auto_now_add=True)
state = models.JSONField()
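# Credential creation sketch (mirrors MqttAuth.create above; the username and
# password values are placeholders):
#
#   auth = MqttAuth.create(username="device-1", password="secret", activated=False)
#   auth.save()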
|
[
"logging.getLogger",
"random.choice",
"django.db.models.ForeignKey",
"django.db.models.JSONField",
"django.db.models.BooleanField",
"django.db.models.SmallIntegerField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] |
[((96, 123), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (113, 123), False, 'import logging\n'), ((165, 209), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (181, 209), False, 'from django.db import models\n'), ((225, 256), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (241, 256), False, 'from django.db import models\n'), ((276, 326), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'default': '"""offline"""'}), "(max_length=10, default='offline')\n", (292, 326), False, 'from django.db import models\n'), ((348, 395), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'default': '"""down"""'}), "(max_length=10, default='down')\n", (364, 395), False, 'from django.db import models\n'), ((415, 450), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (435, 450), False, 'from django.db import models\n'), ((603, 648), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (619, 648), False, 'from django.db import models\n'), ((664, 696), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (680, 696), False, 'from django.db import models\n'), ((708, 739), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (724, 739), False, 'from django.db import models\n'), ((756, 790), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (775, 790), False, 'from django.db import models\n'), ((804, 918), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Device'], {'on_delete': 'models.CASCADE', 'related_name': '"""auth"""', 'related_query_name': '"""auth"""', 'null': '(True)'}), "(Device, on_delete=models.CASCADE, related_name='auth',\n related_query_name='auth', null=True)\n", (821, 918), False, 'from django.db import models\n'), ((1414, 1440), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (1438, 1440), False, 'from django.db import models\n'), ((1454, 1496), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'null': '(True)'}), '(max_length=60, null=True)\n', (1470, 1496), False, 'from django.db import models\n'), ((1512, 1555), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (1528, 1555), False, 'from django.db import models\n'), ((1571, 1614), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (1587, 1614), False, 'from django.db import models\n'), ((1628, 1654), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (1652, 1654), False, 'from django.db import models\n'), ((1667, 1699), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1683, 1699), False, 'from django.db import models\n'), ((1713, 1825), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Device'], {'on_delete': 'models.CASCADE', 'related_name': '"""acl"""', 'related_query_name': '"""acl"""', 'null': '(True)'}), "(Device, on_delete=models.CASCADE, related_name='acl',\n related_query_name='acl', null=True)\n", (1730, 1825), False, 'from django.db import models\n'), ((1882, 1996), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Device'], {'on_delete': 'models.CASCADE', 'related_name': '"""telemetry"""', 'related_query_name': '"""telemetry"""'}), "(Device, on_delete=models.CASCADE, related_name=\n 'telemetry', related_query_name='telemetry')\n", (1899, 1996), False, 'from django.db import models\n'), ((2023, 2062), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2043, 2062), False, 'from django.db import models\n'), ((2075, 2093), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (2091, 2093), False, 'from django.db import models\n'), ((1125, 1160), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (1138, 1160), False, 'import random\n')]
|
from django.contrib.auth import SESSION_KEY
from django.core.cache import cache
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
from proxy_server.response import AJAX_REQUEST
import httplib, json, proxy_server
def invoke_backend_service(method, function_path, json_data=dict(), request=None, response_token=True, public=False, secure=False):
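    # Calls a backend REST endpoint over HTTP(S) and returns a (status_code, payload) tuple,
    # or (code, error_dict) on failure. When response_token is True, the USER_TOKEN returned
    # by the backend is written back into the Django session and cache.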
error_message = None
try:
if public is False and request is None:
error_message = 'A private web service must receive Django\'s request'
raise Exception
if response_token is True and request is None:
error_message = 'A web service cannot expect a response token and not receive Django\'s request'
raise Exception
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
headers = proxy_server.RESTFUL_HEADER
headers[proxy_server.API_KEY] = settings.SECRET_KEY
if request is not None:
pk = cache.get(AJAX_REQUEST, None)
if pk:
request.user.pk = pk
cache.delete(AJAX_REQUEST)
headers[proxy_server.USER_TOKEN] = request.user.pk
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
return 403, None
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
return 204, None
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response_token is True:
user_dict = None
if SESSION_KEY in request.session:
user_dict = cache.get(request.session[SESSION_KEY])
cache.delete(request.session[SESSION_KEY])
request.session[SESSION_KEY] = response_json[proxy_server.USER_TOKEN]
request.user.pk = response_json[proxy_server.USER_TOKEN]
request.session[proxy_server.EXPIRATION_DATE] = response_json[proxy_server.EXPIRATION_DATE]
if user_dict:
user_dict['pk'] = request.user.pk
cache.set(request.session[SESSION_KEY], user_dict)
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
result = None
if proxy_server.RESPONSE in response_json:
result = response_json[proxy_server.RESPONSE]
return 200, result
else:
code = response.status
if proxy_server.ERROR in response_json:
error_message = response_json[proxy_server.ERROR][proxy_server.MESSAGE]
raise Exception(code)
else:
error_message = response.reason
raise Exception(code)
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if e is not None and str(e).isdigit() else 500
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return code, error
def invoke_backend_service_as_proxy(request, method, function_path, json_data=dict(), response_token=True, secure=False):
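    # Same backend call, but acting as a transparent proxy: the backend's response headers are
    # copied onto a Django HttpResponse (hop-by-hop headers stripped) and returned unchanged.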
error_message = None
try:
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
headers = proxy_server.RESTFUL_HEADER
headers[proxy_server.USER_TOKEN] = request.META.get(proxy_server.HTTP_USER_TOKEN)
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
headers[proxy_server.API_KEY] = request.META.get(proxy_server.HTTP_API_KEY)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
resp = HttpResponse(status=response.status, reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
resp = HttpResponse(status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
resp = HttpResponse(response_data, status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if e is not None and str(e).isdigit() else 500
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return HttpResponseServerError(json.dumps(error), content_type='application/json')
|
[
"httplib.HTTPConnection",
"json.loads",
"httplib.HTTPSConnection",
"django.core.cache.cache.delete",
"django.http.HttpResponse",
"json.dumps",
"django.core.cache.cache.set",
"django.core.cache.cache.get"
] |
[((1575, 1604), 'django.core.cache.cache.get', 'cache.get', (['AJAX_REQUEST', 'None'], {}), '(AJAX_REQUEST, None)\n', (1584, 1604), False, 'from django.core.cache import cache\n'), ((5989, 6049), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'response.status', 'reason': 'response.reason'}), '(status=response.status, reason=response.reason)\n', (6001, 6049), False, 'from django.http import HttpResponse, HttpResponseServerError\n'), ((6532, 6629), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'response.status', 'content_type': '"""application/json"""', 'reason': 'response.reason'}), "(status=response.status, content_type='application/json',\n reason=response.reason)\n", (6544, 6629), False, 'from django.http import HttpResponse, HttpResponseServerError\n'), ((7367, 7480), 'django.http.HttpResponse', 'HttpResponse', (['response_data'], {'status': 'response.status', 'content_type': '"""application/json"""', 'reason': 'response.reason'}), "(response_data, status=response.status, content_type=\n 'application/json', reason=response.reason)\n", (7379, 7480), False, 'from django.http import HttpResponse, HttpResponseServerError\n'), ((1017, 1086), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['settings.BACKEND_HOST', 'settings.BACKEND_PORT'], {}), '(settings.BACKEND_HOST, settings.BACKEND_PORT)\n', (1040, 1086), False, 'import httplib, json, proxy_server\n'), ((1128, 1174), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['settings.BACKEND_HOST'], {}), '(settings.BACKEND_HOST)\n', (1151, 1174), False, 'import httplib, json, proxy_server\n'), ((1262, 1330), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['settings.BACKEND_HOST', 'settings.BACKEND_PORT'], {}), '(settings.BACKEND_HOST, settings.BACKEND_PORT)\n', (1284, 1330), False, 'import httplib, json, proxy_server\n'), ((1372, 1417), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['settings.BACKEND_HOST'], {}), '(settings.BACKEND_HOST)\n', (1394, 1417), False, 'import httplib, json, proxy_server\n'), ((1677, 1703), 'django.core.cache.cache.delete', 'cache.delete', (['AJAX_REQUEST'], {}), '(AJAX_REQUEST)\n', (1689, 1703), False, 'from django.core.cache import cache\n'), ((1917, 1938), 'json.dumps', 'json.dumps', (['json_data'], {}), '(json_data)\n', (1927, 1938), False, 'import httplib, json, proxy_server\n'), ((2496, 2521), 'json.loads', 'json.loads', (['response_data'], {}), '(response_data)\n', (2506, 2521), False, 'import httplib, json, proxy_server\n'), ((4932, 5001), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['settings.BACKEND_HOST', 'settings.BACKEND_PORT'], {}), '(settings.BACKEND_HOST, settings.BACKEND_PORT)\n', (4955, 5001), False, 'import httplib, json, proxy_server\n'), ((5043, 5089), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['settings.BACKEND_HOST'], {}), '(settings.BACKEND_HOST)\n', (5066, 5089), False, 'import httplib, json, proxy_server\n'), ((5177, 5245), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['settings.BACKEND_HOST', 'settings.BACKEND_PORT'], {}), '(settings.BACKEND_HOST, settings.BACKEND_PORT)\n', (5199, 5245), False, 'import httplib, json, proxy_server\n'), ((5287, 5332), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['settings.BACKEND_HOST'], {}), '(settings.BACKEND_HOST)\n', (5309, 5332), False, 'import httplib, json, proxy_server\n'), ((5699, 5720), 'json.dumps', 'json.dumps', (['json_data'], {}), '(json_data)\n', (5709, 5720), False, 'import httplib, json, proxy_server\n'), ((6966, 6991), 'json.loads', 'json.loads', 
(['response_data'], {}), '(response_data)\n', (6976, 6991), False, 'import httplib, json, proxy_server\n'), ((8183, 8200), 'json.dumps', 'json.dumps', (['error'], {}), '(error)\n', (8193, 8200), False, 'import httplib, json, proxy_server\n'), ((2788, 2827), 'django.core.cache.cache.get', 'cache.get', (['request.session[SESSION_KEY]'], {}), '(request.session[SESSION_KEY])\n', (2797, 2827), False, 'from django.core.cache import cache\n'), ((2848, 2890), 'django.core.cache.cache.delete', 'cache.delete', (['request.session[SESSION_KEY]'], {}), '(request.session[SESSION_KEY])\n', (2860, 2890), False, 'from django.core.cache import cache\n'), ((3264, 3314), 'django.core.cache.cache.set', 'cache.set', (['request.session[SESSION_KEY]', 'user_dict'], {}), '(request.session[SESSION_KEY], user_dict)\n', (3273, 3314), False, 'from django.core.cache import cache\n')]
|
import transitions
from functools import partial
# from transitions import transitions.Machine
# TODO: whenever there is a state change store the following
# (DAY,function_called) -> Stored for every person for agent status, state and Testing state
class AgentStatusA(object):
"""The Statemachine of the agent"""
status = ['Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation']
def __init__(self):
"""Agent Status class is responsible for figuring out the Mobility of the agent, the agent mobility can be
'Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation'
"""
super(AgentStatusA, self).__init__()
self.ADDED_BIT = True
self.TruthStatus = None
self.Last_Added_Placeholder = None
self.buffer = []
self.Status = self.status[0]
# def log_update(self,message):
def update_objects(self,TruthStatus):
"""Update object of Virusmodel
Args:
TruthStatus (object): Truth State object to update
"""
self.TruthStatus = TruthStatus
def __remove_from_transport__(self):
if self.useTN == True:
self.City.TravellingCitizens.remove(self)
#print('Person {} removed from travelling list of City {}. New length = {}'.format(self.IntID, self.City.Name, len(self.City.TravellingCitizens)))
def _remove_(self):
"""Remove from workplace and transport list
"""
if self.ADDED_BIT:
obj = self.get_workplace_obj()
if obj !=None:
self.buffer.append('_remove_')
obj.Working.remove(self)
self.ADDED_BIT = False
self.__remove_from_transport__()
def _add_(self):
"""Add to workplace and transport list
"""
        if not self.ADDED_BIT:
obj = self.get_workplace_obj()
if obj != None:
if obj.Working!=None:
self.buffer.append('_add_')
obj.Working.add(self)
self.ADDED_BIT = True
if self.useTN == True:
self.City.TravellingCitizens.add(self)
def _left_(self):
"""Leave city, calls remove
"""
self._remove_()
def _entered_(self):
"""Come back to city
"""
self._add_()
def __remove_from_placeholder__(self):
"""Remove the person from the Truth Status Placeholders
Returns:
bool: Whether Removed or not
"""
try:
if self.Last_Added_Placeholder == 0: # If he is AFreeP
self.TruthStatus.AFreeP.remove(self)
return True
elif self.Last_Added_Placeholder == 1: # If he was Quarentined
self.TruthStatus.AQuarentinedP.remove(self)
return True
elif self.Last_Added_Placeholder == 2: # If he was Isolated
self.TruthStatus.SIsolatedP.remove(self)
return True
elif self.Last_Added_Placeholder == 3: # If he was Hospitalized
self.TruthStatus.SHospitalizedP.remove(self)
return True
elif self.Last_Added_Placeholder == 4: # If he was Icu
self.TruthStatus.SIcuP.remove(self)
return True
else:
return False
except:
self.about()
raise
def leave_city(self):
acceptable_states = [self.status[0]]
try:
assert self.Status in acceptable_states
except:
print('##########', self.Status)
raise
self.Status = self.status[2]
self._left_()
self.__remove_from_placeholder__()
self.Last_Added_Placeholder = None
def enter_city(self):
acceptable_states = [self.status[2]]
try:
assert self.Status in acceptable_states
except:
print('##########', self.Status)
raise
self.Status = self.status[0]
self._entered_()
if self.is_Asymptomatic():
self.TruthStatus.AFreeP.add(self)
self.Last_Added_Placeholder = 0
def quarentined(self,DAY):
acceptable_states = [self.status[0],self.status[1],self.status[2]]
assert self.Status in acceptable_states
if self.Last_Added_Placeholder != 1:
self.__remove_from_placeholder__()
if self.is_Free(): # If free add to quarentined placeholders
self.TruthStatus.AQuarentinedP.add(self)
self.Last_Added_Placeholder = 1
self.Status = self.status[1]
self._remove_()
def hospitalized(self,DAY):
acceptable_states = [self.status[0],self.status[1]]
assert self.Status in acceptable_states
self.Status = self.status[3]
self._remove_()
self.show_symptoms(DAY)
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SHospitalizedP.add(self)
self.Last_Added_Placeholder = 3
def admit_icu(self,DAY):
acceptable_states = [self.status[0],self.status[1],self.status[3]]
assert self.Status in acceptable_states
self.Status = self.status[4]
self._remove_()
self.show_symptoms(DAY)
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SIcuP.add(self)
self.Last_Added_Placeholder = 4
def isolate(self,Today):
acceptable_states = [self.status[0],self.status[1],self.status[3],self.status[4],self.status[5]]
assert self.Status in acceptable_states
if self.Status == self.status[0] or self.Status == self.status[1]:
self.show_symptoms(Today)
if self.Last_Added_Placeholder != 2:
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SIsolatedP.add(self)
self.Last_Added_Placeholder = 2
self.Status = self.status[5]
self._remove_()
def is_Free(self):
return self.Status == self.status[0]
def is_Quarentined(self):
return self.Status == self.status[1]
def is_Out_of_City(self):
return self.Status == self.status[2]
def is_Hospitalized(self):
return self.Status == self.status[3]
def is_ICU(self):
return self.Status == self.status[4]
def is_Isolation(self):
return self.Status == self.status[5]
class AgentStateA(AgentStatusA):
states = ['Healthy','Asymptomatic','Symptomatic','Recovered','Died']
def __init__(self):
"""Agent status is the status of person with respect ot the virus
"""
super(AgentStateA, self).__init__()
#self = person
self.State = self.states[0]
self.TruthStatus = None
def infected(self,DAY):
acceptable_states = [self.states[0]]
assert self.State in acceptable_states
self.State = self.states[1]
self.TruthStatus.AFreeP.add(self)
self.Last_Added_Placeholder = 0
self.History["Infected"] = DAY
def show_symptoms(self,DAY):
acceptable_states = [self.states[1],self.states[2]]
assert self.State in acceptable_states
self.State = self.states[2]
self.History["Symptomatic"] = DAY
def recover(self,DAY):
acceptable_states = [self.states[2]]
assert self.State in acceptable_states
self.State = self.states[3]
self.Status = self.status[5]
        if self.__remove_from_placeholder__(): # removal succeeded, i.e. the person is in the city
            self.TruthStatus.RRecoveredP.add(self)
            self.Last_Added_Placeholder = 5
self.History["Recovered"] = DAY
self.History["Died"] = -1
def die(self,DAY):
acceptable_states = [self.states[2]]
assert self.State in acceptable_states
self.State = self.states[4]
self.Status = self.status[5]
        if self.__remove_from_placeholder__(): # removal succeeded, i.e. the person is in the city
self.TruthStatus.RDiedP.add(self)
self.Last_Added_Placeholder = 6
self.History["Recovered"] = -1
self.History["Died"] = DAY
def is_Healthy(self):
return self.State == self.states[0]
def is_Asymptomatic(self):
return self.State == self.states[1]
def is_Symptomatic(self):
return self.State == self.states[2]
def is_Recovered(self):
return self.State == self.states[3]
def is_Died(self):
return self.State == self.states[4]
class TestingState(object):
"""Summary
Attributes:
in_stack (bool): Description
machine (TYPE): Description
state (str): Description
tested (bool): Description
"""
machine = transitions.Machine(model=None, states=['Not_tested', 'Awaiting_Testing', 'Tested_Positive','Tested_Negative'], initial='Not_tested',
transitions=[
{'trigger': 'awaiting_test', 'source': ['Not_tested','Awaiting_Testing','Tested_Negative'], 'dest': 'Awaiting_Testing','before':'add_to_TestingQueue'},
{'trigger': 'tested_positive', 'source': 'Awaiting_Testing', 'dest': 'Tested_Positive','before':'tested_positive_func'},
{'trigger': 'tested_negative', 'source': 'Awaiting_Testing', 'dest': 'Tested_Negative','before':'tested_negative_func'},
])
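    # Triggers such as awaiting_test(), tested_positive() and tested_negative() are not defined as
    # methods; __getattribute__ below binds them to the shared machine at call time, e.g.
    # person.awaiting_test() resolves to machine.events['awaiting_test'].trigger(person).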
def __init__(self):
"""This is responsible for updating testing state of the person
Deleted Parameters:
person (object): Home object
VM (object): Virusmodel object
"""
super().__init__()
self.state = 'Not_tested'
def __remove_from_testing_list__(self):
self.City.TestingQueue.remove(self)
def add_to_TestingQueue(self, PrivateTest=False):
"""Summary
"""
# This function is for the City to add citizens into testingQueue
if PrivateTest == False:
if self.state != 'Awaiting_Testing' :
self.City.TestingQueue.append(self)
if self.state == 'Tested_Negative':
self.City.TestedP['Negative'].remove(self)
#print('City {} added person {}'.format(self.City.Name, self.IntID))
#pass type of test
def tested_positive_func(self,Today, PrivateTest=False):
"""Summary
"""
self.City.TestedP['Positive'].add(self)
self.City.NumTestedPositive += 1
if PrivateTest == False:
self.__remove_from_testing_list__()
if self.is_Quarentined():
self.isolate(Today)
def tested_negative_func(self, PrivateTest=False):
"""Summary
"""
self.City.TestedP['Negative'].add(self)
if PrivateTest == False:
self.__remove_from_testing_list__()
def __getattribute__(self, item):
"""Summary
Args:
item (TYPE): Description
Returns:
TYPE: Description
"""
try:
return super(TestingState, self).__getattribute__(item)
except AttributeError:
if item in self.machine.events:
return partial(self.machine.events[item].trigger, self)
raise
|
[
"functools.partial",
"transitions.Machine"
] |
[((7619, 8201), 'transitions.Machine', 'transitions.Machine', ([], {'model': 'None', 'states': "['Not_tested', 'Awaiting_Testing', 'Tested_Positive', 'Tested_Negative']", 'initial': '"""Not_tested"""', 'transitions': "[{'trigger': 'awaiting_test', 'source': ['Not_tested', 'Awaiting_Testing',\n 'Tested_Negative'], 'dest': 'Awaiting_Testing', 'before':\n 'add_to_TestingQueue'}, {'trigger': 'tested_positive', 'source':\n 'Awaiting_Testing', 'dest': 'Tested_Positive', 'before':\n 'tested_positive_func'}, {'trigger': 'tested_negative', 'source':\n 'Awaiting_Testing', 'dest': 'Tested_Negative', 'before':\n 'tested_negative_func'}]"}), "(model=None, states=['Not_tested', 'Awaiting_Testing',\n 'Tested_Positive', 'Tested_Negative'], initial='Not_tested',\n transitions=[{'trigger': 'awaiting_test', 'source': ['Not_tested',\n 'Awaiting_Testing', 'Tested_Negative'], 'dest': 'Awaiting_Testing',\n 'before': 'add_to_TestingQueue'}, {'trigger': 'tested_positive',\n 'source': 'Awaiting_Testing', 'dest': 'Tested_Positive', 'before':\n 'tested_positive_func'}, {'trigger': 'tested_negative', 'source':\n 'Awaiting_Testing', 'dest': 'Tested_Negative', 'before':\n 'tested_negative_func'}])\n", (7638, 8201), False, 'import transitions\n'), ((9688, 9736), 'functools.partial', 'partial', (['self.machine.events[item].trigger', 'self'], {}), '(self.machine.events[item].trigger, self)\n', (9695, 9736), False, 'from functools import partial\n')]
|
"""Config flow for Eva Calor."""
from collections import OrderedDict
import logging
import uuid
from pyevacalor import ( # pylint: disable=redefined-builtin
ConnectionError,
Error as EvaCalorError,
UnauthorizedError,
evacalor,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN
_LOGGER = logging.getLogger(__name__)
def conf_entries(hass):
"""Return the email tuples for the domain."""
return set(
entry.data[CONF_EMAIL] for entry in hass.config_entries.async_entries(DOMAIN)
)
class EvaCalorConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Eva Calor Config Flow handler."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def _entry_in_configuration_exists(self, user_input) -> bool:
"""Return True if config already exists in configuration."""
email = user_input[CONF_EMAIL]
if email in conf_entries(self.hass):
return True
return False
async def async_step_user(self, user_input=None):
"""User initiated integration."""
errors = {}
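        # The first call arrives with user_input=None and just renders the form; the second call
        # validates the credentials via pyevacalor before the config entry is created.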
if user_input is not None:
# Validate user input
email = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
if self._entry_in_configuration_exists(user_input):
return self.async_abort(reason="device_already_configured")
try:
gen_uuid = str(uuid.uuid1())
evacalor(email, password, gen_uuid)
except UnauthorizedError:
errors["base"] = "unauthorized"
except ConnectionError:
errors["base"] = "connection_error"
except EvaCalorError:
errors["base"] = "unknown_error"
if "base" not in errors:
return self.async_create_entry(
title=DOMAIN,
data={
CONF_EMAIL: email,
CONF_PASSWORD: password,
CONF_UUID: gen_uuid,
},
)
else:
user_input = {}
data_schema = OrderedDict()
data_schema[vol.Required(CONF_EMAIL, default=user_input.get(CONF_EMAIL))] = str
data_schema[
vol.Required(CONF_PASSWORD, default=user_input.get(CONF_PASSWORD))
] = str
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=errors
)
|
[
"logging.getLogger",
"collections.OrderedDict",
"voluptuous.Schema",
"uuid.uuid1",
"pyevacalor.evacalor"
] |
[((421, 448), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (438, 448), False, 'import logging\n'), ((2267, 2280), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2278, 2280), False, 'from collections import OrderedDict\n'), ((1584, 1619), 'pyevacalor.evacalor', 'evacalor', (['email', 'password', 'gen_uuid'], {}), '(email, password, gen_uuid)\n', (1592, 1619), False, 'from pyevacalor import ConnectionError, Error as EvaCalorError, UnauthorizedError, evacalor\n'), ((2563, 2586), 'voluptuous.Schema', 'vol.Schema', (['data_schema'], {}), '(data_schema)\n', (2573, 2586), True, 'import voluptuous as vol\n'), ((1554, 1566), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1564, 1566), False, 'import uuid\n')]
|
from os import path
from telethon import Client
from telethon.types import Message, Voice
from callsmusic import callsmusic, queues
import converter
from downloaders import youtube
from config import BOT_NAME as bn, DURATION_LIMIT
from helpers.filters import command, other_filters
from helpers.decorators import errors
from helpers.errors import DurationLimitError
from helpers.gets import get_url, get_file_name
from telethon.types import InlineKeyboardButton, InlineKeyboardMarkup
@Client.on_message(command("lplay") & other_filters)
@errors
async def play(_, message: Message):
lel = await message.reply("🔄 **Processing** sounds...")
sender_id = message.from_user.id
sender_name = message.from_user.first_name
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="🔊 Group Support",
url="https://t.me/VcgMusicGroup")
]
]
)
audio = (message.reply_to_message.audio or message.reply_to_message.voice) if message.reply_to_message else None
url = get_url(message)
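    # Prefer the replied-to audio/voice message; otherwise fall back to a URL found in the text.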
if audio:
if round(audio.duration / 60) > DURATION_LIMIT:
raise DurationLimitError(
f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed to play!"
)
file_name = get_file_name(audio)
file_path = await converter.convert(
(await message.reply_to_message.download(file_name))
if not path.isfile(path.join("downloads", file_name)) else file_name
)
elif url:
file_path = await converter.convert(youtube.download(url))
else:
return await lel.edit_text("❗ You did not give me anything to play!")
if message.chat.id in callsmusic.pytgcalls.active_calls:
position = await queues.put(message.chat.id, file=file_path)
await lel.edit(f"#⃣ **Queued** at position {position}!")
else:
callsmusic.pytgcalls.join_group_call(message.chat.id, file_path)
await message.reply_photo(
photo="https://telegra.ph/file/a4fa687ed647cfef52402.jpg",
reply_markup=keyboard,
caption="▶️ **Playing** here the song requested by {}!".format(
message.from_user.mention()
),
)
return await lel.delete()
|
[
"helpers.gets.get_url",
"callsmusic.queues.put",
"downloaders.youtube.download",
"os.path.join",
"helpers.filters.command",
"helpers.gets.get_file_name",
"helpers.errors.DurationLimitError",
"telethon.types.InlineKeyboardButton",
"callsmusic.callsmusic.pytgcalls.join_group_call"
] |
[((1140, 1156), 'helpers.gets.get_url', 'get_url', (['message'], {}), '(message)\n', (1147, 1156), False, 'from helpers.gets import get_url, get_file_name\n'), ((1392, 1412), 'helpers.gets.get_file_name', 'get_file_name', (['audio'], {}), '(audio)\n', (1405, 1412), False, 'from helpers.gets import get_url, get_file_name\n'), ((1997, 2061), 'callsmusic.callsmusic.pytgcalls.join_group_call', 'callsmusic.pytgcalls.join_group_call', (['message.chat.id', 'file_path'], {}), '(message.chat.id, file_path)\n', (2033, 2061), False, 'from callsmusic import callsmusic, queues\n'), ((508, 524), 'helpers.filters.command', 'command', (['"""lplay"""'], {}), "('lplay')\n", (515, 524), False, 'from helpers.filters import command, other_filters\n'), ((1246, 1345), 'helpers.errors.DurationLimitError', 'DurationLimitError', (['f"""❌ Videos longer than {DURATION_LIMIT} minute(s) aren\'t allowed to play!"""'], {}), '(\n f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren\'t allowed to play!")\n', (1264, 1345), False, 'from helpers.errors import DurationLimitError\n'), ((1870, 1913), 'callsmusic.queues.put', 'queues.put', (['message.chat.id'], {'file': 'file_path'}), '(message.chat.id, file=file_path)\n', (1880, 1913), False, 'from callsmusic import callsmusic, queues\n'), ((822, 900), 'telethon.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""🔊 Group Support"""', 'url': '"""https://t.me/VcgMusicGroup"""'}), "(text='🔊 Group Support', url='https://t.me/VcgMusicGroup')\n", (842, 900), False, 'from telethon.types import InlineKeyboardButton, InlineKeyboardMarkup\n'), ((1672, 1693), 'downloaders.youtube.download', 'youtube.download', (['url'], {}), '(url)\n', (1688, 1693), False, 'from downloaders import youtube\n'), ((1554, 1587), 'os.path.join', 'path.join', (['"""downloads"""', 'file_name'], {}), "('downloads', file_name)\n", (1563, 1587), False, 'from os import path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Brief: Produces rand disjoint communities (clusters) for the given network with sizes similar in the ground truth.
:Description:
Takes number of the resulting communities and their sizes from the specified groundtruth (actually any sample
of the community structure, the real ground truth is not required) and fills stubs of the clusters with
randomly selected nodes from the input network with all their neighbors.
Note: Produced result is a random disjoint partitioning, so if the 'ground truth' had overlapping clusters, then
the number of nodes in the last cluster will be less than in the sample.
:Authors: <NAME> <<EMAIL>>
:Organizations: eXascale lab <http://exascale.info/>, ScienceWise <http://sciencewise.info/>,
Lumais <http://www.lumais.com/>
:Date: 2015-07
"""
from __future__ import print_function, division # Required for stderr output, must be the first import
import sys
import os # Pathes processing
#import igraph as ig
import random as rand
try:
	# ATTENTION: Python 3 never treats these imports as relative, which results in an error here unlike Python 2
from utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
except ImportError:
# Note: this case should be the second because explicit relative imports cause various errors
	# under Python2 and Python3, which complicates their handling
from .utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
# Default number of the resulting clusterings (partitions, i.e files that contain disjoint clusters)
_RESNUM = 1
class Params(object):
"""Input parameters (arguments)"""
def __init__(self):
"""Parameters:
		groundtruth - file name of the ground truth clustering
		network - file name of the input network
dirnet - whether the input network is directed
outnum - number of the resulting clusterings
randseed - seed for the clustering generation (automatically generated if not specified)
outpseed - whether to output the seed (automatically set to True on if the seed is generated automatically)
outdir - output directory
outname - base name of the output file based on the network name
		outext - extension of the output files based on the groundtruth extension
"""
self.groundtruth = None
self.network = None
self.dirnet = False
self.outnum = _RESNUM
self.randseed = None
self.outpseed = False
self.outdir = None
self.outname = None
self.outext = ''
def parseParams(args):
"""Parse user-specified parameters
returns - parsed input arguments, Params()
"""
assert isinstance(args, (tuple, list)) and args, 'Input arguments must be specified'
prm = Params()
for arg in args:
# Validate input format
preflen = 3
if arg[0] != '-' or len(arg) <= preflen:
raise ValueError('Unexpected argument: ' + arg)
if arg[1] == 'g':
prm.groundtruth = arg[preflen:]
prm.outext = os.path.splitext(prm.groundtruth)[1]
elif arg[1] == 'i':
pos = arg.find('=', 2)
if pos == -1 or arg[2] not in 'ud=' or len(arg) == pos + 1:
raise ValueError('Unexpected argument: ' + arg)
pos += 1
prm.network = arg[pos:]
prm.outname, netext = os.path.splitext(os.path.split(prm.network)[1])
prm.dirnet = asymnet(netext.lower(), arg[2] == 'd')
if not prm.outname:
raise ValueError('Invalid network name (is a directory): ' + prm.network)
elif arg[1] == 'n':
prm.outnum = int(arg[preflen:])
assert prm.outnum >= 1, 'outnum must be a natural number'
elif arg[1] == 'r':
prm.randseed = arg[preflen:]
elif arg[1] == 'o':
prm.outdir = arg[preflen:]
else:
raise ValueError('Unexpected argument: ' + arg)
if not (prm.groundtruth and prm.network):
raise ValueError('Input network and groundtruth file names must be specified')
if not prm.outdir:
prm.outdir = os.path.split(prm.network)[0]
if not prm.outdir:
prm.outdir = '.'
if not prm.randseed:
try:
prm.randseed = ''.join(str(ord(c)) for c in os.urandom(8))
except NotImplementedError:
prm.randseed = str(rand.random())
prm.outpseed = True
return prm
def randcommuns(*args):
"""Generate random clusterings for the specified network"""
prm = parseParams(args)
print('Starting randcommuns clustering:'
'\n\tgroundtruth: {}'
'\n\t{} network: {}'
'\n\t{} cls of {} in {} with randseed: {}'
.format(prm.groundtruth, 'directed' if prm.dirnet else 'undirected', prm.network
, prm.outnum, prm.outname + prm.outext, prm.outdir, prm.randseed))
# Load Data from simple real-world networks
graph = loadNsl(prm.network, prm.dirnet) # ig.Graph.Read_Ncol(network, directed=dirnet) # , weights=False
# Load statistics from the ground thruth
groundstat = []
with open(prm.groundtruth, 'r') as fground:
for line in fground:
# Skip empty lines and comments (possible header)
if not line or line[0] == '#':
continue
groundstat.append(len(line.split()))
	# Create the output dir if required
if prm.outdir and not os.path.exists(prm.outdir):
os.makedirs(prm.outdir)
	# Generate rand clusterings
rand.seed(prm.randseed)
while prm.outnum > 0:
prm.outnum -= 1
# Active (remained) nodes indices of the input network
actnodes = set(graph.vs.indices) #pylint: disable=E1101
clusters = [] # Forming clusters
		# Reference size of the ground truth clusters (they might have overlaps unlike the current partitioning)
for clmarg in groundstat:
nodes = [] # Content of the current cluster
# Check whether all nodes of the initial network are mapped
if not actnodes:
break
# Select subsequent rand node
ind = rand.sample(actnodes, 1)[0]
actnodes.remove(ind)
nodes.append(ind)
inode = 0 # Index of the node in the current cluster
# Select neighbors of the selected nodes to fill the clusters
while len(nodes) < clmarg and actnodes:
for nd in graph.vs[nodes[inode]].neighbors(): #pylint: disable=E1136
if nd.index not in actnodes:
continue
actnodes.remove(nd.index)
nodes.append(nd.index)
if len(nodes) >= clmarg or not actnodes:
break
inode += 1
if inode >= len(nodes) and len(nodes) < clmarg and actnodes:
ind = rand.sample(actnodes, 1)[0]
actnodes.remove(ind)
nodes.append(ind)
# Use original labels of the nodes
clusters.append(graph.vs[ind]['name'] for ind in nodes) #pylint: disable=E1136
# Output resulting clusters
with open('/'.join((prm.outdir, ''.join((prm.outname, '_', str(prm.outnum), prm.outext)))), 'w') as fout:
for cl in clusters:
# Note: print() unlike fout.write() appends the newline
print(' '.join(cl), file=fout)
# Output randseed used for the generated clusterings
# Output to the dir above if possible to not mix cluster levels with rand seed
if prm.outpseed:
with open('/'.join((prm.outdir, (os.path.splitext(prm.outname)[0] + '.seed'))), 'w') as fout:
# Note: print() unlike fout.write() appends the newline
print(prm.randseed, file=fout)
print('Random clusterings are successfully generated')
if __name__ == '__main__':
if len(sys.argv) > 2:
randcommuns(*sys.argv[1:])
else:
print('\n'.join(('Produces random disjoint partitioning (clusters are formed with rand nodes and their neighbors)'
			' for the input network specified in the NSL format (generalization of NCOL, SNAP, etc.)\n',
'Usage: {app} -g=<ground_truth> -i[{{u, d}}]=<input_network> [-n=<res_num>] [-r=<rand_seed>] [-o=<outp_dir>]',
'',
' -g=<ground_truth> - ground truth clustering as a template for sizes of the resulting communities',
' -i[X]=<input_network> - file of the input network in the format: <src_id> <dst_id> [<weight>]',
' Xu - undirected input network (<src_id> <dst_id> implies also <dst_id> <src_id>). Default',
' Xd - directed input network (both <src_id> <dst_id> and <dst_id> <src_id> are specified)',
' NOTE: (un)directed flag is considered only for the networks with non-NSL file extension',
' -n=<res_num> - number of the resulting clusterings to generate. Default: {resnum}',
' -r=<rand_seed> - random seed, string. Default: value from the system rand source (otherwise current time)',
			' -o=<outp_dir> - output directory for the resulting clusterings. Default: ./<input_network>/'
)).format(app=sys.argv[0], resnum=_RESNUM))
|
[
"os.path.exists",
"random.sample",
"os.makedirs",
"os.urandom",
"os.path.splitext",
"random.seed",
"os.path.split",
"random.random",
"utils.parser_nsl.loadNsl"
] |
[((4524, 4556), 'utils.parser_nsl.loadNsl', 'loadNsl', (['prm.network', 'prm.dirnet'], {}), '(prm.network, prm.dirnet)\n', (4531, 4556), False, 'from utils.parser_nsl import asymnet, loadNsl\n'), ((5029, 5052), 'random.seed', 'rand.seed', (['prm.randseed'], {}), '(prm.randseed)\n', (5038, 5052), True, 'import random as rand\n'), ((4976, 4999), 'os.makedirs', 'os.makedirs', (['prm.outdir'], {}), '(prm.outdir)\n', (4987, 4999), False, 'import os\n'), ((3807, 3833), 'os.path.split', 'os.path.split', (['prm.network'], {}), '(prm.network)\n', (3820, 3833), False, 'import os\n'), ((4946, 4972), 'os.path.exists', 'os.path.exists', (['prm.outdir'], {}), '(prm.outdir)\n', (4960, 4972), False, 'import os\n'), ((2896, 2929), 'os.path.splitext', 'os.path.splitext', (['prm.groundtruth'], {}), '(prm.groundtruth)\n', (2912, 2929), False, 'import os\n'), ((5563, 5587), 'random.sample', 'rand.sample', (['actnodes', '(1)'], {}), '(actnodes, 1)\n', (5574, 5587), True, 'import random as rand\n'), ((4021, 4034), 'random.random', 'rand.random', ([], {}), '()\n', (4032, 4034), True, 'import random as rand\n'), ((3177, 3203), 'os.path.split', 'os.path.split', (['prm.network'], {}), '(prm.network)\n', (3190, 3203), False, 'import os\n'), ((3954, 3967), 'os.urandom', 'os.urandom', (['(8)'], {}), '(8)\n', (3964, 3967), False, 'import os\n'), ((6132, 6156), 'random.sample', 'rand.sample', (['actnodes', '(1)'], {}), '(actnodes, 1)\n', (6143, 6156), True, 'import random as rand\n'), ((6775, 6804), 'os.path.splitext', 'os.path.splitext', (['prm.outname'], {}), '(prm.outname)\n', (6791, 6804), False, 'import os\n')]
|
import onnx
import onnxruntime
import torch
import onnx.numpy_helper
# added by huxi, load rpn config
from pcdet.pointpillar_quantize_config import load_rpn_config_json
# ========================================
config_dict = load_rpn_config_json.get_config()
onnx_model_file = config_dict["vfe_onnx_file"]
onnx_model = onnx.load(onnx_model_file)
onnx.checker.check_model(onnx_model)
#check model
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
#[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "linear.weight"]
[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "14"]
[tensor_bn_gamma] = [t for t in onnx_model.graph.initializer if t.name == "norm.weight"]
[tensor_bn_beta] = [t for t in onnx_model.graph.initializer if t.name == "norm.bias"]
[tensor_bn_mean] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_mean"]
[tensor_bn_var] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_var"]
mat_w = onnx.numpy_helper.to_array(tensor_mat_weight)
mat_w = mat_w.transpose()
mat_w_list = list(mat_w.flatten())
bn_gamma_w = onnx.numpy_helper.to_array(tensor_bn_gamma)
bn_gamma_w_list = list(bn_gamma_w.flatten())
bn_beta_w = onnx.numpy_helper.to_array(tensor_bn_beta)
bn_beta_w_list = list(bn_beta_w.flatten())
bn_mean_w = onnx.numpy_helper.to_array(tensor_bn_mean)
bn_mean_w_list = list(bn_mean_w.flatten())
bn_var_w = onnx.numpy_helper.to_array(tensor_bn_var)
bn_var_w_list = list(bn_var_w.flatten())
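# The exported text file holds five space-separated rows, in this order:
# flattened linear weight (transposed), BN gamma, BN beta, BN running_mean, BN running_var.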
result_line = ""
exported_vfe_weight_file = config_dict["vfe_exported_weight_file"]
with open(exported_vfe_weight_file, 'w') as f:
for idx,val in enumerate(mat_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_gamma_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_beta_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_mean_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_var_w_list):
result_line += str(val)
result_line += " "
f.write(result_line)
|
[
"pcdet.pointpillar_quantize_config.load_rpn_config_json.get_config",
"onnx.checker.check_model",
"onnx.load",
"onnx.numpy_helper.to_array"
] |
[((229, 262), 'pcdet.pointpillar_quantize_config.load_rpn_config_json.get_config', 'load_rpn_config_json.get_config', ([], {}), '()\n', (260, 262), False, 'from pcdet.pointpillar_quantize_config import load_rpn_config_json\n'), ((324, 350), 'onnx.load', 'onnx.load', (['onnx_model_file'], {}), '(onnx_model_file)\n', (333, 350), False, 'import onnx\n'), ((351, 387), 'onnx.checker.check_model', 'onnx.checker.check_model', (['onnx_model'], {}), '(onnx_model)\n', (375, 387), False, 'import onnx\n'), ((1067, 1112), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['tensor_mat_weight'], {}), '(tensor_mat_weight)\n', (1093, 1112), False, 'import onnx\n'), ((1188, 1231), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['tensor_bn_gamma'], {}), '(tensor_bn_gamma)\n', (1214, 1231), False, 'import onnx\n'), ((1290, 1332), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['tensor_bn_beta'], {}), '(tensor_bn_beta)\n', (1316, 1332), False, 'import onnx\n'), ((1389, 1431), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['tensor_bn_mean'], {}), '(tensor_bn_mean)\n', (1415, 1431), False, 'import onnx\n'), ((1487, 1528), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['tensor_bn_var'], {}), '(tensor_bn_var)\n', (1513, 1528), False, 'import onnx\n')]
|
#! /usr/bin/python
import requests
import re
from bs4 import BeautifulSoup
import colors
class FindingComments(object):
def __init__(self, url):
self.url = url
self.comment_list = ['<!--(.*)-->']
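        # note: the pattern only matches single-line HTML comments (re.DOTALL is not used)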
self.found_comments = {}
def get_soure_code(self):
resp_text = requests.get(self.url).text
return resp_text
def find_comment(self):
source_code = self.get_soure_code()
for comment in self.comment_list:
comments = re.findall(comment, source_code)
self.found_comments[comment] = comments
def parse_comments(self):
self.find_comment()
comment_dict = {}
        if any(self.found_comments.values()):
for comment_code, comment in self.found_comments.items():
colors.success('Found for {} : {}'
.format(comment_code, comment))
comment_dict[comment_code] = comment
else:
colors.error('No comment found')
return comment_dict
|
[
"re.findall",
"colors.error",
"requests.get"
] |
[((304, 326), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (316, 326), False, 'import requests\n'), ((495, 527), 're.findall', 're.findall', (['comment', 'source_code'], {}), '(comment, source_code)\n', (505, 527), False, 'import re\n'), ((969, 1001), 'colors.error', 'colors.error', (['"""No comment found"""'], {}), "('No comment found')\n", (981, 1001), False, 'import colors\n')]
|
#!/usr/bin/python
# This program revises the existing overview file.
# If a keyword is found in an Abstract of an accession of a gene, the url of the abstract is added to the overview file
# The revised overview.txt is created in the same directory of the old one and named overview_new.txt
"""
Usage: link_assignment.py -o <overview> -pub <pubhits>
-h --help Please enter the files overview.txt and the pubhits.
"""
from docopt import docopt
from sys import argv
import csv
import os
import util
def load_pubhits_in_dict(pubhits_path):
with open(pubhits_path, 'r') as pubhits_file:
pubhits_reader = csv.reader(pubhits_file, delimiter='\t', )
return dict((row[util.PUBHITS_GENE_ID_INDEX].strip(), row) for row in pubhits_reader)
def build_overview_link(pubhits_dict, gene_id, links):
"""
builds the pubhits link out of the gene id and the pubhits dict
:param pubhits_dict: pubhits dictionary
:param gene_id: gene id
    :param links: existing links
:return: links
"""
pubhits_acc = pubhits_dict[gene_id][util.PUBHITS_ACC_INDEX]
pubhits_link = pubhits_dict[gene_id][util.PUBHITS_LINK_INDEX]
if links.strip() == util.NO_LINK:
new_links = [pubhits_acc + ":" + pubhits_link]
else:
new_links = [links, pubhits_acc + ":" + pubhits_link]
overview_link = ','.join(new_links)
if not overview_link or overview_link == util.TODO:
overview_link = util.NO_KEYWORDS
return overview_link
def set_link_in_row(old_row, pubhits_dict):
"""
set link in existing overview row (dictionary)
:param old_row: overview row
:param pubhits_dict: pubhits dictionary
:return: revised overview row
"""
gene_id = old_row[util.GENE_ID]
if (gene_id in pubhits_dict):
old_row[util.LINKS] = build_overview_link(pubhits_dict, gene_id, old_row[util.LINKS])
return old_row
def main():
args = docopt(__doc__, argv[1:])
overview_path = args['<overview>']
pubhits = args['<pubhits>']
new_overview_path = os.path.splitext(overview_path)[0] + "_new.txt"
pubhits_dict = load_pubhits_in_dict(pubhits)
with open(overview_path, 'r') as overview, open(new_overview_path, 'w') as new_overview:
overview_reader = csv.DictReader(overview, delimiter='\t')
overview_writer = csv.DictWriter(new_overview, delimiter='\t', extrasaction='ignore',
fieldnames=overview.readline().rstrip('\n').split("\t"))
overview.seek(0)
overview_writer.writeheader()
for overview_row in overview_reader:
overview_row = set_link_in_row(overview_row, pubhits_dict)
overview_writer.writerow(overview_row)
if __name__ == '__main__':
main()
|
[
"csv.DictReader",
"csv.reader",
"docopt.docopt",
"os.path.splitext"
] |
[((1913, 1938), 'docopt.docopt', 'docopt', (['__doc__', 'argv[1:]'], {}), '(__doc__, argv[1:])\n', (1919, 1938), False, 'from docopt import docopt\n'), ((620, 660), 'csv.reader', 'csv.reader', (['pubhits_file'], {'delimiter': '"""\t"""'}), "(pubhits_file, delimiter='\\t')\n", (630, 660), False, 'import csv\n'), ((2250, 2290), 'csv.DictReader', 'csv.DictReader', (['overview'], {'delimiter': '"""\t"""'}), "(overview, delimiter='\\t')\n", (2264, 2290), False, 'import csv\n'), ((2034, 2065), 'os.path.splitext', 'os.path.splitext', (['overview_path'], {}), '(overview_path)\n', (2050, 2065), False, 'import os\n')]
|
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
df = pd.read_csv(path)
df.head(5)
X = df.drop(['customerID','Churn'],1)
y = df['Churn']
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
#Replacing spaces with 'NaN' in train dataset
X_train['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Replacing spaces with 'NaN' in test dataset
X_test['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Converting the type of column from X_train to float
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
#Converting the type of column from X_test to float
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
#Filling missing values
X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)
X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True)
#Check value counts
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#Label encoding train data
for x in cat_cols:
le = LabelEncoder()
X_train[x] = le.fit_transform(X_train[x])
#Label encoding test data
for x in cat_cols:
le = LabelEncoder()
X_test[x] = le.fit_transform(X_test[x])
#Encoding train data target
y_train = y_train.replace({'No':0, 'Yes':1})
#Encoding test data target
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
print(X_train, X_test, y_train, y_test)
ada_model = AdaBoostClassifier(random_state = 0)
ada_model.fit(X_train, y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
ada_score
ada_cm = confusion_matrix(y_test, y_pred)
ada_cm
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
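# 5 learning rates x 2 max_depth values (1 and 2) -> 10 candidate settings for the grid search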
# Code starts here
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print(xgb_score, clf_score)
print(xgb_cm, clf_cm)
print(xgb_cr, clf_cr)
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.metrics.accuracy_score",
"xgboost.XGBClassifier",
"sklearn.metrics.confusion_matrix"
] |
[((137, 154), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (148, 154), True, 'import pandas as pd\n'), ((255, 308), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (271, 308), False, 'from sklearn.model_selection import train_test_split\n'), ((1763, 1797), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1781, 1797), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((1882, 1912), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1896, 1912), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((1934, 1966), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1950, 1966), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2215, 2244), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2228, 2244), False, 'from xgboost import XGBClassifier\n'), ((2324, 2354), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2338, 2354), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2364, 2396), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2380, 2396), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2406, 2443), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2427, 2443), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2457, 2513), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'xgb_model', 'param_grid': 'parameters'}), '(estimator=xgb_model, param_grid=parameters)\n', (2469, 2513), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2592, 2622), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2606, 2622), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2632, 2664), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2648, 2664), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((2674, 2711), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2695, 2711), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((1208, 1222), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1220, 1222), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1328, 1342), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1340, 1342), False, 'from sklearn.preprocessing import LabelEncoder\n')]
|
import argparse, time, logging, os, math, random
os.environ["MXNET_USE_OPERATOR_TUNING"] = "0"
import numpy as np
from scipy import stats
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler
from os import listdir
import os.path
import pickle
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
mpi_size = mpi_comm.Get_size()
mpi_rank = mpi_comm.Get_rank()
# print('rank: %d' % (mpi_rank), flush=True)
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="dir of the data", required=True)
parser.add_argument("--valdir", type=str, help="dir of the val data", required=True)
parser.add_argument("--batchsize", type=int, help="batchsize", default=8)
parser.add_argument("--epochs", type=int, help="epochs", default=100)
parser.add_argument("--interval", type=int, help="log interval", default=10)
parser.add_argument("--nsplit", type=int, help="number of split", default=40)
parser.add_argument("--lr", type=float, help="learning rate", default=0.001)
parser.add_argument("--alpha", type=float, help="moving average", default=1.0)
parser.add_argument("--alpha-decay", type=float, help="decay factor of alpha", default=0.5)
parser.add_argument("--alpha-decay-epoch", type=str, help="epoch of alpha decay", default='800')
parser.add_argument("--log", type=str, help="dir of the log file", default='train_cifar100.log')
parser.add_argument("--classes", type=int, help="number of classes", default=20)
parser.add_argument("--iterations", type=int, help="number of local epochs", default=50)
parser.add_argument("--aggregation", type=str, help="aggregation method", default='mean')
parser.add_argument("--nbyz", type=int, help="number of Byzantine workers", default=0)
parser.add_argument("--trim", type=int, help="number of trimmed workers on one side", default=0)
# parser.add_argument("--lr-decay", type=float, help="lr decay rate", default=0.1)
# parser.add_argument("--lr-decay-epoch", type=str, help="lr decay epoch", default='400')
parser.add_argument("--iid", type=int, help="IID setting", default=0)
parser.add_argument("--model", type=str, help="model", default='mobilenetv2_1.0')
parser.add_argument("--save", type=int, help="save", default=0)
parser.add_argument("--start-epoch", type=int, help="epoch start from", default=-1)
parser.add_argument("--seed", type=int, help="random seed", default=733)
args = parser.parse_args()
# print(args, flush=True)
filehandler = logging.FileHandler(args.log)
streamhandler = logging.StreamHandler()
if mpi_rank == 0:
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
mx.random.seed(args.seed + mpi_rank)
random.seed(args.seed + mpi_rank)
np.random.seed(args.seed + mpi_rank)
data_dir = os.path.join(args.dir, 'dataset_split_{}'.format(args.nsplit))
train_dir = os.path.join(data_dir, 'train')
# val_dir = os.path.join(data_dir, 'val')
val_train_dir = os.path.join(args.valdir, 'train')
val_val_dir = os.path.join(args.valdir, 'val')
training_files = []
for filename in sorted(listdir(train_dir)):
absolute_filename = os.path.join(train_dir, filename)
training_files.append(absolute_filename)
context = mx.cpu()
classes = args.classes
def get_train_batch(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
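    # transpose from NHWC (channels-last, as stored in the pickle) to NCHW, the layout MXNet conv layers expect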
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_train_batch_byz(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(classes - 1 - L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(classes - 1 - L)
def get_val_train_batch(data_dir):
test_filename = os.path.join(data_dir, 'train_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_val_val_batch(data_dir):
test_filename = os.path.join(data_dir, 'val_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
train_data_list = []
for training_file in training_files:
[train_X, train_Y] = get_train_batch(training_file)
train_dataset = mx.gluon.data.dataset.ArrayDataset(train_X, train_Y)
train_data = gluon.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
train_data_list.append(train_data)
[val_train_X, val_train_Y] = get_val_train_batch(val_train_dir)
val_train_dataset = mx.gluon.data.dataset.ArrayDataset(val_train_X, val_train_Y)
val_train_data = gluon.data.DataLoader(val_train_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
[val_val_X, val_val_Y] = get_val_val_batch(val_val_dir)
val_val_dataset = mx.gluon.data.dataset.ArrayDataset(val_val_X, val_val_Y)
val_val_data = gluon.data.DataLoader(val_val_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
model_name = args.model
if model_name == 'default':
net = gluon.nn.Sequential()
with net.name_scope():
# First convolutional layer
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# Second convolutional layer
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
# Third convolutional layer
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        # Flatten and apply fully connected layers
net.add(gluon.nn.Flatten())
# net.add(gluon.nn.Dense(512, activation="relu"))
# net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dropout(rate=0.25))
net.add(gluon.nn.Dense(classes))
else:
model_kwargs = {'ctx': context, 'pretrained': False, 'classes': classes}
net = get_model(model_name, **model_kwargs)
if model_name.startswith('cifar') or model_name == 'default':
net.initialize(mx.init.Xavier(), ctx=context)
else:
net.initialize(mx.init.MSRAPrelu(), ctx=context)
# # no weight decay
# for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
# v.wd_mult = 0.0
optimizer = 'sgd'
lr = args.lr
# optimizer_params = {'momentum': 0.9, 'learning_rate': lr, 'wd': 0.0001}
optimizer_params = {'momentum': 0.0, 'learning_rate': lr, 'wd': 0.0}
# lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
alpha_decay_epoch = [int(i) for i in args.alpha_decay_epoch.split(',')]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
train_cross_entropy = mx.metric.CrossEntropy()
# warmup
# print('warm up', flush=True)
trainer.set_learning_rate(0.01)
# train_data = random.choice(train_data_list)
train_data = train_data_list[90]
for local_epoch in range(5):
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
if args.start_epoch > 0:
break
if args.start_epoch > 0:
break
# # force initialization
# train_data = random.choice(train_data_list)
# for i, (data, label) in enumerate(train_data):
# outputs = net(data)
if mpi_rank == 0:
params_prev = [param.data().copy() for param in net.collect_params().values()]
else:
params_prev = None
nd.waitall()
# broadcast
params_prev = mpi_comm.bcast(params_prev, root=0)
for param, param_prev in zip(net.collect_params().values(), params_prev):
param.set_data(param_prev)
if mpi_rank == 0:
worker_list = list(range(mpi_size))
training_file_index_list = [i for i in range(len(training_files))]
alpha = args.alpha
randperm_choice_list = []
randperm_list = [i for i in range(args.nsplit)]
for i in range(int(math.ceil(args.epochs * mpi_size / args.nsplit))):
random.shuffle(randperm_list)
randperm_choice_list = randperm_choice_list + randperm_list
if args.start_epoch > 0:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (args.start_epoch))
net.load_parameters(filename, ctx=context)
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f'%(args.start_epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha))
nd.waitall()
time_0 = time.time()
for epoch in range(args.start_epoch+1, args.epochs):
# train_metric.reset()
# if epoch in lr_decay_epoch:
# lr = lr * args.lr_decay
if epoch in alpha_decay_epoch:
alpha = alpha * args.alpha_decay
tic = time.time()
if args.iid == 0:
if mpi_rank == 0:
training_file_index_sublist = randperm_choice_list[(mpi_size * epoch):(mpi_size * epoch + mpi_size)]
# logger.info(training_file_index_sublist)
else:
training_file_index_sublist = None
training_file_index = mpi_comm.scatter(training_file_index_sublist, root=0)
train_data = train_data_list[training_file_index]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
trainer.set_learning_rate(lr)
if alpha < 1:
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
param_prev[:] = param.data() * (1-alpha)
# select byz workers
if args.nbyz > 0:
if mpi_rank == 0:
random.shuffle(worker_list)
byz_worker_list = worker_list[0:args.nbyz]
else:
byz_worker_list = None
byz_worker_list = mpi_comm.bcast(byz_worker_list, root=0)
else:
byz_worker_list = []
if mpi_rank in byz_worker_list:
# byz worker
[byz_train_X, byz_train_Y] = get_train_batch_byz(random.choice(training_files))
byz_train_dataset = mx.gluon.data.dataset.ArrayDataset(byz_train_X, byz_train_Y)
byz_train_data = gluon.data.DataLoader(byz_train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
net.initialize(mx.init.MSRAPrelu(), ctx=context, force_reinit=True)
for local_epoch in range(args.iterations):
for i, (data, label) in enumerate(byz_train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
else:
# train
# local epoch
for local_epoch in range(args.iterations):
if args.iid == 1:
train_data = random.choice(train_data_list)
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
# aggregation
nd.waitall()
params_np = [param.data().copy().asnumpy() for param in net.collect_params().values()]
params_np_list = mpi_comm.gather(params_np, root=0)
if mpi_rank == 0:
n_params = len(params_np)
if args.aggregation == "trim" or args.trim > 0:
params_np = [ ( stats.trim_mean( np.stack( [params[j] for params in params_np_list], axis=0), args.trim/mpi_size, axis=0 ) ) for j in range(n_params) ]
else:
params_np = [ ( np.mean( np.stack( [params[j] for params in params_np_list], axis=0), axis=0 ) ) for j in range(n_params) ]
else:
params_np = None
params_np = mpi_comm.bcast(params_np, root=0)
params_nd = [ nd.array(param_np) for param_np in params_np ]
for param, param_nd in zip(net.collect_params().values(), params_nd):
param.set_data(param_nd)
if alpha < 1:
# moving average
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
weight = param.data()
weight[:] = weight * alpha + param_prev
# test
nd.waitall()
toc = time.time()
if ( epoch % args.interval == 0 or epoch == args.epochs-1 ) :
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f, time=%f, elapsed=%f'%(epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha, toc-tic, time.time()-time_0))
# logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f'%(epoch, top1, top5))
if args.save == 1:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (epoch))
net.save_parameters(filename)
nd.waitall()
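# ---------------------------------------------------------------------------
# Illustration only (never called): a minimal sketch of the server-side
# aggregation performed inside the epoch loop above, assuming the same
# semantics as the "mean"/"trim" branches. `worker_params` stands in for
# params_np_list (one list of numpy arrays per worker); the function name is
# hypothetical and not part of the original training flow.
def _aggregate_params_sketch(worker_params, trim_k=0, n_workers=1):
    # Stack each parameter tensor across workers -> (n_workers, ...), then take
    # either the plain mean or a coordinate-wise trimmed mean that cuts the
    # trim_k/n_workers fraction from each side (Byzantine-robust aggregation).
    n_params = len(worker_params[0])
    aggregated = []
    for j in range(n_params):
        stacked = np.stack([params[j] for params in worker_params], axis=0)
        if trim_k > 0:
            aggregated.append(stats.trim_mean(stacked, trim_k / n_workers, axis=0))
        else:
            aggregated.append(np.mean(stacked, axis=0))
    return aggregated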
|
[
"logging.getLogger",
"logging.StreamHandler",
"mxnet.autograd.record",
"mxnet.gluon.nn.Conv2D",
"mxnet.gluon.nn.BatchNorm",
"mxnet.init.Xavier",
"numpy.array",
"mxnet.gluon.nn.MaxPool2D",
"mxnet.gluon.nn.Sequential",
"mxnet.gluon.data.dataset.ArrayDataset",
"mxnet.gluon.nn.Flatten",
"mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"os.listdir",
"mxnet.metric.Accuracy",
"argparse.ArgumentParser",
"mxnet.nd.waitall",
"numpy.stack",
"logging.FileHandler",
"numpy.random.seed",
"mxnet.nd.array",
"mxnet.gluon.data.DataLoader",
"mxnet.nd.softmax",
"random.choice",
"random.shuffle",
"mxnet.gluon.nn.Dense",
"mxnet.metric.TopKAccuracy",
"os.path.splitext",
"pickle.load",
"mxnet.gluon.nn.Dropout",
"time.time",
"math.ceil",
"mxnet.cpu",
"os.path.join",
"mxnet.metric.CrossEntropy",
"random.seed",
"gluoncv.model_zoo.get_model",
"mxnet.random.seed",
"mxnet.init.MSRAPrelu"
] |
[((622, 647), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (645, 647), False, 'import argparse\n'), ((2619, 2648), 'logging.FileHandler', 'logging.FileHandler', (['args.log'], {}), '(args.log)\n', (2638, 2648), False, 'import argparse, time, logging, os, math, random\n'), ((2665, 2688), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2686, 2688), False, 'import argparse, time, logging, os, math, random\n'), ((2850, 2886), 'mxnet.random.seed', 'mx.random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2864, 2886), True, 'import mxnet as mx\n'), ((2887, 2920), 'random.seed', 'random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2898, 2920), False, 'import argparse, time, logging, os, math, random\n'), ((2921, 2957), 'numpy.random.seed', 'np.random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2935, 2957), True, 'import numpy as np\n'), ((3045, 3076), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (3057, 3076), False, 'import argparse, time, logging, os, math, random\n'), ((3135, 3169), 'os.path.join', 'os.path.join', (['args.valdir', '"""train"""'], {}), "(args.valdir, 'train')\n", (3147, 3169), False, 'import argparse, time, logging, os, math, random\n'), ((3184, 3216), 'os.path.join', 'os.path.join', (['args.valdir', '"""val"""'], {}), "(args.valdir, 'val')\n", (3196, 3216), False, 'import argparse, time, logging, os, math, random\n'), ((3396, 3404), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (3402, 3404), True, 'import mxnet as mx\n'), ((5126, 5186), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['val_train_X', 'val_train_Y'], {}), '(val_train_X, val_train_Y)\n', (5160, 5186), True, 'import mxnet as mx\n'), ((5204, 5314), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_train_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'last_batch': '"""keep"""', 'num_workers': '(1)'}), "(val_train_dataset, batch_size=1000, shuffle=False,\n last_batch='keep', num_workers=1)\n", (5225, 5314), False, 'from mxnet import gluon, nd\n'), ((5386, 5442), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['val_val_X', 'val_val_Y'], {}), '(val_val_X, val_val_Y)\n', (5420, 5442), True, 'import mxnet as mx\n'), ((5458, 5566), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_val_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'last_batch': '"""keep"""', 'num_workers': '(1)'}), "(val_val_dataset, batch_size=1000, shuffle=False,\n last_batch='keep', num_workers=1)\n", (5479, 5566), False, 'from mxnet import gluon, nd\n'), ((8114, 8150), 'mxnet.gluon.loss.SoftmaxCrossEntropyLoss', 'gluon.loss.SoftmaxCrossEntropyLoss', ([], {}), '()\n', (8148, 8150), False, 'from mxnet import gluon, nd\n'), ((8167, 8187), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (8185, 8187), True, 'import mxnet as mx\n'), ((8200, 8220), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (8218, 8220), True, 'import mxnet as mx\n'), ((8232, 8257), 'mxnet.metric.TopKAccuracy', 'mx.metric.TopKAccuracy', (['(5)'], {}), '(5)\n', (8254, 8257), True, 'import mxnet as mx\n'), ((8280, 8304), 'mxnet.metric.CrossEntropy', 'mx.metric.CrossEntropy', ([], {}), '()\n', (8302, 8304), True, 'import mxnet as mx\n'), ((9074, 9086), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (9084, 9086), False, 'from mxnet import gluon, nd\n'), ((10845, 10857), 
'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (10855, 10857), False, 'from mxnet import gluon, nd\n'), ((10868, 10879), 'time.time', 'time.time', ([], {}), '()\n', (10877, 10879), False, 'import argparse, time, logging, os, math, random\n'), ((2721, 2742), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2738, 2742), False, 'import argparse, time, logging, os, math, random\n'), ((3261, 3279), 'os.listdir', 'listdir', (['train_dir'], {}), '(train_dir)\n', (3268, 3279), False, 'from os import listdir\n'), ((3306, 3339), 'os.path.join', 'os.path.join', (['train_dir', 'filename'], {}), '(train_dir, filename)\n', (3318, 3339), False, 'import argparse, time, logging, os, math, random\n'), ((4051, 4107), 'os.path.join', 'os.path.join', (['data_dir', "('train_data_%03d.pkl' % mpi_rank)"], {}), "(data_dir, 'train_data_%03d.pkl' % mpi_rank)\n", (4063, 4107), False, 'import argparse, time, logging, os, math, random\n'), ((4394, 4448), 'os.path.join', 'os.path.join', (['data_dir', "('val_data_%03d.pkl' % mpi_rank)"], {}), "(data_dir, 'val_data_%03d.pkl' % mpi_rank)\n", (4406, 4448), False, 'import argparse, time, logging, os, math, random\n'), ((4816, 4868), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['train_X', 'train_Y'], {}), '(train_X, train_Y)\n', (4850, 4868), True, 'import mxnet as mx\n'), ((4886, 5006), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': '(1)'}), "(train_dataset, batch_size=args.batchsize, shuffle=\n True, last_batch='rollover', num_workers=1)\n", (4907, 5006), False, 'from mxnet import gluon, nd\n'), ((5627, 5648), 'mxnet.gluon.nn.Sequential', 'gluon.nn.Sequential', ([], {}), '()\n', (5646, 5648), False, 'from mxnet import gluon, nd\n'), ((7385, 7422), 'gluoncv.model_zoo.get_model', 'get_model', (['model_name'], {}), '(model_name, **model_kwargs)\n', (7394, 7422), False, 'from gluoncv.model_zoo import get_model\n'), ((9551, 9580), 'random.shuffle', 'random.shuffle', (['randperm_list'], {}), '(randperm_list)\n', (9565, 9580), False, 'import argparse, time, logging, os, math, random\n'), ((9696, 9722), 'os.path.splitext', 'os.path.splitext', (['args.log'], {}), '(args.log)\n', (9712, 9722), False, 'import argparse, time, logging, os, math, random\n'), ((11142, 11153), 'time.time', 'time.time', ([], {}), '()\n', (11151, 11153), False, 'import argparse, time, logging, os, math, random\n'), ((13671, 13683), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (13681, 13683), False, 'from mxnet import gluon, nd\n'), ((14878, 14890), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (14888, 14890), False, 'from mxnet import gluon, nd\n'), ((14906, 14917), 'time.time', 'time.time', ([], {}), '()\n', (14915, 14917), False, 'import argparse, time, logging, os, math, random\n'), ((16547, 16559), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (16557, 16559), False, 'from mxnet import gluon, nd\n'), ((3524, 3538), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3535, 3538), False, 'import pickle\n'), ((3684, 3695), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (3692, 3695), False, 'from mxnet import gluon, nd\n'), ((3795, 3809), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3806, 3809), False, 'import pickle\n'), ((3969, 3994), 'mxnet.nd.array', 'nd.array', (['(classes - 1 - L)'], {}), '(classes - 1 - L)\n', (3977, 3994), False, 'from mxnet import gluon, nd\n'), ((4164, 4178), 
'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4175, 4178), False, 'import pickle\n'), ((4328, 4339), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (4336, 4339), False, 'from mxnet import gluon, nd\n'), ((4505, 4519), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4516, 4519), False, 'import pickle\n'), ((4669, 4680), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (4677, 4680), False, 'from mxnet import gluon, nd\n'), ((7505, 7521), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {}), '()\n', (7519, 7521), True, 'import mxnet as mx\n'), ((7561, 7580), 'mxnet.init.MSRAPrelu', 'mx.init.MSRAPrelu', ([], {}), '()\n', (7578, 7580), True, 'import mxnet as mx\n'), ((9496, 9543), 'math.ceil', 'math.ceil', (['(args.epochs * mpi_size / args.nsplit)'], {}), '(args.epochs * mpi_size / args.nsplit)\n', (9505, 9543), False, 'import argparse, time, logging, os, math, random\n'), ((10521, 10540), 'numpy.array', 'np.array', (['top1_list'], {}), '(top1_list)\n', (10529, 10540), True, 'import numpy as np\n'), ((10561, 10580), 'numpy.array', 'np.array', (['top5_list'], {}), '(top5_list)\n', (10569, 10580), True, 'import numpy as np\n'), ((10609, 10636), 'numpy.array', 'np.array', (['crossentropy_list'], {}), '(crossentropy_list)\n', (10617, 10636), True, 'import numpy as np\n'), ((12496, 12556), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['byz_train_X', 'byz_train_Y'], {}), '(byz_train_X, byz_train_Y)\n', (12530, 12556), True, 'import mxnet as mx\n'), ((12586, 12710), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['byz_train_dataset'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': '(1)'}), "(byz_train_dataset, batch_size=args.batchsize, shuffle\n =True, last_batch='rollover', num_workers=1)\n", (12607, 12710), False, 'from mxnet import gluon, nd\n'), ((14408, 14426), 'mxnet.nd.array', 'nd.array', (['param_np'], {}), '(param_np)\n', (14416, 14426), False, 'from mxnet import gluon, nd\n'), ((3656, 3667), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (3664, 3667), False, 'from mxnet import gluon, nd\n'), ((3941, 3952), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (3949, 3952), False, 'from mxnet import gluon, nd\n'), ((4300, 4311), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (4308, 4311), False, 'from mxnet import gluon, nd\n'), ((4641, 4652), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (4649, 4652), False, 'from mxnet import gluon, nd\n'), ((5729, 5807), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(64)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=64, kernel_size=3, padding=(1, 1), activation='relu')\n", (5744, 5807), False, 'from mxnet import gluon, nd\n'), ((5824, 5844), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (5842, 5844), False, 'from mxnet import gluon, nd\n'), ((5862, 5940), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(64)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=64, kernel_size=3, padding=(1, 1), activation='relu')\n", (5877, 5940), False, 'from mxnet import gluon, nd\n'), ((5957, 5977), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (5975, 5977), False, 'from mxnet import gluon, nd\n'), ((5995, 6037), 'mxnet.gluon.nn.MaxPool2D', 'gluon.nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (6013, 6037), False, 'from mxnet import gluon, nd\n'), ((6055, 
6082), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (6071, 6082), False, 'from mxnet import gluon, nd\n'), ((6236, 6315), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(128)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=128, kernel_size=3, padding=(1, 1), activation='relu')\n", (6251, 6315), False, 'from mxnet import gluon, nd\n'), ((6332, 6352), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (6350, 6352), False, 'from mxnet import gluon, nd\n'), ((6370, 6449), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(128)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=128, kernel_size=3, padding=(1, 1), activation='relu')\n", (6385, 6449), False, 'from mxnet import gluon, nd\n'), ((6466, 6486), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (6484, 6486), False, 'from mxnet import gluon, nd\n'), ((6504, 6546), 'mxnet.gluon.nn.MaxPool2D', 'gluon.nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (6522, 6546), False, 'from mxnet import gluon, nd\n'), ((6564, 6591), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (6580, 6591), False, 'from mxnet import gluon, nd\n'), ((7014, 7032), 'mxnet.gluon.nn.Flatten', 'gluon.nn.Flatten', ([], {}), '()\n', (7030, 7032), False, 'from mxnet import gluon, nd\n'), ((7166, 7204), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (7180, 7204), False, 'from mxnet import gluon, nd\n'), ((7222, 7249), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (7238, 7249), False, 'from mxnet import gluon, nd\n'), ((7267, 7290), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['classes'], {}), '(classes)\n', (7281, 7290), False, 'from mxnet import gluon, nd\n'), ((8550, 8561), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (8559, 8561), True, 'from mxnet import autograd as ag\n'), ((10195, 10214), 'mxnet.nd.softmax', 'nd.softmax', (['outputs'], {}), '(outputs)\n', (10205, 10214), False, 'from mxnet import gluon, nd\n'), ((12045, 12072), 'random.shuffle', 'random.shuffle', (['worker_list'], {}), '(worker_list)\n', (12059, 12072), False, 'import argparse, time, logging, os, math, random\n'), ((12433, 12462), 'random.choice', 'random.choice', (['training_files'], {}), '(training_files)\n', (12446, 12462), False, 'import argparse, time, logging, os, math, random\n'), ((12733, 12752), 'mxnet.init.MSRAPrelu', 'mx.init.MSRAPrelu', ([], {}), '()\n', (12750, 12752), True, 'import mxnet as mx\n'), ((15821, 15840), 'numpy.array', 'np.array', (['top1_list'], {}), '(top1_list)\n', (15829, 15840), True, 'import numpy as np\n'), ((15869, 15888), 'numpy.array', 'np.array', (['top5_list'], {}), '(top5_list)\n', (15877, 15888), True, 'import numpy as np\n'), ((15925, 15952), 'numpy.array', 'np.array', (['crossentropy_list'], {}), '(crossentropy_list)\n', (15933, 15952), True, 'import numpy as np\n'), ((13314, 13344), 'random.choice', 'random.choice', (['train_data_list'], {}), '(train_data_list)\n', (13327, 13344), False, 'import argparse, time, logging, os, math, random\n'), ((15431, 15450), 'mxnet.nd.softmax', 'nd.softmax', (['outputs'], {}), '(outputs)\n', (15441, 15450), False, 'from mxnet import gluon, nd\n'), ((16385, 16411), 'os.path.splitext', 'os.path.splitext', (['args.log'], {}), '(args.log)\n', (16401, 
16411), False, 'import argparse, time, logging, os, math, random\n'), ((12933, 12944), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (12942, 12944), True, 'from mxnet import autograd as ag\n'), ((13433, 13444), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (13442, 13444), True, 'from mxnet import autograd as ag\n'), ((14012, 14070), 'numpy.stack', 'np.stack', (['[params[j] for params in params_np_list]'], {'axis': '(0)'}), '([params[j] for params in params_np_list], axis=0)\n', (14020, 14070), True, 'import numpy as np\n'), ((14190, 14248), 'numpy.stack', 'np.stack', (['[params[j] for params in params_np_list]'], {'axis': '(0)'}), '([params[j] for params in params_np_list], axis=0)\n', (14198, 14248), True, 'import numpy as np\n'), ((16187, 16198), 'time.time', 'time.time', ([], {}), '()\n', (16196, 16198), False, 'import argparse, time, logging, os, math, random\n')]
|
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from multiprocessing import Pool
import pickle
import time
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
import plotting as plg
import utils.model_utils as mutils
import utils.exp_utils as utils
def get_mirrored_patch_crops(patch_crops, org_img_shape):
mirrored_patch_crops = []
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2],
ii[3], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
return mirrored_patch_crops
def get_mirrored_patch_crops_ax_dep(patch_crops, org_img_shape, mirror_axes):
mirrored_patch_crops = []
for ax_ix, axes in enumerate(mirror_axes):
if isinstance(axes, (int, float)) and int(axes) == 0:
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0],
ii[2], ii[3], ii[4], ii[5]]
for ii in patch_crops])
elif isinstance(axes, (int, float)) and int(axes) == 1:
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
elif hasattr(axes, "__iter__") and (tuple(axes) == (0, 1) or tuple(axes) == (1, 0)):
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
else:
raise Exception("invalid mirror axes {} in get mirrored patch crops".format(axes))
return mirrored_patch_crops
def apply_wbc_to_patient(inputs):
"""
wrapper around prediction box consolidation: weighted box clustering (wbc). processes a single patient.
loops over batch elements in patient results (1 in 3D, slices in 2D) and foreground classes,
aggregates and stores results in new list.
:return. patient_results_list: list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D
predictions, and a dummy batch dimension of 1 for 3D predictions.
:return. pid: string. patient id.
"""
regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens = inputs
out_patient_results_list = [[] for _ in range(len(in_patient_results_list))]
for bix, b in enumerate(in_patient_results_list):
for cl in list(class_dict.keys()):
boxes = [(ix, box) for ix, box in enumerate(b) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([b[1]['box_coords'] for b in boxes])
box_scores = np.array([b[1]['box_score'] for b in boxes])
box_center_factor = np.array([b[1]['box_patch_center_factor'] for b in boxes])
box_n_overlaps = np.array([b[1]['box_n_overlaps'] for b in boxes])
try:
box_patch_id = np.array([b[1]['patch_id'] for b in boxes])
except KeyError: #backward compatibility for already saved pred results ... omg
box_patch_id = np.array([b[1]['ens_ix'] for b in boxes])
box_regressions = np.array([b[1]['regression'] for b in boxes]) if regress_flag else None
box_rg_bins = np.array([b[1]['rg_bin'] if 'rg_bin' in b[1].keys() else float('NaN') for b in boxes])
box_rg_uncs = np.array([b[1]['rg_uncertainty'] if 'rg_uncertainty' in b[1].keys() else float('NaN') for b in boxes])
if 0 not in box_scores.shape:
keep_scores, keep_coords, keep_n_missing, keep_regressions, keep_rg_bins, keep_rg_uncs = \
weighted_box_clustering(box_coords, box_scores, box_center_factor, box_n_overlaps, box_rg_bins, box_rg_uncs,
box_regressions, box_patch_id, clustering_iou, n_ens)
for boxix in range(len(keep_scores)):
clustered_box = {'box_type': 'det', 'box_coords': keep_coords[boxix],
'box_score': keep_scores[boxix], 'cluster_n_missing': keep_n_missing[boxix],
'box_pred_class_id': cl}
if regress_flag:
clustered_box.update({'regression': keep_regressions[boxix],
'rg_uncertainty': keep_rg_uncs[boxix],
'rg_bin': keep_rg_bins[boxix]})
out_patient_results_list[bix].append(clustered_box)
# add gt boxes back to new output list.
out_patient_results_list[bix].extend([box for box in b if box['box_type'] == 'gt'])
return [out_patient_results_list, pid]
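# Illustration only (never called): a minimal sketch of how the `inputs` tuple
# consumed by apply_wbc_to_patient is assembled for a single patient. All field
# values below are hypothetical placeholders, not real predictions.
def _example_wbc_inputs_sketch():
    class_dict = {1: 'lesion'}  # foreground class id -> name (assumed layout)
    det_box = {'box_type': 'det', 'box_pred_class_id': 1,
               'box_coords': np.array([10, 10, 20, 20]), 'box_score': 0.9,
               'box_patch_center_factor': 1.0, 'box_n_overlaps': 1.0,
               'patch_id': '0_0_0'}
    in_patient_results_list = [[det_box]]  # one batch element (dummy dim of 1 for 3D)
    # packing order: (regress_flag, results_list, pid, class_dict, clustering_iou, n_ens)
    return False, in_patient_results_list, 'patient_0', class_dict, 0.1, 1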
def weighted_box_clustering(box_coords, scores, box_pc_facts, box_n_ovs, box_rg_bins, box_rg_uncs,
box_regress, box_patch_id, thresh, n_ens):
"""Consolidates overlapping predictions resulting from patch overlaps, test data augmentations and temporal ensembling.
clusters predictions together with iou > thresh (like in NMS). Output score and coordinate for one cluster are the
average weighted by individual patch center factors (how trustworthy is this candidate measured by how centered
its position within the patch is) and the size of the corresponding box.
The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position
(1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique
    patches in the cluster, which did not contribute any box predictions.
    :param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), scores, box_pc_facts, box_n_ovs)).
:param box_coords: y1, x1, y2, x2, (z1), (z2).
:param scores: confidence scores.
:param box_pc_facts: patch-center factors from position on patch tiles.
:param box_n_ovs: number of patch overlaps at box position.
:param box_rg_bins: regression bin predictions.
:param box_rg_uncs: (n_dets,) regression uncertainties (from model mrcnn_aleatoric).
:param box_regress: (n_dets, n_regression_features).
:param box_patch_id: ensemble index.
:param thresh: threshold for iou_matching.
:param n_ens: number of models, that are ensembled. (-> number of expected predictions per position).
:return: keep_scores: (n_keep) new scores of boxes to be kept.
:return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept.
"""
dim = 2 if box_coords.shape[1] == 4 else 3
y1 = box_coords[:,0]
x1 = box_coords[:,1]
y2 = box_coords[:,2]
x2 = box_coords[:,3]
areas = (y2 - y1 + 1) * (x2 - x1 + 1)
if dim == 3:
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
areas *= (z2 - z1 + 1)
    # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
order = scores.argsort()[::-1]
keep_scores = []
keep_coords = []
keep_n_missing = []
keep_regress = []
keep_rg_bins = []
keep_rg_uncs = []
while order.size > 0:
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order])
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
inter = w * h
if dim == 3:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0, zz2 - zz1 + 1)
inter *= d
# overlap between currently highest scoring box and all boxes.
ovr = inter / (areas[i] + areas[order] - inter)
ovr_fl = inter.astype('float64') / (areas[i] + areas[order] - inter.astype('float64'))
assert np.all(ovr==ovr_fl), "ovr {}\n ovr_float {}".format(ovr, ovr_fl)
# get all the predictions that match the current box to build one cluster.
matches = np.nonzero(ovr > thresh)[0]
match_n_ovs = box_n_ovs[order[matches]]
match_pc_facts = box_pc_facts[order[matches]]
match_patch_id = box_patch_id[order[matches]]
match_ov_facts = ovr[matches]
match_areas = areas[order[matches]]
match_scores = scores[order[matches]]
# weight all scores in cluster by patch factors, and size.
match_score_weights = match_ov_facts * match_areas * match_pc_facts
match_scores *= match_score_weights
# for the weighted average, scores have to be divided by the number of total expected preds at the position
# of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is
# multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be
# in areas of different overlaps).
n_expected_preds = n_ens * np.mean(match_n_ovs)
# the number of missing predictions is obtained as the number of patches,
# which did not contribute any prediction to the current cluster.
n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0]))
# missing preds are given the mean weighting
# (expected prediction is the mean over all predictions in cluster).
denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights)
# compute weighted average score for the cluster
avg_score = np.sum(match_scores) / denom
# compute weighted average of coordinates for the cluster. now only take existing
# predictions into account.
avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)]
if dim == 3:
avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores))
avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores))
if box_regress is not None:
# compute wt. avg. of regression vectors (component-wise average)
avg_regress = np.sum(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0) / np.sum(
match_scores)
avg_rg_bins = np.round(np.sum(box_rg_bins[order[matches]] * match_scores) / np.sum(match_scores))
avg_rg_uncs = np.sum(box_rg_uncs[order[matches]] * match_scores) / np.sum(match_scores)
else:
avg_regress = np.array(float('NaN'))
avg_rg_bins = np.array(float('NaN'))
avg_rg_uncs = np.array(float('NaN'))
# some clusters might have very low scores due to high amounts of missing predictions.
        # filter those out with a conservative threshold to speed up evaluation.
if avg_score > 0.01:
keep_scores.append(avg_score)
keep_coords.append(avg_coords)
keep_n_missing.append((n_missing_preds / n_expected_preds * 100)) # relative
keep_regress.append(avg_regress)
keep_rg_uncs.append(avg_rg_uncs)
keep_rg_bins.append(avg_rg_bins)
# get index of all elements that were not matched and discard all others.
inds = np.nonzero(ovr <= thresh)[0]
inds_where = np.where(ovr<=thresh)[0]
assert np.all(inds == inds_where), "inds_nonzero {} \ninds_where {}".format(inds, inds_where)
order = order[inds]
return keep_scores, keep_coords, keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs
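# Illustration only (never called): the cluster-averaging arithmetic from
# weighted_box_clustering on made-up numbers -- two matched boxes, three
# expected predictions at the position, so one prediction is counted as
# missing. A sketch of the formula above, not part of the pipeline.
def _wbc_score_sketch():
    match_scores = np.array([0.9, 0.6])       # raw confidences in the cluster
    match_ov_facts = np.array([1.0, 0.8])     # iou with the cluster's top box
    match_areas = np.array([100.0, 100.0])    # box areas
    match_pc_facts = np.array([1.0, 0.5])     # patch-center trust factors
    n_ens, match_n_ovs = 1, np.array([3, 3])  # 3 overlapping patches at this position
    weights = match_ov_facts * match_areas * match_pc_facts    # [100., 40.]
    weighted_scores = match_scores * weights                   # [90., 24.]
    n_expected = n_ens * np.mean(match_n_ovs)                  # 3 expected predictions
    n_missing = max(0, n_expected - 2)                         # only 2 unique patches contributed
    denom = np.sum(weights) + n_missing * np.mean(weights)     # 140 + 1 * 70 = 210
    return np.sum(weighted_scores) / denom                     # 114 / 210 ~= 0.54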
def apply_nms_to_patient(inputs):
in_patient_results_list, pid, class_dict, iou_thresh = inputs
out_patient_results_list = []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch in in_patient_results_list:
batch_el_boxes = []
for cl in list(class_dict.keys()):
det_boxes = [box for box in batch if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([box['box_coords'] for box in det_boxes])
box_scores = np.array([box['box_score'] for box in det_boxes])
if 0 not in box_scores.shape:
keep_ix = mutils.nms_numpy(box_coords, box_scores, iou_thresh)
else:
keep_ix = []
batch_el_boxes += [det_boxes[ix] for ix in keep_ix]
batch_el_boxes += [box for box in batch if box['box_type'] == 'gt']
out_patient_results_list.append(batch_el_boxes)
assert len(in_patient_results_list) == len(out_patient_results_list), "batch dim needs to be maintained, in: {}, out {}".format(len(in_patient_results_list), len(out_patient_results_list))
return [out_patient_results_list, pid]
def nms_2to3D(dets, thresh):
"""
Merges 2D boxes to 3D cubes. For this purpose, boxes of all slices are regarded as lying in one slice.
An adaptation of Non-maximum suppression is applied where clusters are found (like in NMS) with the extra constraint
that suppressed boxes have to have 'connected' z coordinates w.r.t the core slice (cluster center, highest
scoring box, the prevailing box). 'connected' z-coordinates are determined
as the z-coordinates with predictions until the first coordinate for which no prediction is found.
    example: a cluster of predictions was found with overlap > iou thresh in xy (like NMS). The z-coordinate of the
    highest scoring box is 50. Other predictions have z-coordinates 23, 46, 48, 49, 51, 52, 53, 56, 57.
Only the coordinates connected with 50 are clustered to one cube: 48, 49, 51, 52, 53. (46 not because nothing was
found in 47, so 47 is a 'hole', which interrupts the connection). Only the boxes corresponding to these coordinates
are suppressed. All others are kept for building of further clusters.
    This algorithm works better with a certain min_confidence of predictions, because low-confidence (e.g. noisy/cluttered)
predictions can break the relatively strong assumption of defining cubes' z-boundaries at the first 'hole' in the cluster.
    :param dets: (n_detections, (y1, x1, y2, x2, scores, slice_id)).
    :param thresh: iou matching threshold (like in NMS).
:return: keep: (n_keep,) 1D tensor of indices to be kept.
:return: keep_z: (n_keep, [z1, z2]) z-coordinates to be added to boxes, which are kept in order to form cubes.
"""
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
where maximum is taken needs to be the lower coordinate"""
scores = dets[:, -2]
slice_id = dets[:, -1]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
keep_z = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself: okay?
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1 + 1)
w = np.maximum(0.0, xx2 - xx1 + 1)
inter = h * w
iou = inter / (areas[i] + areas[order] - inter)
matches = np.argwhere(
iou > thresh) # get all the elements that match the current box and have a lower score
slice_ids = slice_id[order[matches]]
core_slice = slice_id[int(i)]
upper_holes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids]
lower_holes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids]
max_valid_slice_id = np.min(upper_holes) if len(upper_holes) > 0 else np.max(slice_ids)
min_valid_slice_id = np.max(lower_holes) if len(lower_holes) > 0 else np.min(slice_ids)
z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)]
# expand by one z voxel since box content is surrounded w/o overlap, i.e., z-content computed as z2-z1
z1 = np.min(slice_id[order[z_matches]]) - 1
z2 = np.max(slice_id[order[z_matches]]) + 1
keep.append(i)
keep_z.append([z1, z2])
order = np.delete(order, z_matches, axis=0)
return keep, keep_z
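# Illustration only (never called): how the return values of nms_2to3D are
# meant to be used, mirroring the loop in apply_2d_3d_merging_to_patient below.
# `dets_2d` is a hypothetical (n_detections, 6) numpy array of per-slice
# detections (y1, x1, y2, x2, score, slice_id).
def _nms_2to3D_usage_sketch(dets_2d, thresh=0.1):
    keep, keep_z = nms_2to3D(dets_2d, thresh)
    cubes = []
    for ix, (z1, z2) in zip(keep, keep_z):
        y1, x1, y2, x2 = dets_2d[ix, :4]
        cubes.append([y1, x1, y2, x2, z1, z2])  # kept 2D box expanded to a 3D cube
    return cubes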
def apply_2d_3d_merging_to_patient(inputs):
"""
wrapper around 2Dto3D merging operation. Processes a single patient. Takes 2D patient results (slices in batch dimension)
    and returns 3D patient results (dummy batch dimension of 1). Applies an adaptation of Non-Maximum Suppression
(Detailed methodology is described in nms_2to3D).
:return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]].
:return. pid: string. patient id.
"""
in_patient_results_list, pid, class_dict, merge_3D_iou = inputs
out_patient_results_list = []
for cl in list(class_dict.keys()):
det_boxes, slice_ids = [], []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch_ix, batch in enumerate(in_patient_results_list):
batch_element_det_boxes = [(ix, box) for ix, box in enumerate(batch) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
det_boxes += batch_element_det_boxes
slice_ids += [batch_ix] * len(batch_element_det_boxes)
box_coords = np.array([batch[1]['box_coords'] for batch in det_boxes])
box_scores = np.array([batch[1]['box_score'] for batch in det_boxes])
slice_ids = np.array(slice_ids)
if 0 not in box_scores.shape:
keep_ix, keep_z = nms_2to3D(
np.concatenate((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1), merge_3D_iou)
else:
keep_ix, keep_z = [], []
# store kept predictions in new results list and add corresponding z-dimension info to coordinates.
for kix, kz in zip(keep_ix, keep_z):
keep_box = det_boxes[kix][1]
keep_box['box_coords'] = list(keep_box['box_coords']) + kz
out_patient_results_list.append(keep_box)
gt_boxes = [box for b in in_patient_results_list for box in b if box['box_type'] == 'gt']
if len(gt_boxes) > 0:
assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D."
out_patient_results_list += gt_boxes
return [[out_patient_results_list], pid] # additional list wrapping is extra batch dim.
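# Illustration only (never called): the per-patient consolidation wrappers above
# are written so they can be mapped over patients with multiprocessing (Pool is
# imported at the top of this file). `patient_inputs` and `n_workers` are
# hypothetical names; the Predictor class below is assumed to drive this step.
def _consolidate_patients_sketch(patient_inputs, n_workers=4):
    # patient_inputs: list of tuples as consumed by apply_2d_3d_merging_to_patient
    with Pool(processes=n_workers) as pool:
        results = pool.map(apply_2d_3d_merging_to_patient, patient_inputs)
    return results  # list of [[out_patient_results_list], pid] entries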
class Predictor:
"""
Prediction pipeline:
- receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
- forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
- unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
Ensembling (mode == 'test'):
    - for inference, forwards 4 mirrored versions of the image through the model and unmolds predictions afterwards
accordingly (method: data_aug_forward)
- for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
Consolidation of predictions:
- consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
      performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outputs.
    - for 2D networks, consolidates box predictions to 3D cubes via clustering (adaptation of non-maximum suppression).
(external function: apply_2d_3d_merging_to_patient)
Ground truth handling:
    - dismisses any ground truth boxes returned by the model (happens in validation mode, patch-based ground truth)
- if provided by data loader, adds patient-wise ground truth to the final predictions to be passed to the evaluator.
"""
def __init__(self, cf, net, logger, mode):
self.cf = cf
self.batch_size = cf.batch_size
self.logger = logger
self.mode = mode
self.net = net
self.n_ens = 1
self.rank_ix = '0'
self.regress_flag = any(['regression' in task for task in self.cf.prediction_tasks])
if self.cf.merge_2D_to_3D_preds:
assert self.cf.dim == 2, "Merge 2Dto3D only valid for 2D preds, but current dim is {}.".format(self.cf.dim)
if self.mode == 'test':
last_state_path = os.path.join(self.cf.fold_dir, 'last_state.pth')
try:
self.model_index = torch.load(last_state_path)["model_index"]
self.model_index = self.model_index[self.model_index["rank"] <= self.cf.test_n_epochs]
except FileNotFoundError:
raise FileNotFoundError('no last_state/model_index file in fold directory. '
'seems like you are trying to run testing without prior training...')
self.n_ens = cf.test_n_epochs
if self.cf.test_aug_axes is not None:
self.n_ens *= (len(self.cf.test_aug_axes)+1)
self.example_plot_dir = os.path.join(cf.test_dir, "example_plots")
os.makedirs(self.example_plot_dir, exist_ok=True)
def batch_tiling_forward(self, batch):
"""
        calls the actual network forward method. in patch-based prediction, the batch dimension might be overloaded
with n_patches >> batch_size, which would exceed gpu memory. In this case, batches are processed in chunks of
batch_size. validation mode calls the train method to monitor losses (returned ground truth objects are discarded).
test mode calls the test forward method, no ground truth required / involved.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
img = batch['data']
if img.shape[0] <= self.batch_size:
if self.mode == 'val':
# call training method to monitor losses
results_dict = self.net.train_forward(batch, is_validation=True)
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
elif self.mode == 'test':
results_dict = self.net.test_forward(batch, return_masks=self.cf.return_masks_in_test)
else: # needs batch tiling
split_ixs = np.split(np.arange(img.shape[0]), np.arange(img.shape[0])[::self.batch_size])
chunk_dicts = []
for chunk_ixs in split_ixs[1:]: # first split is elements before 0, so empty
b = {k: batch[k][chunk_ixs] for k in batch.keys()
if (isinstance(batch[k], np.ndarray) and batch[k].shape[0] == img.shape[0])}
if self.mode == 'val':
chunk_dicts += [self.net.train_forward(b, is_validation=True)]
else:
chunk_dicts += [self.net.test_forward(b, return_masks=self.cf.return_masks_in_test)]
results_dict = {}
# flatten out batch elements from chunks ([chunk, chunk] -> [b, b, b, b, ...])
results_dict['boxes'] = [item for d in chunk_dicts for item in d['boxes']]
results_dict['seg_preds'] = np.array([item for d in chunk_dicts for item in d['seg_preds']])
if self.mode == 'val':
# if hasattr(self.cf, "losses_to_monitor"):
# loss_names = self.cf.losses_to_monitor
# else:
# loss_names = {name for dic in chunk_dicts for name in dic if 'loss' in name}
# estimate patient loss by mean over batch_chunks. Most similar to training loss.
results_dict['torch_loss'] = torch.mean(torch.cat([d['torch_loss'] for d in chunk_dicts]))
results_dict['class_loss'] = np.mean([d['class_loss'] for d in chunk_dicts])
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
return results_dict
def spatial_tiling_forward(self, batch, patch_crops = None, n_aug='0'):
"""
forwards batch to batch_tiling_forward method and receives and returns a dictionary with results.
if patch-based prediction, the results received from batch_tiling_forward will be on a per-patch-basis.
this method uses the provided patch_crops to re-transform all predictions to whole-image coordinates.
Patch-origin information of all box-predictions will be needed for consolidation, hence it is stored as
'patch_id', which is a unique string for each patch (also takes current data aug and temporal epoch instances
into account). all box predictions get additional information about the amount overlapping patches at the
respective position (used for consolidation).
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- monitor_values (only in validation mode)
returned dict is a flattened version with 1 batch instance (3D) or slices (2D)
"""
if patch_crops is not None:
#print("patch_crops not None, applying patch center factor")
patches_dict = self.batch_tiling_forward(batch)
results_dict = {'boxes': [[] for _ in range(batch['original_img_shape'][0])]}
            # because of one-hot encoding --> channel dim of seg has size num_classes
out_seg_shape = list(batch['original_img_shape'])
out_seg_shape[1] = patches_dict["seg_preds"].shape[1]
out_seg_preds = np.zeros(out_seg_shape, dtype=np.float16)
patch_overlap_map = np.zeros_like(out_seg_preds, dtype='uint8')
for pix, pc in enumerate(patch_crops):
if self.cf.dim == 3:
out_seg_preds[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += patches_dict['seg_preds'][pix]
patch_overlap_map[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += 1
elif self.cf.dim == 2:
out_seg_preds[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += patches_dict['seg_preds'][pix]
patch_overlap_map[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += 1
out_seg_preds[patch_overlap_map > 0] /= patch_overlap_map[patch_overlap_map > 0]
results_dict['seg_preds'] = out_seg_preds
for pix, pc in enumerate(patch_crops):
patch_boxes = patches_dict['boxes'][pix]
for box in patch_boxes:
# add unique patch id for consolidation of predictions.
box['patch_id'] = self.rank_ix + '_' + n_aug + '_' + str(pix)
# boxes from the edges of a patch have a lower prediction quality, than the ones at patch-centers.
# hence they will be down-weighted for consolidation, using the 'box_patch_center_factor', which is
# obtained by a gaussian distribution over positions in the patch and average over spatial dimensions.
# Also the info 'box_n_overlaps' is stored for consolidation, which represents the amount of
# overlapping patches at the box's position.
c = box['box_coords']
#box_centers = np.array([(c[ii] + c[ii+2])/2 for ii in range(len(c)//2)])
box_centers = [(c[ii] + c[ii + 2]) / 2 for ii in range(2)]
if self.cf.dim == 3:
box_centers.append((c[4] + c[5]) / 2)
box['box_patch_center_factor'] = np.mean(
[norm.pdf(bc, loc=pc, scale=pc * 0.8) * np.sqrt(2 * np.pi) * pc * 0.8 for bc, pc in
zip(box_centers, np.array(self.cf.patch_size) / 2)])
if self.cf.dim == 3:
c += np.array([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])
int_c = [int(np.floor(ii)) if ix%2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]])
results_dict['boxes'][0].append(box)
else:
c += np.array([pc[0], pc[2], pc[0], pc[2]])
int_c = [int(np.floor(ii)) if ix % 2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(
patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])
results_dict['boxes'][pc[4]].append(box)
if self.mode == 'val':
results_dict['torch_loss'] = patches_dict['torch_loss']
results_dict['class_loss'] = patches_dict['class_loss']
else:
results_dict = self.batch_tiling_forward(batch)
for b in results_dict['boxes']:
for box in b:
box['box_patch_center_factor'] = 1
box['box_n_overlaps'] = 1
box['patch_id'] = self.rank_ix + '_' + n_aug
return results_dict
def data_aug_forward(self, batch):
"""
in val_mode: passes batch through to spatial_tiling method without data_aug.
        in test_mode: if cf.test_aug is set in configs, creates 4 mirrored versions of the input image,
passes all of them to the next processing step (spatial_tiling method) and re-transforms returned predictions
to original image version.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
patch_crops = batch['patch_crop_coords'] if self.patched_patient else None
results_list = [self.spatial_tiling_forward(batch, patch_crops)]
org_img_shape = batch['original_img_shape']
if self.mode == 'test' and self.cf.test_aug_axes is not None:
if isinstance(self.cf.test_aug_axes, (int, float)):
self.cf.test_aug_axes = (self.cf.test_aug_axes,)
#assert np.all(np.array(self.cf.test_aug_axes)<self.cf.dim), "test axes {} need to be spatial axes".format(self.cf.test_aug_axes)
if self.patched_patient:
# apply mirror transformations to patch-crop coordinates, for correct tiling in spatial_tiling method.
mirrored_patch_crops = get_mirrored_patch_crops_ax_dep(patch_crops, batch['original_img_shape'],
self.cf.test_aug_axes)
self.logger.info("mirrored patch crop coords for patched patient in test augs!")
else:
mirrored_patch_crops = [None] * 3
img = np.copy(batch['data'])
for n_aug, sp_axis in enumerate(self.cf.test_aug_axes):
#sp_axis = np.array(axis) #-2 #spatial axis index
axis = np.array(sp_axis)+2
if isinstance(sp_axis, (int, float)):
# mirroring along one axis at a time
batch['data'] = np.flip(img, axis=axis).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis+2]
coords[sp_axis+2] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=axis)
                elif hasattr(sp_axis, "__iter__") and (tuple(sp_axis) == (0, 1) or tuple(sp_axis) == (1, 0)):
#NEED: mirrored patch crops are given as [(y-axis), (x-axis), (y-,x-axis)], obey this order!
# mirroring along two axes at same time
batch['data'] = np.flip(np.flip(img, axis=axis[0]), axis=axis[1]).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis[0]] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]+2]
coords[sp_axis[0]+2] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]]
coords[sp_axis[1]] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]+2]
coords[sp_axis[1]+2] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(np.flip(chunk_dict['seg_preds'], axis=axis[0]), axis=axis[1]).copy()
else:
raise Exception("Invalid axis type {} in test augs".format(type(axis)))
results_list.append(chunk_dict)
batch['data'] = img
# aggregate all boxes/seg_preds per batch element from data_aug predictions.
results_dict = {}
results_dict['boxes'] = [[item for d in results_list for item in d['boxes'][batch_instance]]
for batch_instance in range(org_img_shape[0])]
# results_dict['seg_preds'] = np.array([[item for d in results_list for item in d['seg_preds'][batch_instance]]
# for batch_instance in range(org_img_shape[0])])
results_dict['seg_preds'] = np.stack([dic['seg_preds'] for dic in results_list], axis=1)
# needs segs probs in seg_preds entry:
results_dict['seg_preds'] = np.sum(results_dict['seg_preds'], axis=1) #add up seg probs from different augs per class
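# Note: the class-wise seg scores summed here stay soft; in test mode they are
# only reduced to hard label maps later via np.argmax in predict_test_set.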
if self.mode == 'val':
results_dict['torch_loss'] = results_list[0]['torch_loss']
results_dict['class_loss'] = results_list[0]['class_loss']
return results_dict
def load_saved_predictions(self):
"""loads raw predictions saved by self.predict_test_set. aggregates and/or merges 2D boxes to 3D cubes for
evaluation (if model predicts 2D but evaluation is run in 3D), according to settings config.
:return: list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'batch_dices': dice scores as recorded in raw prediction results.
- 'seg_preds': not implemented yet. could replace dices by seg preds to have raw seg info available, however
this would consume a critically large amount of memory. todo: evaluation of instance/semantic segmentation.
"""
results_file = 'pred_results.pkl' if not self.cf.hold_out_test_set else 'pred_results_held_out.pkl'
if not self.cf.hold_out_test_set or not self.cf.ensemble_folds:
self.logger.info("loading saved predictions of fold {}".format(self.cf.fold))
with open(os.path.join(self.cf.fold_dir, results_file), 'rb') as handle:
results_list = pickle.load(handle)
box_results_list = [(res_dict["boxes"], pid) for res_dict, pid in results_list]
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor
self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
len(results_list), self.n_ens))
else:
self.logger.info("loading saved predictions of hold-out test set")
fold_dirs = sorted([os.path.join(self.cf.exp_dir, f) for f in os.listdir(self.cf.exp_dir) if
os.path.isdir(os.path.join(self.cf.exp_dir, f)) and f.startswith("fold")])
results_list = []
folds_loaded = 0
for fold in range(self.cf.n_cv_splits):
fold_dir = os.path.join(self.cf.exp_dir, 'fold_{}'.format(fold))
if fold_dir in fold_dirs:
with open(os.path.join(fold_dir, results_file), 'rb') as handle:
fold_list = pickle.load(handle)
results_list += fold_list
folds_loaded += 1
else:
self.logger.info("Skipping fold {} since no saved predictions found.".format(fold))
box_results_list = []
for res_dict, pid in results_list: #without filtering gt out:
box_results_list.append((res_dict['boxes'], pid))
#it's usually not right to filter out gts here, is it?
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor * folds_loaded
# -------------- aggregation of boxes via clustering -----------------
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou {} and n_ens {} over {} patients'.format(
self.cf.clustering_iou, self.n_ens, len(box_results_list)))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii
in box_results_list]
del box_results_list
pool = Pool(processes=self.cf.n_workers)
box_results_list = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou {} over {} patients.'.format(
self.cf.clustering_iou, len(box_results_list)))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in box_results_list]
del box_results_list
box_results_list = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2Dto3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in box_results_list]
box_results_list = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
for ix in range(len(results_list)):
assert np.all(results_list[ix][1] == box_results_list[ix][1]), "pid mismatch between loaded and aggregated results"
results_list[ix][0]["boxes"] = box_results_list[ix][0]
return results_list # holds (results_dict, pid)
def predict_patient(self, batch):
"""
predicts one patient.
called either directly via loop over validation set in exec.py (mode=='val')
or from self.predict_test_set (mode=='test').
in val mode: adds 3D ground truth info to predictions and runs consolidation and 2Dto3D merging of predictions.
in test mode: returns raw predictions (ground truth addition, consolidation, 2D to 3D merging are
done in self.predict_test_set, because patient predictions across several epochs might be needed
to be collected first, in case of temporal ensembling).
:return: results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
#if self.mode=="test":
# self.logger.info('predicting patient {} for fold {} '.format(np.unique(batch['pid']), self.cf.fold))
# True if patient is provided in patches and predictions need to be tiled.
self.patched_patient = 'patch_crop_coords' in list(batch.keys())
# forward batch through prediction pipeline.
results_dict = self.data_aug_forward(batch)
#has seg probs in entry 'seg_preds'
if self.mode == 'val':
for b in range(batch['patient_bb_target'].shape[0]):
for t in range(len(batch['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['patient_bb_target'][b][t],
'class_targets': batch['patient_class_targets'][b][t]}
for name in self.cf.roi_items:
gt_box.update({name : batch['patient_'+name][b][t]})
results_dict['boxes'][b].append(gt_box)
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
if self.patched_patient and self.cf.clustering == "wbc":
wbc_input = [self.regress_flag, results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou, self.n_ens]
results_dict['boxes'] = apply_wbc_to_patient(wbc_input)[0]
elif self.patched_patient:
nms_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou]
results_dict['boxes'] = apply_nms_to_patient(nms_inputs)[0]
if self.cf.merge_2D_to_3D_preds:
results_dict['2D_boxes'] = results_dict['boxes']
merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
results_dict['boxes'] = apply_2d_3d_merging_to_patient(merge_dims_inputs)[0]
return results_dict
def predict_test_set(self, batch_gen, return_results=True):
"""
wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
the test set and collects predictions per patient. Also flattens the results per patient and epoch
and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
optionally consolidates and returns predictions immediately.
:return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': not implemented yet. todo evaluation of instance/semantic segmentation.
"""
# -------------- raw predicting -----------------
dict_of_patients_results = OrderedDict()
set_of_result_types = set()
self.model_index = self.model_index.sort_values(by="rank")
# get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
weight_paths = [os.path.join(self.cf.fold_dir, file_name) for file_name in self.model_index["file_name"]]
for rank_ix, weight_path in enumerate(weight_paths):
self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
self.net.load_state_dict(torch.load(weight_path))
self.net.eval()
self.rank_ix = str(rank_ix)
plot_batches = np.random.choice(np.arange(batch_gen['n_test']),
size=min(batch_gen['n_test'], self.cf.n_test_plots), replace=False)
with torch.no_grad():
for i in range(batch_gen['n_test']):
batch = next(batch_gen['test'])
pid = np.unique(batch['pid'])
assert len(pid)==1
pid = pid[0]
if not pid in dict_of_patients_results.keys(): # store batch info in patient entry of results dict.
dict_of_patients_results[pid] = {}
dict_of_patients_results[pid]['results_dicts'] = []
dict_of_patients_results[pid]['patient_bb_target'] = batch['patient_bb_target']
for name in self.cf.roi_items:
dict_of_patients_results[pid]["patient_"+name] = batch["patient_"+name]
stime = time.time()
results_dict = self.predict_patient(batch) #only holds "boxes", "seg_preds"
# needs ohe seg probs in seg_preds entry:
results_dict['seg_preds'] = np.argmax(results_dict['seg_preds'], axis=1)[:,np.newaxis]
print("\rpredicting patient {} with weight rank {} (progress: {}/{}) took {:.2f}s".format(
str(pid), rank_ix, (rank_ix)*batch_gen['n_test']+(i+1), len(weight_paths)*batch_gen['n_test'],
time.time()-stime), end="", flush=True)
if i in plot_batches and (not self.patched_patient or 'patient_data' in batch.keys()):
try:
# view qualitative results of random test case
out_file = os.path.join(self.example_plot_dir,
'batch_example_test_{}_rank_{}.png'.format(self.cf.fold, rank_ix))
utils.split_off_process(plg.view_batch, self.cf, batch, results_dict,
has_colorchannels=self.cf.has_colorchannels,
show_gt_labels=True, show_seg_ids='dice' in self.cf.metrics,
get_time="test-example plot", out_file=out_file)
except Exception as e:
self.logger.info("WARNING: error in view_batch: {}".format(e))
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class( results_dict['seg_preds'],
batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
dict_of_patients_results[pid]['results_dicts'].append({k:v for k,v in results_dict.items()
if k in ["boxes", "batch_dices"]})
# collect result types to know which ones to look for when saving
set_of_result_types.update(dict_of_patients_results[pid]['results_dicts'][-1].keys())
# -------------- re-order, save raw results -----------------
self.logger.info('finished predicting test set. starting aggregation of predictions.')
results_per_patient = []
for pid, p_dict in dict_of_patients_results.items():
# p_dict['results_dicts'] holds one results dict per loaded weight rank (temporal ensembling)
results_dict = {}
# collect all boxes/seg_preds of same batch_instance over temporal instances.
b_size = len(p_dict['results_dicts'][0]["boxes"])
for res_type in [rtype for rtype in set_of_result_types if rtype in ["boxes", "batch_dices"]]:#, "seg_preds"]]:
if not 'batch' in res_type: #assume it's results on batch-element basis
results_dict[res_type] = [[item for rank_dict in p_dict['results_dicts'] for item in rank_dict[res_type][batch_instance]]
for batch_instance in range(b_size)]
else:
results_dict[res_type] = []
for dict in p_dict['results_dicts']:
if 'dice' in res_type:
item = dict[res_type] #dict['batch_dices'] has shape (num_seg_classes,)
assert len(item) == self.cf.num_seg_classes, \
"{}, {}".format(len(item), self.cf.num_seg_classes)
else:
raise NotImplementedError
results_dict[res_type].append(item)
# rdict[dice] shape (n_rank_epochs (n_saved_ranks), nsegclasses)
# calc mean over test epochs so inline with shape from sampling
results_dict[res_type] = np.mean(results_dict[res_type], axis=0) #maybe error type with other than dice
if not hasattr(self.cf, "eval_test_separately") or not self.cf.eval_test_separately:
# add unpatched 2D or 3D (if dim==3 or merge_2D_to_3D) ground truth boxes for evaluation.
for b in range(p_dict['patient_bb_target'].shape[0]):
for targ in range(len(p_dict['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords':p_dict['patient_bb_target'][b][targ],
'class_targets': p_dict['patient_class_targets'][b][targ]}
for name in self.cf.roi_items:
gt_box.update({name: p_dict["patient_"+name][b][targ]})
results_dict['boxes'][b].append(gt_box)
results_per_patient.append([results_dict, pid])
out_string = 'pred_results_held_out' if self.cf.hold_out_test_set else 'pred_results'
with open(os.path.join(self.cf.fold_dir, '{}.pkl'.format(out_string)), 'wb') as handle:
pickle.dump(results_per_patient, handle)
if return_results:
# -------------- results processing, clustering, etc. -----------------
final_patient_box_results = [ (res_dict["boxes"], pid) for res_dict,pid in results_per_patient ]
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou = {} and n_ens = {}.'.format(
self.cf.clustering_iou, self.n_ens))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou = {}.'.format(self.cf.clustering_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in final_patient_box_results]
del final_patient_box_results
final_patient_box_results = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2D-to-3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
# final_patient_box_results holds [avg_boxes, pid] if wbc
for ix in range(len(results_per_patient)):
assert results_per_patient[ix][1] == final_patient_box_results[ix][1], "should be same pid"
results_per_patient[ix][0]["boxes"] = final_patient_box_results[ix][0]
# results_per_patient = [(res_dict["boxes"] = boxes, pid) for (boxes,pid) in final_patient_box_results]
return results_per_patient # holds list of (results_dict, pid)
|
[
"numpy.sqrt",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.flip",
"os.listdir",
"numpy.where",
"numpy.delete",
"numpy.max",
"numpy.stack",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"collections.OrderedDict",
"numpy.ceil",
"pickle.load",
"numpy.argmax",
"numpy.floor",
"scipy.stats.norm.pdf",
"numpy.nonzero",
"time.time",
"torch.cat",
"numpy.copy",
"pickle.dump",
"numpy.minimum",
"os.makedirs",
"numpy.unique",
"torch.load",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"utils.exp_utils.split_off_process",
"multiprocessing.Pool",
"torch.no_grad",
"numpy.all",
"utils.model_utils.nms_numpy",
"numpy.zeros_like",
"utils.model_utils.dice_per_batch_and_class"
] |
[((10050, 10078), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (10060, 10078), True, 'import numpy as np\n'), ((10093, 10121), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (10103, 10121), True, 'import numpy as np\n'), ((10136, 10164), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (10146, 10164), True, 'import numpy as np\n'), ((10179, 10207), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (10189, 10207), True, 'import numpy as np\n'), ((10221, 10249), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (10231, 10249), True, 'import numpy as np\n'), ((10262, 10290), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (10272, 10290), True, 'import numpy as np\n'), ((10735, 10756), 'numpy.all', 'np.all', (['(ovr == ovr_fl)'], {}), '(ovr == ovr_fl)\n', (10741, 10756), True, 'import numpy as np\n'), ((14444, 14470), 'numpy.all', 'np.all', (['(inds == inds_where)'], {}), '(inds == inds_where)\n', (14450, 14470), True, 'import numpy as np\n'), ((17594, 17610), 'numpy.all', 'np.all', (['(y1 <= y2)'], {}), '(y1 <= y2)\n', (17600, 17610), True, 'import numpy as np\n'), ((17615, 17631), 'numpy.all', 'np.all', (['(x1 <= x2)'], {}), '(x1 <= x2)\n', (17621, 17631), True, 'import numpy as np\n'), ((18105, 18133), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (18115, 18133), True, 'import numpy as np\n'), ((18222, 18250), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (18232, 18250), True, 'import numpy as np\n'), ((18265, 18293), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (18275, 18293), True, 'import numpy as np\n'), ((18308, 18336), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (18318, 18336), True, 'import numpy as np\n'), ((18350, 18380), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (18360, 18380), True, 'import numpy as np\n'), ((18393, 18423), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (18403, 18423), True, 'import numpy as np\n'), ((18521, 18546), 'numpy.argwhere', 'np.argwhere', (['(iou > thresh)'], {}), '(iou > thresh)\n', (18532, 18546), True, 'import numpy as np\n'), ((19499, 19534), 'numpy.delete', 'np.delete', (['order', 'z_matches'], {'axis': '(0)'}), '(order, z_matches, axis=0)\n', (19508, 19534), True, 'import numpy as np\n'), ((20822, 20879), 'numpy.array', 'np.array', (["[batch[1]['box_coords'] for batch in det_boxes]"], {}), "([batch[1]['box_coords'] for batch in det_boxes])\n", (20830, 20879), True, 'import numpy as np\n'), ((20901, 20957), 'numpy.array', 'np.array', (["[batch[1]['box_score'] for batch in det_boxes]"], {}), "([batch[1]['box_score'] for batch in det_boxes])\n", (20909, 20957), True, 'import numpy as np\n'), ((20978, 20997), 'numpy.array', 'np.array', (['slice_ids'], {}), '(slice_ids)\n', (20986, 20997), True, 'import numpy as np\n'), ((39772, 39832), 'numpy.stack', 'np.stack', (["[dic['seg_preds'] for dic in results_list]"], {'axis': '(1)'}), "([dic['seg_preds'] for dic in results_list], axis=1)\n", (39780, 39832), True, 'import numpy as np\n'), ((39916, 39957), 'numpy.sum', 'np.sum', (["results_dict['seg_preds']"], {'axis': '(1)'}), "(results_dict['seg_preds'], axis=1)\n", (39922, 39957), 
True, 'import numpy as np\n'), ((50095, 50108), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50106, 50108), False, 'from collections import OrderedDict\n'), ((5492, 5537), 'numpy.array', 'np.array', (["[b[1]['box_coords'] for b in boxes]"], {}), "([b[1]['box_coords'] for b in boxes])\n", (5500, 5537), True, 'import numpy as np\n'), ((5563, 5607), 'numpy.array', 'np.array', (["[b[1]['box_score'] for b in boxes]"], {}), "([b[1]['box_score'] for b in boxes])\n", (5571, 5607), True, 'import numpy as np\n'), ((5640, 5698), 'numpy.array', 'np.array', (["[b[1]['box_patch_center_factor'] for b in boxes]"], {}), "([b[1]['box_patch_center_factor'] for b in boxes])\n", (5648, 5698), True, 'import numpy as np\n'), ((5728, 5777), 'numpy.array', 'np.array', (["[b[1]['box_n_overlaps'] for b in boxes]"], {}), "([b[1]['box_n_overlaps'] for b in boxes])\n", (5736, 5777), True, 'import numpy as np\n'), ((10353, 10381), 'numpy.maximum', 'np.maximum', (['z1[i]', 'z1[order]'], {}), '(z1[i], z1[order])\n', (10363, 10381), True, 'import numpy as np\n'), ((10400, 10428), 'numpy.minimum', 'np.minimum', (['z2[i]', 'z2[order]'], {}), '(z2[i], z2[order])\n', (10410, 10428), True, 'import numpy as np\n'), ((10445, 10473), 'numpy.maximum', 'np.maximum', (['(0)', '(zz2 - zz1 + 1)'], {}), '(0, zz2 - zz1 + 1)\n', (10455, 10473), True, 'import numpy as np\n'), ((10901, 10925), 'numpy.nonzero', 'np.nonzero', (['(ovr > thresh)'], {}), '(ovr > thresh)\n', (10911, 10925), True, 'import numpy as np\n'), ((11821, 11841), 'numpy.mean', 'np.mean', (['match_n_ovs'], {}), '(match_n_ovs)\n', (11828, 11841), True, 'import numpy as np\n'), ((12238, 12265), 'numpy.sum', 'np.sum', (['match_score_weights'], {}), '(match_score_weights)\n', (12244, 12265), True, 'import numpy as np\n'), ((12393, 12413), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12399, 12413), True, 'import numpy as np\n'), ((14354, 14379), 'numpy.nonzero', 'np.nonzero', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (14364, 14379), True, 'import numpy as np\n'), ((14404, 14427), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (14412, 14427), True, 'import numpy as np\n'), ((15139, 15189), 'numpy.array', 'np.array', (["[box['box_coords'] for box in det_boxes]"], {}), "([box['box_coords'] for box in det_boxes])\n", (15147, 15189), True, 'import numpy as np\n'), ((15215, 15264), 'numpy.array', 'np.array', (["[box['box_score'] for box in det_boxes]"], {}), "([box['box_score'] for box in det_boxes])\n", (15223, 15264), True, 'import numpy as np\n'), ((18949, 18968), 'numpy.min', 'np.min', (['upper_holes'], {}), '(upper_holes)\n', (18955, 18968), True, 'import numpy as np\n'), ((18998, 19015), 'numpy.max', 'np.max', (['slice_ids'], {}), '(slice_ids)\n', (19004, 19015), True, 'import numpy as np\n'), ((19045, 19064), 'numpy.max', 'np.max', (['lower_holes'], {}), '(lower_holes)\n', (19051, 19064), True, 'import numpy as np\n'), ((19094, 19111), 'numpy.min', 'np.min', (['slice_ids'], {}), '(slice_ids)\n', (19100, 19111), True, 'import numpy as np\n'), ((19336, 19370), 'numpy.min', 'np.min', (['slice_id[order[z_matches]]'], {}), '(slice_id[order[z_matches]])\n', (19342, 19370), True, 'import numpy as np\n'), ((19388, 19422), 'numpy.max', 'np.max', (['slice_id[order[z_matches]]'], {}), '(slice_id[order[z_matches]])\n', (19394, 19422), True, 'import numpy as np\n'), ((24004, 24052), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', '"""last_state.pth"""'], {}), "(self.cf.fold_dir, 'last_state.pth')\n", (24016, 
24052), False, 'import os\n'), ((24676, 24718), 'os.path.join', 'os.path.join', (['cf.test_dir', '"""example_plots"""'], {}), "(cf.test_dir, 'example_plots')\n", (24688, 24718), False, 'import os\n'), ((24731, 24780), 'os.makedirs', 'os.makedirs', (['self.example_plot_dir'], {'exist_ok': '(True)'}), '(self.example_plot_dir, exist_ok=True)\n', (24742, 24780), False, 'import os\n'), ((27335, 27399), 'numpy.array', 'np.array', (["[item for d in chunk_dicts for item in d['seg_preds']]"], {}), "([item for d in chunk_dicts for item in d['seg_preds']])\n", (27343, 27399), True, 'import numpy as np\n'), ((30136, 30177), 'numpy.zeros', 'np.zeros', (['out_seg_shape'], {'dtype': 'np.float16'}), '(out_seg_shape, dtype=np.float16)\n', (30144, 30177), True, 'import numpy as np\n'), ((30210, 30253), 'numpy.zeros_like', 'np.zeros_like', (['out_seg_preds'], {'dtype': '"""uint8"""'}), "(out_seg_preds, dtype='uint8')\n", (30223, 30253), True, 'import numpy as np\n'), ((35770, 35792), 'numpy.copy', 'np.copy', (["batch['data']"], {}), "(batch['data'])\n", (35777, 35792), True, 'import numpy as np\n'), ((43876, 43909), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (43880, 43909), False, 'from multiprocessing import Pool\n'), ((44836, 44869), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (44840, 44869), False, 'from multiprocessing import Pool\n'), ((45215, 45269), 'numpy.all', 'np.all', (['(results_list[ix][1] == box_results_list[ix][1])'], {}), '(results_list[ix][1] == box_results_list[ix][1])\n', (45221, 45269), True, 'import numpy as np\n'), ((50356, 50397), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', 'file_name'], {}), '(self.cf.fold_dir, file_name)\n', (50368, 50397), False, 'import os\n'), ((57028, 57068), 'pickle.dump', 'pickle.dump', (['results_per_patient', 'handle'], {}), '(results_per_patient, handle)\n', (57039, 57068), False, 'import pickle\n'), ((5826, 5869), 'numpy.array', 'np.array', (["[b[1]['patch_id'] for b in boxes]"], {}), "([b[1]['patch_id'] for b in boxes])\n", (5834, 5869), True, 'import numpy as np\n'), ((6065, 6110), 'numpy.array', 'np.array', (["[b[1]['regression'] for b in boxes]"], {}), "([b[1]['regression'] for b in boxes])\n", (6073, 6110), True, 'import numpy as np\n'), ((12286, 12314), 'numpy.mean', 'np.mean', (['match_score_weights'], {}), '(match_score_weights)\n', (12293, 12314), True, 'import numpy as np\n'), ((12571, 12612), 'numpy.sum', 'np.sum', (['(y1[order[matches]] * match_scores)'], {}), '(y1[order[matches]] * match_scores)\n', (12577, 12612), True, 'import numpy as np\n'), ((12615, 12635), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12621, 12635), True, 'import numpy as np\n'), ((12659, 12700), 'numpy.sum', 'np.sum', (['(x1[order[matches]] * match_scores)'], {}), '(x1[order[matches]] * match_scores)\n', (12665, 12700), True, 'import numpy as np\n'), ((12703, 12723), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12709, 12723), True, 'import numpy as np\n'), ((12747, 12788), 'numpy.sum', 'np.sum', (['(y2[order[matches]] * match_scores)'], {}), '(y2[order[matches]] * match_scores)\n', (12753, 12788), True, 'import numpy as np\n'), ((12791, 12811), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12797, 12811), True, 'import numpy as np\n'), ((12835, 12876), 'numpy.sum', 'np.sum', (['(x2[order[matches]] * match_scores)'], {}), '(x2[order[matches]] * match_scores)\n', 
(12841, 12876), True, 'import numpy as np\n'), ((12879, 12899), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12885, 12899), True, 'import numpy as np\n'), ((13256, 13329), 'numpy.sum', 'np.sum', (['(box_regress[order[matches]] * match_scores[:, np.newaxis])'], {'axis': '(0)'}), '(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0)\n', (13262, 13329), True, 'import numpy as np\n'), ((13332, 13352), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13338, 13352), True, 'import numpy as np\n'), ((13506, 13556), 'numpy.sum', 'np.sum', (['(box_rg_uncs[order[matches]] * match_scores)'], {}), '(box_rg_uncs[order[matches]] * match_scores)\n', (13512, 13556), True, 'import numpy as np\n'), ((13559, 13579), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13565, 13579), True, 'import numpy as np\n'), ((15333, 15385), 'utils.model_utils.nms_numpy', 'mutils.nms_numpy', (['box_coords', 'box_scores', 'iou_thresh'], {}), '(box_coords, box_scores, iou_thresh)\n', (15349, 15385), True, 'import utils.model_utils as mutils\n'), ((21094, 21171), 'numpy.concatenate', 'np.concatenate', (['(box_coords, box_scores[:, None], slice_ids[:, None])'], {'axis': '(1)'}), '((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1)\n', (21108, 21171), True, 'import numpy as np\n'), ((26485, 26508), 'numpy.arange', 'np.arange', (['img.shape[0]'], {}), '(img.shape[0])\n', (26494, 26508), True, 'import numpy as np\n'), ((27930, 27977), 'numpy.mean', 'np.mean', (["[d['class_loss'] for d in chunk_dicts]"], {}), "([d['class_loss'] for d in chunk_dicts])\n", (27937, 27977), True, 'import numpy as np\n'), ((41600, 41619), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (41611, 41619), False, 'import pickle\n'), ((44311, 44344), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (44315, 44344), False, 'from multiprocessing import Pool\n'), ((47884, 48062), 'utils.model_utils.dice_per_batch_and_class', 'mutils.dice_per_batch_and_class', (["results_dict['seg_preds']", "(batch['patient_seg'] if self.patched_patient else batch['seg'])", 'self.cf.num_seg_classes'], {'convert_to_ohe': '(True)'}), "(results_dict['seg_preds'], batch[\n 'patient_seg'] if self.patched_patient else batch['seg'], self.cf.\n num_seg_classes, convert_to_ohe=True)\n", (47915, 48062), True, 'import utils.model_utils as mutils\n'), ((50649, 50672), 'torch.load', 'torch.load', (['weight_path'], {}), '(weight_path)\n', (50659, 50672), False, 'import torch\n'), ((50786, 50816), 'numpy.arange', 'np.arange', (["batch_gen['n_test']"], {}), "(batch_gen['n_test'])\n", (50795, 50816), True, 'import numpy as np\n'), ((50947, 50962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (50960, 50962), False, 'import torch\n'), ((57726, 57759), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (57730, 57759), False, 'from multiprocessing import Pool\n'), ((58902, 58935), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (58906, 58935), False, 'from multiprocessing import Pool\n'), ((5993, 6034), 'numpy.array', 'np.array', (["[b[1]['ens_ix'] for b in boxes]"], {}), "([b[1]['ens_ix'] for b in boxes])\n", (6001, 6034), True, 'import numpy as np\n'), ((12953, 12994), 'numpy.sum', 'np.sum', (['(z1[order[matches]] * match_scores)'], {}), '(z1[order[matches]] * match_scores)\n', (12959, 12994), True, 'import numpy as 
np\n'), ((12997, 13017), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13003, 13017), True, 'import numpy as np\n'), ((13049, 13090), 'numpy.sum', 'np.sum', (['(z2[order[matches]] * match_scores)'], {}), '(z2[order[matches]] * match_scores)\n', (13055, 13090), True, 'import numpy as np\n'), ((13093, 13113), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13099, 13113), True, 'import numpy as np\n'), ((13405, 13455), 'numpy.sum', 'np.sum', (['(box_rg_bins[order[matches]] * match_scores)'], {}), '(box_rg_bins[order[matches]] * match_scores)\n', (13411, 13455), True, 'import numpy as np\n'), ((13458, 13478), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13464, 13478), True, 'import numpy as np\n'), ((18776, 18793), 'numpy.max', 'np.max', (['slice_ids'], {}), '(slice_ids)\n', (18782, 18793), True, 'import numpy as np\n'), ((18865, 18882), 'numpy.min', 'np.min', (['slice_ids'], {}), '(slice_ids)\n', (18871, 18882), True, 'import numpy as np\n'), ((24105, 24132), 'torch.load', 'torch.load', (['last_state_path'], {}), '(last_state_path)\n', (24115, 24132), False, 'import torch\n'), ((26510, 26533), 'numpy.arange', 'np.arange', (['img.shape[0]'], {}), '(img.shape[0])\n', (26519, 26533), True, 'import numpy as np\n'), ((27834, 27883), 'torch.cat', 'torch.cat', (["[d['torch_loss'] for d in chunk_dicts]"], {}), "([d['torch_loss'] for d in chunk_dicts])\n", (27843, 27883), False, 'import torch\n'), ((35951, 35968), 'numpy.array', 'np.array', (['sp_axis'], {}), '(sp_axis)\n', (35959, 35968), True, 'import numpy as np\n'), ((37203, 37246), 'numpy.flip', 'np.flip', (["chunk_dict['seg_preds']"], {'axis': 'axis'}), "(chunk_dict['seg_preds'], axis=axis)\n", (37210, 37246), True, 'import numpy as np\n'), ((41506, 41550), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', 'results_file'], {}), '(self.cf.fold_dir, results_file)\n', (41518, 41550), False, 'import os\n'), ((42149, 42181), 'os.path.join', 'os.path.join', (['self.cf.exp_dir', 'f'], {}), '(self.cf.exp_dir, f)\n', (42161, 42181), False, 'import os\n'), ((51096, 51119), 'numpy.unique', 'np.unique', (["batch['pid']"], {}), "(batch['pid'])\n", (51105, 51119), True, 'import numpy as np\n'), ((51737, 51748), 'time.time', 'time.time', ([], {}), '()\n', (51746, 51748), False, 'import time\n'), ((55936, 55975), 'numpy.mean', 'np.mean', (['results_dict[res_type]'], {'axis': '(0)'}), '(results_dict[res_type], axis=0)\n', (55943, 55975), True, 'import numpy as np\n'), ((58143, 58176), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (58147, 58176), False, 'from multiprocessing import Pool\n'), ((32414, 32466), 'numpy.array', 'np.array', (['[pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]]'], {}), '([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])\n', (32422, 32466), True, 'import numpy as np\n'), ((32630, 32724), 'numpy.mean', 'np.mean', (['patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]\n ]'], {}), '(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c\n [4]:int_c[5]])\n', (32637, 32724), True, 'import numpy as np\n'), ((32836, 32874), 'numpy.array', 'np.array', (['[pc[0], pc[2], pc[0], pc[2]]'], {}), '([pc[0], pc[2], pc[0], pc[2]])\n', (32844, 32874), True, 'import numpy as np\n'), ((33039, 33113), 'numpy.mean', 'np.mean', (['patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]]'], {}), '(patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])\n', (33046, 33113), True, 'import 
numpy as np\n'), ((42191, 42218), 'os.listdir', 'os.listdir', (['self.cf.exp_dir'], {}), '(self.cf.exp_dir)\n', (42201, 42218), False, 'import os\n'), ((42685, 42704), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (42696, 42704), False, 'import pickle\n'), ((51955, 51999), 'numpy.argmax', 'np.argmax', (["results_dict['seg_preds']"], {'axis': '(1)'}), "(results_dict['seg_preds'], axis=1)\n", (51964, 51999), True, 'import numpy as np\n'), ((53533, 53711), 'utils.model_utils.dice_per_batch_and_class', 'mutils.dice_per_batch_and_class', (["results_dict['seg_preds']", "(batch['patient_seg'] if self.patched_patient else batch['seg'])", 'self.cf.num_seg_classes'], {'convert_to_ohe': '(True)'}), "(results_dict['seg_preds'], batch[\n 'patient_seg'] if self.patched_patient else batch['seg'], self.cf.\n num_seg_classes, convert_to_ohe=True)\n", (53564, 53711), True, 'import utils.model_utils as mutils\n'), ((12054, 12079), 'numpy.unique', 'np.unique', (['match_patch_id'], {}), '(match_patch_id)\n', (12063, 12079), True, 'import numpy as np\n'), ((36118, 36141), 'numpy.flip', 'np.flip', (['img'], {'axis': 'axis'}), '(img, axis=axis)\n', (36125, 36141), True, 'import numpy as np\n'), ((42594, 42630), 'os.path.join', 'os.path.join', (['fold_dir', 'results_file'], {}), '(fold_dir, results_file)\n', (42606, 42630), False, 'import os\n'), ((52742, 52978), 'utils.exp_utils.split_off_process', 'utils.split_off_process', (['plg.view_batch', 'self.cf', 'batch', 'results_dict'], {'has_colorchannels': 'self.cf.has_colorchannels', 'show_gt_labels': '(True)', 'show_seg_ids': "('dice' in self.cf.metrics)", 'get_time': '"""test-example plot"""', 'out_file': 'out_file'}), "(plg.view_batch, self.cf, batch, results_dict,\n has_colorchannels=self.cf.has_colorchannels, show_gt_labels=True,\n show_seg_ids='dice' in self.cf.metrics, get_time='test-example plot',\n out_file=out_file)\n", (52765, 52978), True, 'import utils.exp_utils as utils\n'), ((42268, 42300), 'os.path.join', 'os.path.join', (['self.cf.exp_dir', 'f'], {}), '(self.cf.exp_dir, f)\n', (42280, 42300), False, 'import os\n'), ((52268, 52279), 'time.time', 'time.time', ([], {}), '()\n', (52277, 52279), False, 'import time\n'), ((32504, 32516), 'numpy.floor', 'np.floor', (['ii'], {}), '(ii)\n', (32512, 32516), True, 'import numpy as np\n'), ((32540, 32551), 'numpy.ceil', 'np.ceil', (['ii'], {}), '(ii)\n', (32547, 32551), True, 'import numpy as np\n'), ((32912, 32924), 'numpy.floor', 'np.floor', (['ii'], {}), '(ii)\n', (32920, 32924), True, 'import numpy as np\n'), ((32950, 32961), 'numpy.ceil', 'np.ceil', (['ii'], {}), '(ii)\n', (32957, 32961), True, 'import numpy as np\n'), ((37567, 37593), 'numpy.flip', 'np.flip', (['img'], {'axis': 'axis[0]'}), '(img, axis=axis[0])\n', (37574, 37593), True, 'import numpy as np\n'), ((38962, 39008), 'numpy.flip', 'np.flip', (["chunk_dict['seg_preds']"], {'axis': 'axis[0]'}), "(chunk_dict['seg_preds'], axis=axis[0])\n", (38969, 39008), True, 'import numpy as np\n'), ((32183, 32219), 'scipy.stats.norm.pdf', 'norm.pdf', (['bc'], {'loc': 'pc', 'scale': '(pc * 0.8)'}), '(bc, loc=pc, scale=pc * 0.8)\n', (32191, 32219), False, 'from scipy.stats import norm\n'), ((32222, 32240), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (32229, 32240), True, 'import numpy as np\n'), ((32308, 32336), 'numpy.array', 'np.array', (['self.cf.patch_size'], {}), '(self.cf.patch_size)\n', (32316, 32336), True, 'import numpy as np\n')]
|
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['tensorflow==1.8.0','pandas==0.23.1','setuptools==38.7.0','numpy==1.14.1','Keras==2.1.4','scikit_learn==0.19.1','h5py']
setup(
name='classifier',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='My training application package.',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
zip_safe=False
)
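# Usage sketch (illustrative, not part of the original file): a source
# distribution of this trainer package can be built with the standard
# setuptools command `python setup.py sdist`; the archive written to dist/ is
# what a managed training service would install before running the trainer.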
|
[
"setuptools.find_packages"
] |
[((311, 326), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (324, 326), False, 'from setuptools import find_packages\n')]
|
import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Optional, Sequence
from pydantic import BaseModel, Field, FilePath
@contextmanager
def temp_config(**kwargs):
"""A context manager that creates a temporary config file for SIMReconstructor.
`**kwargs` should be valid keyword arguments for :class:`ReconParams`.
"""
params = ReconParams(**kwargs)
tf = NamedTemporaryFile(delete=False)
tf.file.write(params.to_config().encode()) # type: ignore
tf.close()
try:
yield tf
finally:
os.unlink(tf.name)
class ReconParams(BaseModel):
otf_file: Optional[FilePath] = Field(None, description="OTF file")
usecorr: bool = Field(
False, description="use the flat-field correction file provided"
)
ndirs: int = Field(default=3, description="number of directions")
nphases: int = Field(default=5, description="number of phases per direction")
nordersout: int = Field(
0, description="number of output orders; must be <= norders"
)
angle0: float = Field(1.648, description="angle of the first direction in radians")
ls: float = Field(0.172, description="line spacing of SIM pattern in microns")
na: float = Field(1.42, description="Detection numerical aperture")
nimm: float = Field(1.515, description="refractive index of immersion medium")
zoomfact: float = Field(2, description="lateral oversampling factor")
explodefact: float = Field(
1,
description="artificially exploding the reciprocal-space "
"distance between orders by this factor",
)
zzoom: int = Field(1, description="axial zoom factor")
nofilteroverlaps: bool = Field(
False,
description="do not filter the overlaping region between bands "
"usually used in trouble shooting",
)
background: float = Field(0, description="camera readout background")
wiener: float = Field(0.01, description="Wiener constant")
forcemodamp: Optional[Sequence[float]] = Field(
None, description="modamps forced to these values"
)
k0angles: Optional[Sequence[float]] = Field(
None, description="user given pattern vector k0 angles for all directions"
)
otfRA: bool = Field(True, description="using rotationally averaged OTF")
otfPerAngle: bool = Field(True, description="using one OTF per SIM angle")
fastSI: bool = Field(
True,
description="SIM data is organized in Z->Angle->Phase order; "
"default being Angle->Z->Phase",
)
k0searchAll: bool = Field(False, description="search for k0 at all time points")
norescale: bool = Field(False, description="bleach correcting for z") # TODO
equalizez: bool = Field(True, description="bleach correcting for z")
equalizet: bool = Field(True, description="bleach correcting for time")
dampenOrder0: bool = Field(True, description="dampen order-0 in final assembly")
nosuppress: bool = Field(
False,
description="do not suppress DC singularity in final assembly "
"(good idea for 2D/TIRF data)",
)
nokz0: bool = Field(
True, description="do not use kz=0 plane of the 0th order in the final assembly"
)
gammaApo: float = Field(
1, description="output apodization gamma; 1.0 means triangular apo"
)
bessel: bool = Field(False, description="bessel-SIM data")
besselExWave: float = Field(
0.488, description="Bessel SIM excitation wavelength in microns"
)
besselNA: float = Field(0.144, description="Bessel SIM excitation NA)")
deskew: float = Field(
0,
description="Deskew angle; if not 0.0 then perform deskewing before processing",
)
deskewshift: int = Field(
0,
description="If deskewed, the output image's extra shift in X (positive->left)",
)
noRecon: bool = Field(
False,
description="No reconstruction will be performed; "
"useful when combined with --deskew",
)
cropXY: int = Field(
0, description="Crop the XY dimension to this number; 0 means no cropping"
)
xyres: float = Field(0.1, description="XY pixel size")
zres: float = Field(0.2, description="Z step size")
zresPSF: float = Field(0.15, description="Z step size of the PSF")
wavelength: int = Field(530, description="emission wavelength in nanometers")
writeTitle: bool = Field(
False,
description="Write command line to image header "
"(may cause issues with bioformats)",
)
def to_config(self, exclude_unset=True):
lines = []
for k, v in self.dict(exclude_unset=exclude_unset).items():
if k == "k0angles":
v = ",".join(str(x) for x in v)
if isinstance(v, bool):
v = int(v)
lines.append(f'{k.replace("_", "-")}={v}')
return "\n".join(lines)
|
[
"pydantic.Field",
"os.unlink",
"tempfile.NamedTemporaryFile"
] |
[((433, 465), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (451, 465), False, 'from tempfile import NamedTemporaryFile\n'), ((677, 712), 'pydantic.Field', 'Field', (['None'], {'description': '"""OTF file"""'}), "(None, description='OTF file')\n", (682, 712), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((733, 804), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""use the flat-field correction file provided"""'}), "(False, description='use the flat-field correction file provided')\n", (738, 804), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((836, 888), 'pydantic.Field', 'Field', ([], {'default': '(3)', 'description': '"""number of directions"""'}), "(default=3, description='number of directions')\n", (841, 888), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((908, 970), 'pydantic.Field', 'Field', ([], {'default': '(5)', 'description': '"""number of phases per direction"""'}), "(default=5, description='number of phases per direction')\n", (913, 970), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((993, 1060), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""number of output orders; must be <= norders"""'}), "(0, description='number of output orders; must be <= norders')\n", (998, 1060), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1095, 1162), 'pydantic.Field', 'Field', (['(1.648)'], {'description': '"""angle of the first direction in radians"""'}), "(1.648, description='angle of the first direction in radians')\n", (1100, 1162), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1179, 1245), 'pydantic.Field', 'Field', (['(0.172)'], {'description': '"""line spacing of SIM pattern in microns"""'}), "(0.172, description='line spacing of SIM pattern in microns')\n", (1184, 1245), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1262, 1317), 'pydantic.Field', 'Field', (['(1.42)'], {'description': '"""Detection numerical aperture"""'}), "(1.42, description='Detection numerical aperture')\n", (1267, 1317), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1336, 1400), 'pydantic.Field', 'Field', (['(1.515)'], {'description': '"""refractive index of immersion medium"""'}), "(1.515, description='refractive index of immersion medium')\n", (1341, 1400), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1423, 1474), 'pydantic.Field', 'Field', (['(2)'], {'description': '"""lateral oversampling factor"""'}), "(2, description='lateral oversampling factor')\n", (1428, 1474), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1500, 1616), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""artificially exploding the reciprocal-space distance between orders by this factor"""'}), "(1, description=\n 'artificially exploding the reciprocal-space distance between orders by this factor'\n )\n", (1505, 1616), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1658, 1699), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""axial zoom factor"""'}), "(1, description='axial zoom factor')\n", (1663, 1699), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1729, 1849), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""do not filter the overlaping region between bands usually used in trouble shooting"""'}), "(False, description=\n 'do not filter the overlaping region between bands usually used in trouble shooting'\n )\n", (1734, 1849), False, 'from pydantic 
import BaseModel, Field, FilePath\n'), ((1898, 1947), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""camera readout background"""'}), "(0, description='camera readout background')\n", (1903, 1947), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((1968, 2010), 'pydantic.Field', 'Field', (['(0.01)'], {'description': '"""Wiener constant"""'}), "(0.01, description='Wiener constant')\n", (1973, 2010), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2056, 2113), 'pydantic.Field', 'Field', (['None'], {'description': '"""modamps forced to these values"""'}), "(None, description='modamps forced to these values')\n", (2061, 2113), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2170, 2256), 'pydantic.Field', 'Field', (['None'], {'description': '"""user given pattern vector k0 angles for all directions"""'}), "(None, description=\n 'user given pattern vector k0 angles for all directions')\n", (2175, 2256), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2284, 2342), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""using rotationally averaged OTF"""'}), "(True, description='using rotationally averaged OTF')\n", (2289, 2342), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2367, 2421), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""using one OTF per SIM angle"""'}), "(True, description='using one OTF per SIM angle')\n", (2372, 2421), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2441, 2555), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""SIM data is organized in Z->Angle->Phase order; default being Angle->Z->Phase"""'}), "(True, description=\n 'SIM data is organized in Z->Angle->Phase order; default being Angle->Z->Phase'\n )\n", (2446, 2555), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2604, 2664), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""search for k0 at all time points"""'}), "(False, description='search for k0 at all time points')\n", (2609, 2664), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2687, 2738), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""bleach correcting for z"""'}), "(False, description='bleach correcting for z')\n", (2692, 2738), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2769, 2819), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""bleach correcting for z"""'}), "(True, description='bleach correcting for z')\n", (2774, 2819), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2842, 2895), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""bleach correcting for time"""'}), "(True, description='bleach correcting for time')\n", (2847, 2895), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((2921, 2980), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""dampen order-0 in final assembly"""'}), "(True, description='dampen order-0 in final assembly')\n", (2926, 2980), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3004, 3119), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""do not suppress DC singularity in final assembly (good idea for 2D/TIRF data)"""'}), "(False, description=\n 'do not suppress DC singularity in final assembly (good idea for 2D/TIRF data)'\n )\n", (3009, 3119), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3162, 3254), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""do not use kz=0 plane of the 0th order in the final assembly"""'}), 
"(True, description=\n 'do not use kz=0 plane of the 0th order in the final assembly')\n", (3167, 3254), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3286, 3360), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""output apodization gamma; 1.0 means triangular apo"""'}), "(1, description='output apodization gamma; 1.0 means triangular apo')\n", (3291, 3360), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3394, 3437), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""bessel-SIM data"""'}), "(False, description='bessel-SIM data')\n", (3399, 3437), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3464, 3535), 'pydantic.Field', 'Field', (['(0.488)'], {'description': '"""Bessel SIM excitation wavelength in microns"""'}), "(0.488, description='Bessel SIM excitation wavelength in microns')\n", (3469, 3535), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3572, 3625), 'pydantic.Field', 'Field', (['(0.144)'], {'description': '"""Bessel SIM excitation NA)"""'}), "(0.144, description='Bessel SIM excitation NA)')\n", (3577, 3625), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3646, 3740), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Deskew angle; if not 0.0 then perform deskewing before processing"""'}), "(0, description=\n 'Deskew angle; if not 0.0 then perform deskewing before processing')\n", (3651, 3740), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3782, 3876), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""If deskewed, the output image\'s extra shift in X (positive->left)"""'}), '(0, description=\n "If deskewed, the output image\'s extra shift in X (positive->left)")\n', (3787, 3876), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((3915, 4019), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""No reconstruction will be performed; useful when combined with --deskew"""'}), "(False, description=\n 'No reconstruction will be performed; useful when combined with --deskew')\n", (3920, 4019), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4067, 4153), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Crop the XY dimension to this number; 0 means no cropping"""'}), "(0, description=\n 'Crop the XY dimension to this number; 0 means no cropping')\n", (4072, 4153), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4182, 4221), 'pydantic.Field', 'Field', (['(0.1)'], {'description': '"""XY pixel size"""'}), "(0.1, description='XY pixel size')\n", (4187, 4221), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4240, 4277), 'pydantic.Field', 'Field', (['(0.2)'], {'description': '"""Z step size"""'}), "(0.2, description='Z step size')\n", (4245, 4277), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4299, 4348), 'pydantic.Field', 'Field', (['(0.15)'], {'description': '"""Z step size of the PSF"""'}), "(0.15, description='Z step size of the PSF')\n", (4304, 4348), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4371, 4430), 'pydantic.Field', 'Field', (['(530)'], {'description': '"""emission wavelength in nanometers"""'}), "(530, description='emission wavelength in nanometers')\n", (4376, 4430), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((4454, 4556), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""Write command line to image header (may cause issues with bioformats)"""'}), "(False, description=\n 'Write command line to image header (may cause 
issues with bioformats)')\n", (4459, 4556), False, 'from pydantic import BaseModel, Field, FilePath\n'), ((591, 609), 'os.unlink', 'os.unlink', (['tf.name'], {}), '(tf.name)\n', (600, 609), False, 'import os\n')]
|
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.preprocessing import LabelEncoder
def load_data():
questionnaire = pd.read_excel('XAutoML.xlsx')
encoder = LabelEncoder()
encoder.classes_ = np.array(['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree'])
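# Setting classes_ by hand fixes the Likert order, so transform() maps the five
# labels 'strongly disagree' .. 'strongly agree' to 0 .. 4 in every answer column.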
for c in questionnaire.columns:
try:
questionnaire.loc[:, c] = questionnaire.loc[:, c].str.strip().str.lower()
questionnaire.loc[:, c] = encoder.transform(questionnaire.loc[:, c])
except (AttributeError, ValueError):
pass
questionnaire.columns = questionnaire.columns.str.strip()
requirements = pd.read_excel('task_results.ods', sheet_name='Requirements', skiprows=1)
requirements = requirements.drop(index=[24], columns=['Unnamed: 1']).T
requirements.columns = requirements.iloc[0]
requirements = requirements[1:]
tasks = pd.read_excel('task_results.ods', sheet_name=0)
tasks = tasks.dropna(axis=1, how='all').dropna(axis=0, how='all')
tasks.index = tasks.iloc[:, 0]
tasks.drop(columns=tasks.columns[:2], inplace=True)
return questionnaire, requirements, tasks
def calculate_sus(df: pd.DataFrame):
invert = [False, False, True, False, True, False, True, False, True, True]
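# Responses are coded 0-4; items flagged in `invert` are reversed (4 - x), every
# item is then scaled by 2.5, so the row-wise sum yields a 0-100 SUS-style score.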
for c, inv in zip(df.columns, invert):
if inv:
df.loc[:, c] = 4 - df.loc[:, c]
df.loc[:, c] = df.loc[:, c] * 2.5
score = df.sum(axis=1)
print('###### System Usability Score ######')
print(df.mean(axis=0))
print(score.mean(), score.std())
print('\n\n')
def print_visual_design(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Visual Design ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def print_previous_knowledge(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Previous Knowledge ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def plot_priority_distribution(df: pd.DataFrame, group=False):
def calc_user_group(value: str):
return value.strip().split('.')[0]
x = []
y = []
m = []
for col in df:
y.append(df[col].to_list())
x.append([col] * df.shape[0])
m.append(df[col].index.map(calc_user_group))
x = np.array(x).flatten()
y = 24 - np.array(y).flatten()
m = np.array(m).flatten()
data = pd.DataFrame({'x': x, 'y': y, 'role': m})
mean = data.groupby(by=['x', 'role']).mean().reset_index()
mean = pd.DataFrame({
'Domain Expert': 24 - mean.loc[mean['role'] == 'Domain Expert', 'y'].reset_index(drop=True),
'Data Scientist': 24 - mean.loc[mean['role'] == 'Data Scientist', 'y'].reset_index(drop=True),
'AutoML Researcher': 24 - mean.loc[mean['role'] == 'AutoML Researcher', 'y'].reset_index(drop=True),
'All': 24 - data.groupby('x').mean()['y'].reset_index(drop=True)
})
print('Average card rank')
for _, row in mean.iterrows():
print(f'\\({row[0]:.1f}\\)\t& \\({row[1]:.1f}\\)\t& \\({row[2]:.1f}\\)\t& \\({row[3]:.1f}\\) \\\\')
print('\n\n')
if group:
replacements = {
'#01': ['#02', '#03', '#04'],
'#05': ['#06', '#07', '#08'],
'#09': ['#10', '#11', '#12'],
'#15': ['#16'],
'#19': ['#20'],
# '#22': ['#23', '#24']
}
for key, values in replacements.items():
for value in values:
data.loc[data['x'] == value, 'x'] = key
rename = {
'#01': 'Input Data',
'#05': 'Pre-Proc. Data',
'#09': 'Feat.-Eng. Data',
'#13': 'Complete Pipeline',
'#14': 'Search Space',
'#15': 'Search Strategy',
'#17': 'Perf. Metrics',
'#18': 'Perf. Visual.',
'#19': 'Explanations',
'#21': 'View Hyperparam.',
'#22': 'Comp. Perf.',
'#23': 'Comp. Pipelines',
'#24': 'Comp. Hyperparam.'
}
else:
rename = {
'#01': 'R01 View Input',
'#02': 'R02 Desc Input',
'#03': 'R03 Input Stat',
'#04': 'R04 Plot Input',
'#05': 'R05 View Pre-Proc',
'#06': 'R06 Desc Pre-Proc',
'#07': 'R07 Pre-Proc Stat',
'#08': 'R08 Plot Pre-Proc',
'#09': 'R09 View Feat-Eng',
'#10': 'R10 Feat-Eng Stat',
'#11': 'R11 Plot Feat-Eng',
'#12': 'R12 Desc Feat-Eng',
'#13': 'R13 Complete Pipe',
'#14': 'R14 Search Space',
'#15': 'R15 Pipe Search Strat',
'#16': 'R16 HP Search Strat',
'#17': 'R17 View Perf Metrics',
'#18': 'R18 Plot Perf Visual',
'#19': 'R19 Global Expl',
'#20': 'R20 Local Expl',
'#21': 'R21 View HP',
'#22': 'R22 Comp Perf',
'#23': 'R23 Comp Pipe',
'#24': 'R24 Comp HP'
}
for old, new in rename.items():
data.loc[data['x'] == old, 'x'] = new
data.loc[data['role'] == 'AutoML Researcher', 'role'] = 'Data Scientist'
print('Difference between user groups per card')
for card in data['x'].unique():
ds = data[(data['x'] == card) & (data['role'] == 'Data Scientist')]
de = data[(data['x'] == card) & (data['role'] == 'Domain Expert')]
t = ttest_ind(ds['y'].values, de['y'].values)
if t.pvalue < 0.05:
print(f'{card} {t.pvalue:.5f}')
print('\n\n')
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
fig.tight_layout()
sns.violinplot(data=data, x='x', y='y', hue='role', split=True, palette='pastel', ax=ax)
sns.despine(left=True)
ax.set_ylim(0, 24)
ax.set_yticklabels([])
ax.set_ylabel(None)
ax.set_xlabel(None)
box = ax.get_position()
if group:
plt.xticks(rotation=15)
fig.text(0.0125, 0.2, 'least important', rotation=90, va='bottom')
fig.text(0.0125, 0.95, 'most important', rotation=90, va='top')
ax.set_position([box.x0, box.y0 + box.height * 0.125, box.width, box.height * 0.875])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
else:
plt.xticks(rotation=25, ha='right', rotation_mode='anchor')
fig.text(0.025, 0.225, 'least important', rotation=90, va='bottom')
fig.text(0.025, 0.91, 'most important', rotation=90, va='top')
ax.set_position([box.x0 + 0.015, box.y0 + box.height * 0.15, box.width, box.height * 0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.13), ncol=2)
fig.show()
fig.savefig('requirement_cards.pdf')
def calculate_trust_result(text_df: pd.DataFrame, vis_df: pd.DataFrame):
def cohen_d(x: pd.Series, y: pd.Series):
nx = len(x)
ny = len(y)
dof = nx + ny - 2
return (x.mean() - y.mean()) / math.sqrt(((nx - 1) * x.std() ** 2 + (ny - 1) * y.std() ** 2) / dof)
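    # cohen_d above is Cohen's d effect size: the difference of the group means
    # divided by the pooled standard deviation (dof = nx + ny - 2).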
vis_df.columns = text_df.columns
print('###### Trust ######')
for col in text_df:
if col == 'Role':
continue
text = text_df.loc[:, col]
vis = vis_df.loc[:, col]
t = ttest_ind(text.values, vis.values, alternative='less')
print(
            f'{col}, \\({text.mean() + 1:.2f} \\pm {text.std():.2f}\\), \\({vis.mean() + 1:.2f} \\pm {vis.std():.2f}\\), \\(p = {t.pvalue:.2e}\\), \\(d = {cohen_d(text, vis):.2f}\\)')
text_de, vis_de = text_df[text_df['Role'] == 'domain expert'], vis_df[vis_df['Role'] == 'domain expert']
text_ar, vis_ar = text_df[text_df['Role'] == 'automl researcher'], vis_df[vis_df['Role'] == 'automl researcher']
text_ds, vis_ds = text_df[text_df['Role'] == 'data scientist'], vis_df[vis_df['Role'] == 'data scientist']
for col in text_df:
if col == 'Role':
continue
print(
f'\\({text_de[col].mean() + 1:.2f}\\)\t& \\({text_ds[col].mean() + 1:.2f}\\)\t& \\({text_ar[col].mean() + 1:.2f}\\)\t& \\({text_df[col].mean() + 1:.2f}\\) \\\\')
print(
f'\\({vis_de[col].mean() + 1:.2f}\\)\t& \\({vis_ds[col].mean() + 1:.2f}\\)\t& \\({vis_ar[col].mean() + 1:.2f}\\)\t& \\({vis_df[col].mean() + 1:.2f}\\) \\\\')
print('\n\n')
def calculate_task_success(df: pd.DataFrame):
encoder = LabelEncoder()
encoder.classes_ = np.array(['n', 'y'])
for c in df.columns:
df.loc[:, c] = encoder.transform(df.loc[:, c])
with pd.option_context('display.precision', 0):
print('Task success percentage')
print(df.mean(axis=1) * 100)
print(df.mean().mean() * 100)
print('\n\n')
def index(df: pd.DataFrame, slice_) -> pd.DataFrame:
    df2 = df.iloc[:, slice_].copy()  # copy to avoid mutating a view of the questionnaire
df2['Role'] = df['Role']
return df2
questionnaire, requirements, tasks = load_data()
print_visual_design(index(questionnaire, slice(27, 32)))
print_previous_knowledge(index(questionnaire, slice(6, 11)))
calculate_sus(index(questionnaire, slice(32, 42)))
plot_priority_distribution(requirements)
calculate_task_success(tasks)
calculate_trust_result(index(questionnaire, slice(14, 20)), index(questionnaire, slice(20, 26)))
print('Correlation ML expertise and understanding of ML model')
print(questionnaire.iloc[:, [6, 15]].corr())
|
[
"sklearn.preprocessing.LabelEncoder",
"seaborn.despine",
"matplotlib.pyplot.xticks",
"seaborn.set_theme",
"pandas.option_context",
"numpy.array",
"scipy.stats.ttest_ind",
"seaborn.violinplot",
"pandas.read_excel",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] |
[((227, 256), 'pandas.read_excel', 'pd.read_excel', (['"""XAutoML.xlsx"""'], {}), "('XAutoML.xlsx')\n", (240, 256), True, 'import pandas as pd\n'), ((272, 286), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (284, 286), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((310, 395), 'numpy.array', 'np.array', (["['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree']"], {}), "(['strongly disagree', 'disagree', 'neutral', 'agree',\n 'strongly agree'])\n", (318, 395), True, 'import numpy as np\n'), ((753, 825), 'pandas.read_excel', 'pd.read_excel', (['"""task_results.ods"""'], {'sheet_name': '"""Requirements"""', 'skiprows': '(1)'}), "('task_results.ods', sheet_name='Requirements', skiprows=1)\n", (766, 825), True, 'import pandas as pd\n'), ((998, 1045), 'pandas.read_excel', 'pd.read_excel', (['"""task_results.ods"""'], {'sheet_name': '(0)'}), "('task_results.ods', sheet_name=0)\n", (1011, 1045), True, 'import pandas as pd\n'), ((3063, 3104), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'role': m}"], {}), "({'x': x, 'y': y, 'role': m})\n", (3075, 3104), True, 'import pandas as pd\n'), ((6222, 6254), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (6235, 6254), True, 'import seaborn as sns\n'), ((6269, 6304), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 5)'}), '(1, 1, figsize=(15, 5))\n', (6281, 6304), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6426), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'data', 'x': '"""x"""', 'y': '"""y"""', 'hue': '"""role"""', 'split': '(True)', 'palette': '"""pastel"""', 'ax': 'ax'}), "(data=data, x='x', y='y', hue='role', split=True, palette=\n 'pastel', ax=ax)\n", (6347, 6426), True, 'import seaborn as sns\n'), ((6426, 6448), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)'}), '(left=True)\n', (6437, 6448), True, 'import seaborn as sns\n'), ((9021, 9035), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (9033, 9035), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((9059, 9079), 'numpy.array', 'np.array', (["['n', 'y']"], {}), "(['n', 'y'])\n", (9067, 9079), True, 'import numpy as np\n'), ((6084, 6125), 'scipy.stats.ttest_ind', 'ttest_ind', (["ds['y'].values", "de['y'].values"], {}), "(ds['y'].values, de['y'].values)\n", (6093, 6125), False, 'from scipy.stats import ttest_ind\n'), ((6598, 6621), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(15)'}), '(rotation=15)\n', (6608, 6621), True, 'import matplotlib.pyplot as plt\n'), ((6957, 7016), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)', 'ha': '"""right"""', 'rotation_mode': '"""anchor"""'}), "(rotation=25, ha='right', rotation_mode='anchor')\n", (6967, 7016), True, 'import matplotlib.pyplot as plt\n'), ((7912, 7966), 'scipy.stats.ttest_ind', 'ttest_ind', (['text.values', 'vis.values'], {'alternative': '"""less"""'}), "(text.values, vis.values, alternative='less')\n", (7921, 7966), False, 'from scipy.stats import ttest_ind\n'), ((9171, 9212), 'pandas.option_context', 'pd.option_context', (['"""display.precision"""', '(0)'], {}), "('display.precision', 0)\n", (9188, 9212), True, 'import pandas as pd\n'), ((2964, 2975), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2972, 2975), True, 'import numpy as np\n'), ((3029, 3040), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (3037, 3040), True, 'import numpy as np\n'), ((2999, 3010), 'numpy.array', 'np.array', (['y'], {}), 
'(y)\n', (3007, 3010), True, 'import numpy as np\n')]
|
"""
*****************
Specifying Colors
*****************
Matplotlib recognizes the following formats to specify a color:
* an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed
interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``);
* a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``;
case-insensitive);
* a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA
  string obtained by duplicating each character (e.g., ``'#abc'``, equivalent
to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``;
case-insensitive);
* a string representation of a float value in ``[0, 1]`` inclusive for gray
level (e.g., ``'0.5'``);
* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, which are the single
  character short-hand notations for blue, green, red, cyan, magenta, yellow,
  black, and white;
* an X11/CSS4 color name (case-insensitive);
* a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g.,
``'xkcd:sky blue'``; case insensitive);
* one of the Tableau Colors from the 'T10' categorical palette (the default
color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}``
(case-insensitive);
* a "CN" color spec, i.e. ``'C'`` followed by a number, which is an index into
the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the
indexing is intended to occur at rendering time, and defaults to black if the
cycle does not include color.
.. _xkcd color survey: https://xkcd.com/color/rgb/
"Red", "Green", and "Blue" are the intensities of those colors, the combination
of which spans the colorspace.
How "Alpha" behaves depends on the ``zorder`` of the Artist. Higher
``zorder`` Artists are drawn on top of lower Artists, and "Alpha" determines
whether the lower artist is covered by the higher.
If the old RGB of a pixel is ``RGBold`` and the RGB of the
pixel of the Artist being added is ``RGBnew`` with Alpha ``alpha``,
then the RGB of the pixel is updated to:
``RGB = RGBold * (1 - Alpha) + RGBnew * Alpha``. An Alpha
of 1 means the old color is completely covered by the new Artist; an Alpha of 0
means that pixel of the Artist is transparent.
For more information on colors in matplotlib see
* the :doc:`/gallery/color/color_demo` example;
* the `matplotlib.colors` API;
* the :doc:`/gallery/color/named_colors` example.
"CN" color selection
--------------------
"CN" colors are converted to RGBA as soon as the artist is created. For
example,
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
th = np.linspace(0, 2*np.pi, 128)
def demo(sty):
mpl.style.use(sty)
fig, ax = plt.subplots(figsize=(3, 3))
ax.set_title('style: {!r}'.format(sty), color='C0')
ax.plot(th, np.cos(th), 'C1', label='C1')
ax.plot(th, np.sin(th), 'C2', label='C2')
ax.legend()
demo('default')
demo('seaborn')
###############################################################################
# will use the first color for the title and then plot using the second
# and third colors of each style's ``mpl.rcParams['axes.prop_cycle']``.
#
#
# .. _xkcd-colors:
#
# xkcd v X11/CSS4
# ---------------
#
# The xkcd colors are derived from a user survey conducted by the
# webcomic xkcd. `Details of the survey are available on the xkcd blog
# <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__.
#
# Out of 148 colors in the CSS color list, there are 95 name collisions
# between the X11/CSS4 names and the xkcd names, all but 3 of which have
# different hex values. For example ``'blue'`` maps to ``'#0000FF'``
# whereas ``'xkcd:blue'`` maps to ``'#0343DF'``. Due to these name
# collisions all of the xkcd colors have ``'xkcd:'`` prefixed. As noted in
# the blog post, while it might be interesting to re-define the X11/CSS4 names
# based on such a survey, we do not do so unilaterally.
#
# The name collisions are shown in the table below; the color names
# where the hex values agree are shown in bold.
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
overlap = {name for name in mcd.CSS4_COLORS
if "xkcd:" + name in mcd.XKCD_COLORS}
fig = plt.figure(figsize=[4.8, 16])
ax = fig.add_axes([0, 0, 1, 1])
for j, n in enumerate(sorted(overlap, reverse=True)):
weight = None
cn = mcd.CSS4_COLORS[n]
xkcd = mcd.XKCD_COLORS["xkcd:" + n].upper()
if cn == xkcd:
weight = 'bold'
r1 = mpatch.Rectangle((0, j), 1, 1, color=cn)
r2 = mpatch.Rectangle((1, j), 1, 1, color=xkcd)
txt = ax.text(2, j+.5, ' ' + n, va='center', fontsize=10,
weight=weight)
ax.add_patch(r1)
ax.add_patch(r2)
ax.axhline(j, color='k')
ax.text(.5, j + 1.5, 'X11', ha='center', va='center')
ax.text(1.5, j + 1.5, 'xkcd', ha='center', va='center')
ax.set_xlim(0, 3)
ax.set_ylim(0, j + 2)
ax.axis('off')
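###############################################################################
# Added illustration (not part of the original gallery example): every format
# listed at the top can be normalized with `matplotlib.colors.to_rgba`, and the
# alpha-compositing rule ``RGB = RGBold * (1 - Alpha) + RGBnew * Alpha`` can be
# checked by hand.
import matplotlib.colors as mcolors
for spec in [(0.1, 0.2, 0.5), '#0f0f0f80', '#abc', '0.5', 'g',
             'aquamarine', 'xkcd:sky blue', 'tab:orange', 'C1']:
    print(f'{spec!r:>18} -> {mcolors.to_rgba(spec)}')
# Compositing a half-transparent red (alpha=0.5) over an opaque blue pixel:
rgb_old = np.array([0.0, 0.0, 1.0])
rgb_new = np.array([1.0, 0.0, 0.0])
alpha = 0.5
print(rgb_old * (1 - alpha) + rgb_new * alpha)  # [0.5 0.  0.5]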
|
[
"matplotlib.patches.Rectangle",
"numpy.linspace",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.subplots"
] |
[((2661, 2691), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(128)'], {}), '(0, 2 * np.pi, 128)\n', (2672, 2691), True, 'import numpy as np\n'), ((4251, 4280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[4.8, 16]'}), '(figsize=[4.8, 16])\n', (4261, 4280), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2729), 'matplotlib.style.use', 'mpl.style.use', (['sty'], {}), '(sty)\n', (2724, 2729), True, 'import matplotlib as mpl\n'), ((2744, 2772), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (2756, 2772), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4555), 'matplotlib.patches.Rectangle', 'mpatch.Rectangle', (['(0, j)', '(1)', '(1)'], {'color': 'cn'}), '((0, j), 1, 1, color=cn)\n', (4531, 4555), True, 'import matplotlib.patches as mpatch\n'), ((4565, 4607), 'matplotlib.patches.Rectangle', 'mpatch.Rectangle', (['(1, j)', '(1)', '(1)'], {'color': 'xkcd'}), '((1, j), 1, 1, color=xkcd)\n', (4581, 4607), True, 'import matplotlib.patches as mpatch\n'), ((2847, 2857), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (2853, 2857), True, 'import numpy as np\n'), ((2893, 2903), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (2899, 2903), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""CLI for Chemical Roles exporters."""
import os
import click
from ..constants import DATA
@click.group()
def export():
"""Export the database."""
@export.command(name='all')
@click.pass_context
def export_all(ctx):
"""Export all."""
ctx.invoke(summary)
ctx.invoke(obo)
ctx.invoke(bel)
ctx.invoke(indra)
directory_option = click.option('--directory', default=DATA)
@export.command()
def summary():
"""Rewrite readme and generate new export."""
from .build import rewrite_repo_readme, write_export
import seaborn as sns
sns.set(font_scale=1.3, style='whitegrid')
rewrite_repo_readme()
write_export()
@export.command()
@directory_option
def bel(directory):
"""Write BEL export."""
import pybel
from .bel import get_bel
graph = get_bel()
pybel.dump(graph, os.path.join(directory, 'crog.bel.nodelink.json.gz'))
@export.command()
@directory_option
def indra(directory):
"""Write INDRA export."""
import pybel
from .bel import get_bel
graph = get_bel(use_inferred=False, add_evidence=False)
pybel.to_indra_statements_json_file(graph, os.path.join(directory, 'crog.indra.json'), sort_keys=True)
@export.command()
@directory_option
def obo(directory):
"""Write OBO export."""
from .obo import get_obo
o = get_obo()
o.write_obo(os.path.join(directory, 'crog.obo'))
o.write_obonet_gz(os.path.join(directory, 'crog.obonet.json.gz'))
if __name__ == '__main__':
export()
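# Example invocations (illustrative only; ``python export.py`` stands in for however
# the click group is actually exposed once the package is installed):
#
#   python export.py all                    # run summary, obo, bel and indra exports
#   python export.py summary                # rewrite the repo readme and summary export
#   python export.py obo --directory ./out  # write crog.obo and crog.obonet.json.gz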
|
[
"click.group",
"click.option",
"os.path.join",
"seaborn.set"
] |
[((123, 136), 'click.group', 'click.group', ([], {}), '()\n', (134, 136), False, 'import click\n'), ((382, 423), 'click.option', 'click.option', (['"""--directory"""'], {'default': 'DATA'}), "('--directory', default=DATA)\n", (394, 423), False, 'import click\n'), ((596, 638), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.3)', 'style': '"""whitegrid"""'}), "(font_scale=1.3, style='whitegrid')\n", (603, 638), True, 'import seaborn as sns\n'), ((860, 912), 'os.path.join', 'os.path.join', (['directory', '"""crog.bel.nodelink.json.gz"""'], {}), "(directory, 'crog.bel.nodelink.json.gz')\n", (872, 912), False, 'import os\n'), ((1157, 1199), 'os.path.join', 'os.path.join', (['directory', '"""crog.indra.json"""'], {}), "(directory, 'crog.indra.json')\n", (1169, 1199), False, 'import os\n'), ((1366, 1401), 'os.path.join', 'os.path.join', (['directory', '"""crog.obo"""'], {}), "(directory, 'crog.obo')\n", (1378, 1401), False, 'import os\n'), ((1425, 1471), 'os.path.join', 'os.path.join', (['directory', '"""crog.obonet.json.gz"""'], {}), "(directory, 'crog.obonet.json.gz')\n", (1437, 1471), False, 'import os\n')]
|
# Generated by Django 2.0.4 on 2018-04-17 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180417_1613'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.IntegerField(verbose_name='codigo')),
('descricao', models.CharField(max_length=255, verbose_name='descricao')),
('valor', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='valor')),
('unitario', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Unitário')),
('quantidade', models.IntegerField(verbose_name='quantidade')),
],
options={
'verbose_name': 'Item',
'verbose_name_plural': 'Itens',
'ordering': ['codigo'],
},
),
migrations.AlterModelOptions(
name='cliente',
options={'ordering': ['nome'], 'verbose_name': 'Cliente', 'verbose_name_plural': 'Clientes'},
),
migrations.AlterModelOptions(
name='endereco',
options={'ordering': ['tipo'], 'verbose_name': 'Endereço', 'verbose_name_plural': 'Endereços'},
),
migrations.AlterModelOptions(
name='pedido',
options={'ordering': ['numero'], 'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'},
),
]
|
[
"django.db.models.IntegerField",
"django.db.migrations.AlterModelOptions",
"django.db.models.AutoField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] |
[((1071, 1213), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""cliente"""', 'options': "{'ordering': ['nome'], 'verbose_name': 'Cliente', 'verbose_name_plural':\n 'Clientes'}"}), "(name='cliente', options={'ordering': ['nome'],\n 'verbose_name': 'Cliente', 'verbose_name_plural': 'Clientes'})\n", (1099, 1213), False, 'from django.db import migrations, models\n'), ((1254, 1399), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""endereco"""', 'options': "{'ordering': ['tipo'], 'verbose_name': 'Endereço', 'verbose_name_plural':\n 'Endereços'}"}), "(name='endereco', options={'ordering': ['tipo'],\n 'verbose_name': 'Endereço', 'verbose_name_plural': 'Endereços'})\n", (1282, 1399), False, 'from django.db import migrations, models\n'), ((1440, 1581), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""pedido"""', 'options': "{'ordering': ['numero'], 'verbose_name': 'Pedido', 'verbose_name_plural':\n 'Pedidos'}"}), "(name='pedido', options={'ordering': ['numero'],\n 'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'})\n", (1468, 1581), False, 'from django.db import migrations, models\n'), ((325, 418), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (341, 418), False, 'from django.db import migrations, models\n'), ((444, 486), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""codigo"""'}), "(verbose_name='codigo')\n", (463, 486), False, 'from django.db import migrations, models\n'), ((519, 577), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""descricao"""'}), "(max_length=255, verbose_name='descricao')\n", (535, 577), False, 'from django.db import migrations, models\n'), ((606, 680), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)', 'verbose_name': '"""valor"""'}), "(decimal_places=2, max_digits=10, verbose_name='valor')\n", (625, 680), False, 'from django.db import migrations, models\n'), ((712, 789), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)', 'verbose_name': '"""Unitário"""'}), "(decimal_places=2, max_digits=10, verbose_name='Unitário')\n", (731, 789), False, 'from django.db import migrations, models\n'), ((823, 869), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""quantidade"""'}), "(verbose_name='quantidade')\n", (842, 869), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Taskmaster-2 implementation for ParlAI.
No official train/valid/test splits are available as of 2020-05-18, so we make our own
splits.
"""
import os
import pandas as pd
import hashlib
from collections import Counter
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric
from parlai.utils.misc import warn_once
import json
import parlai.utils.logging as logging
from typing import Optional, Tuple
from parlai.core.message import Message
from parlai.utils.io import PathManager
import parlai.tasks.taskmaster2.build as build_
DOMAINS = [
'flights',
'food-ordering',
'hotels',
'movies',
'restaurant-search',
'sports',
'music',
]
ONTO_TOKEN = "Onto:"
CALL_TOKEN = "Call:"
RESP_TOKEN = "Result:"
class _Abstract(DialogTeacher):
"""
Abstract data loader.
"""
@classmethod
def add_cmdline_args(cls, argparser):
argparser.add_argument('--include-ontology', type=bool, default=False)
argparser.add_argument(
'--domains',
nargs='+',
default=DOMAINS,
choices=DOMAINS,
help='Uses last passed in configuration.',
)
return argparser
def __init__(self, opt: Opt, shared=None):
self.fold = opt['datatype'].split(':')[0]
opt['datafile'] = self.fold
self.dpath = os.path.join(opt['datapath'], 'taskmaster-2')
if shared is None:
warn_once(
"Taskmaster2 is a beta dataset, and format may significantly change."
)
build_.build(opt)
super().__init__(opt, shared)
def _h(self, x):
"""
Hash function.
"""
h = int(hashlib.sha1(x.encode('utf-8')).hexdigest(), 16) % 10
if h == 0:
return 'valid'
elif h == 1:
return 'test'
else:
return 'train'
def _normalize_annotation(self, anno):
return anno
def _load_data(self, fold, domains):
# load up the ontology
ontology = {}
for section in domains:
parts = []
fn = os.path.join(self.dpath, section + '.onto.json')
with PathManager.open(fn, 'r') as f:
o = json.load(f)
assert len(o) == 1
o = list(o.values())[0]
for sub in o:
prefix = sub['prefix']
parts += [
self._normalize_annotation(f'{prefix}.{a}')
for a in sub['annotations']
]
ontology[section] = ' ; '.join(parts)
chunks = []
for section in domains:
with PathManager.open(os.path.join(self.dpath, section + '.json')) as f:
subset = pd.read_json(f)
subset['domain'] = section
chunks.append(subset)
chunks = pd.concat(chunks, axis=0)
# shuffle deterministically for randomness in few-shot training
chunks = chunks.sample(frac=1.0, random_state=42)
chunks['fold'] = self._label_fold(chunks)
# only the fold we need here
chunks = chunks[chunks.fold == fold].reset_index()
chunks['ontology'] = chunks['domain'].apply(ontology.get)
return chunks
def _segments2text(self, segments):
output = []
slots = {}
for segment in segments:
val = segment['text']
for anno_ in segment['annotations']:
anno = anno_['name']
anno = self._normalize_annotation(anno)
output.append(f'{anno} = {val}')
slots[anno] = val
return " ; ".join(output), slots
def custom_evaluation(
self,
teacher_action: Message,
labels: Optional[Tuple[str]],
model_response: Message,
):
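        """
        Taskmaster-2 specific evaluation for a single example: ``apicall`` turns
        record slot precision/recall (``slot_p`` / ``slot_r``) and joint goal
        accuracy (``jga``); ``apiresp`` turns record delexicalized F1/BLEU plus
        per-domain lexicalized variants; metrics already attached to the model
        response are re-logged under an ``apicall_`` / ``apiresp_`` prefix.
        """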
if 'metrics' in model_response and 'type' in teacher_action:
# keep copies of metrics across both api calls/responses
prefix = teacher_action['type']
keys = list(model_response['metrics'].keys())
for k in keys:
self.metrics.add(f'{prefix}_{k}', model_response['metrics'][k])
if 'text' not in model_response or not labels or 'type' not in teacher_action:
return
domain = teacher_action['domain']
if teacher_action['type'] == 'apicall':
# also count slot accuracy
text = model_response['text']
slot_guesses = set(
text.replace(CALL_TOKEN + " ", "").split(' ; ')
) # prevent cheating via repeated guesses
correct = 0
for slot_guess in slot_guesses:
if ' = ' not in slot_guess:
continue
try:
slot, guess = slot_guess.split(' = ')
except ValueError:
continue
if teacher_action['slots'].get(slot) == guess:
self.metrics.add('slot_p', AverageMetric(1))
self.metrics.add(f'{domain}_slot_p', AverageMetric(1))
correct += 1
else:
self.metrics.add('slot_p', AverageMetric(0))
self.metrics.add(f'{domain}_slot_p', AverageMetric(0))
logging.debug(
f"Bad slot guess '{slot_guess}' != {teacher_action['slots']}"
)
if teacher_action['slots']:
self.metrics.add(
'slot_r', AverageMetric(correct, len(teacher_action['slots']))
)
self.metrics.add(
f'{domain}_slot_r',
AverageMetric(correct, len(teacher_action['slots'])),
)
self.metrics.add(
'jga', AverageMetric(correct == len(teacher_action['slots']))
)
elif teacher_action['type'] == 'apiresp':
# keep track of statistics by domain
f1_metric = F1Metric.compute(model_response['text'], labels)
bleu_metric = BleuMetric.compute(model_response['text'], labels)
self.metrics.add(f'{domain}_lex_f1', f1_metric)
self.metrics.add(f'{domain}_lex_bleu', bleu_metric)
delex_text = model_response['text']
delex_label = labels[0]
# compute delexicalized string metrics
for slot, value in teacher_action['slots'].items():
delex_text = delex_text.replace(value, slot)
delex_label = delex_label.replace(value, slot)
f1_metric = F1Metric.compute(delex_text, (delex_label,))
self.metrics.add('delex_f1', f1_metric)
self.metrics.add(f'{domain}_delex_f1', f1_metric)
bleu_metric = BleuMetric.compute(delex_text, [delex_label])
self.metrics.add('delex_bleu', bleu_metric)
self.metrics.add(f'{domain}_delex_bleu', bleu_metric)
def setup_data(self, fold):
domains = self.opt.get('domains', DOMAINS)
chunks = self._load_data(fold, domains)
domains_cnt = Counter()
for _, row in chunks.iterrows():
domains_cnt[row['domain']] += 1
first = True
utterances = row['utterances'][:]
if (
len(utterances) >= 3
and utterances[0]['speaker'] == 'USER'
and utterances[1]['speaker'] == 'ASSISTANT'
and utterances[2]['speaker'] == 'ASSISTANT'
and "help you?" in utterances[1]['text']
):
# skip this one
utterances.pop(1)
if self.opt['include_ontology']:
yield {'text': f"{ONTO_TOKEN} {row['ontology']}", 'label': ''}, True
first = False
while utterances:
utt = utterances.pop(0)
segtxt, slots = self._segments2text(utt.get('segments', []))
if utt['speaker'] == 'USER':
yield {
'text': utt['text'],
'label': f'{CALL_TOKEN} {segtxt}',
'domain': row['domain'],
'slots': slots,
'type': 'apicall',
}, first
first = False
elif utt['speaker'] == 'ASSISTANT':
yield {
'text': f'{RESP_TOKEN} {segtxt}',
'label': utt['text'],
'domain': row['domain'],
'slots': slots,
'type': 'apiresp',
}, first
first = False
logging.debug(f"Fold {fold} domains: {domains_cnt}")
class DelexTeacher(_Abstract):
def _label_fold(self, chunks):
return chunks.conversation_id.apply(self._h)
def _delexicalize(self, text, slots):
for key, value in slots.items():
text = text.replace(value, key)
return text
def setup_data(self, fold):
domains_cnt = Counter()
chunks = self._load_data(fold)
for _, row in chunks.iterrows():
domains_cnt[row['domain']] += 1
first = True
utterances = row['utterances'][:]
if (
len(utterances) >= 3
and utterances[0]['speaker'] == 'USER'
and utterances[1]['speaker'] == 'ASSISTANT'
and utterances[2]['speaker'] == 'ASSISTANT'
and "help you?" in utterances[1]['text']
):
# skip this one
utterances.pop(1)
user_utterances = []
asst_utterances = []
while utterances:
utt = utterances.pop(0)
_, slots = self._segments2text(utt.get('segments', []))
if utt['speaker'] == 'USER':
if asst_utterances:
yield {
'text': ' __BREAK__ '.join(user_utterances),
'label': ' __BREAK__ '.join(asst_utterances),
'domain': row['domain'],
}, first
first = False
user_utterances = []
asst_utterances = []
user_utterances.append(self._delexicalize(utt['text'], slots))
elif utt['speaker'] == 'ASSISTANT':
asst_utterances.append(self._delexicalize(utt['text'], slots))
if not user_utterances:
user_utterances.append('__SILENCE__')
if asst_utterances:
yield {
'text': ' __BREAK__ '.join(user_utterances),
'label': ' __BREAK__ '.join(asst_utterances),
'domain': row['domain'],
}, first
class TextOnlyTeacher(DelexTeacher):
def _delexicalize(self, text, slots):
return text
class FullShotTeacher(_Abstract):
"""
    The full-shot teacher uses a standard 80-10-10 split, without regard to domain.
"""
def _label_fold(self, chunks):
return chunks.conversation_id.apply(self._h)
class FewShotTeacher(_Abstract):
"""
Few shot teacher tests for generalization to new domains.
"""
@classmethod
def add_cmdline_args(cls, argparser):
argparser.add_argument(
'--holdout',
default=DOMAINS[0],
choices=DOMAINS,
help='Domain which is held out from test',
)
argparser.add_argument(
'--n-shot',
default=100,
type=int,
help='Number of few shot examples to provide in training fold.',
)
return super().add_cmdline_args(argparser)
def _label_fold(self, chunks):
folds = []
num_shots = 0
for _, row in chunks.iterrows():
if row['domain'] != self.opt['holdout']:
# if it's not in the holdout, always mark it train
folds.append('train')
else:
# keep the same valid/test sets as in fullshot, and only leak
# a small number of the training examples (i.e. throw away the
# vast majority of our data but keep test sets the same)
f = self._h(row['conversation_id'])
if f != 'train':
folds.append(f)
elif num_shots < self.opt['n_shot']:
folds.append('train')
num_shots += 1
else:
folds.append('throwaway')
return folds
class DefaultTeacher(FullShotTeacher):
pass
|
[
"parlai.core.metrics.F1Metric.compute",
"parlai.core.metrics.BleuMetric.compute",
"os.path.join",
"collections.Counter",
"parlai.utils.logging.debug",
"parlai.utils.misc.warn_once",
"parlai.core.metrics.AverageMetric",
"parlai.utils.io.PathManager.open",
"json.load",
"parlai.tasks.taskmaster2.build.build",
"pandas.concat",
"pandas.read_json"
] |
[((1619, 1664), 'os.path.join', 'os.path.join', (["opt['datapath']", '"""taskmaster-2"""'], {}), "(opt['datapath'], 'taskmaster-2')\n", (1631, 1664), False, 'import os\n'), ((3126, 3151), 'pandas.concat', 'pd.concat', (['chunks'], {'axis': '(0)'}), '(chunks, axis=0)\n', (3135, 3151), True, 'import pandas as pd\n'), ((7389, 7398), 'collections.Counter', 'Counter', ([], {}), '()\n', (7396, 7398), False, 'from collections import Counter\n'), ((8988, 9040), 'parlai.utils.logging.debug', 'logging.debug', (['f"""Fold {fold} domains: {domains_cnt}"""'], {}), "(f'Fold {fold} domains: {domains_cnt}')\n", (9001, 9040), True, 'import parlai.utils.logging as logging\n'), ((9365, 9374), 'collections.Counter', 'Counter', ([], {}), '()\n', (9372, 9374), False, 'from collections import Counter\n'), ((1704, 1789), 'parlai.utils.misc.warn_once', 'warn_once', (['"""Taskmaster2 is a beta dataset, and format may significantly change."""'], {}), "('Taskmaster2 is a beta dataset, and format may significantly change.'\n )\n", (1713, 1789), False, 'from parlai.utils.misc import warn_once\n'), ((1827, 1844), 'parlai.tasks.taskmaster2.build.build', 'build_.build', (['opt'], {}), '(opt)\n', (1839, 1844), True, 'import parlai.tasks.taskmaster2.build as build_\n'), ((2387, 2435), 'os.path.join', 'os.path.join', (['self.dpath', "(section + '.onto.json')"], {}), "(self.dpath, section + '.onto.json')\n", (2399, 2435), False, 'import os\n'), ((2453, 2478), 'parlai.utils.io.PathManager.open', 'PathManager.open', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (2469, 2478), False, 'from parlai.utils.io import PathManager\n'), ((2505, 2517), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2514, 2517), False, 'import json\n'), ((3020, 3035), 'pandas.read_json', 'pd.read_json', (['f'], {}), '(f)\n', (3032, 3035), True, 'import pandas as pd\n'), ((6284, 6332), 'parlai.core.metrics.F1Metric.compute', 'F1Metric.compute', (["model_response['text']", 'labels'], {}), "(model_response['text'], labels)\n", (6300, 6332), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((6359, 6409), 'parlai.core.metrics.BleuMetric.compute', 'BleuMetric.compute', (["model_response['text']", 'labels'], {}), "(model_response['text'], labels)\n", (6377, 6409), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((6882, 6926), 'parlai.core.metrics.F1Metric.compute', 'F1Metric.compute', (['delex_text', '(delex_label,)'], {}), '(delex_text, (delex_label,))\n', (6898, 6926), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((7067, 7112), 'parlai.core.metrics.BleuMetric.compute', 'BleuMetric.compute', (['delex_text', '[delex_label]'], {}), '(delex_text, [delex_label])\n', (7085, 7112), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((2944, 2987), 'os.path.join', 'os.path.join', (['self.dpath', "(section + '.json')"], {}), "(self.dpath, section + '.json')\n", (2956, 2987), False, 'import os\n'), ((5562, 5638), 'parlai.utils.logging.debug', 'logging.debug', (['f"""Bad slot guess \'{slot_guess}\' != {teacher_action[\'slots\']}"""'], {}), '(f"Bad slot guess \'{slot_guess}\' != {teacher_action[\'slots\']}")\n', (5575, 5638), True, 'import parlai.utils.logging as logging\n'), ((5254, 5270), 'parlai.core.metrics.AverageMetric', 'AverageMetric', (['(1)'], {}), '(1)\n', (5267, 5270), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((5329, 5345), 'parlai.core.metrics.AverageMetric', 'AverageMetric', (['(1)'], {}), 
'(1)\n', (5342, 5345), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((5449, 5465), 'parlai.core.metrics.AverageMetric', 'AverageMetric', (['(0)'], {}), '(0)\n', (5462, 5465), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n'), ((5524, 5540), 'parlai.core.metrics.AverageMetric', 'AverageMetric', (['(0)'], {}), '(0)\n', (5537, 5540), False, 'from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric\n')]
|
import numpy as np
nparr = np.array([i for i in range(10)])  # array built from a Python list: [0, 1, ..., 9]
a = np.zeros(10)  # ten zeros (float64 by default)
f = np.zeros(10, dtype=float)  # ten zeros with an explicit float dtype
n = np.full((3, 5), 44)  # 3x5 array filled with the constant 44
r = np.random.randint(0, 100, size=(3, 5))  # 3x5 random integers in [0, 100)
r2 = np.random.random((3, 5))  # 3x5 random floats in [0.0, 1.0)
x = np.linspace(0, 100, 50)  # 50 evenly spaced values from 0 to 100 inclusive
print(nparr, a, f, n, r, r2, x)
|
[
"numpy.random.random",
"numpy.zeros",
"numpy.linspace",
"numpy.random.randint",
"numpy.full"
] |
[((66, 78), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (74, 78), True, 'import numpy as np\n'), ((83, 108), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (91, 108), True, 'import numpy as np\n'), ((112, 131), 'numpy.full', 'np.full', (['(3, 5)', '(44)'], {}), '((3, 5), 44)\n', (119, 131), True, 'import numpy as np\n'), ((134, 172), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(3, 5)'}), '(0, 100, size=(3, 5))\n', (151, 172), True, 'import numpy as np\n'), ((175, 199), 'numpy.random.random', 'np.random.random', (['(3, 5)'], {}), '((3, 5))\n', (191, 199), True, 'import numpy as np\n'), ((203, 226), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(50)'], {}), '(0, 100, 50)\n', (214, 226), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
@dataclass(frozen=True)
class MetricComputationReport:
name: MetricNameBase
metric_prefix: MetricPrefix
value: torch.Tensor
DefaultValueT = TypeVar("DefaultValueT")
ComputeIterType = Iterator[
Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
pass
class WindowBuffer:
def __init__(self, max_size: int, max_buffer_count: int) -> None:
self._max_size: int = max_size
self._max_buffer_count: int = max_buffer_count
self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
self._window_used_size = 0
def aggregate_state(
self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
) -> None:
def remove(window_state: torch.Tensor) -> None:
window_state -= self._buffers.popleft()
self._window_used_size -= self._used_sizes.popleft()
if len(self._buffers) == self._buffers.maxlen:
remove(window_state)
self._buffers.append(curr_state)
self._used_sizes.append(size)
window_state += curr_state
self._window_used_size += size
while self._window_used_size > self._max_size:
remove(window_state)
@property
def buffers(self) -> Deque[torch.Tensor]:
return self._buffers
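# Illustrative usage sketch (added for clarity; `_window_buffer_example` is a
# hypothetical helper that is never called by the library). WindowBuffer keeps a
# bounded FIFO of per-update states so that `window_state` holds the sum over at
# most the last `max_size` samples, evicting whole per-update buffers as needed.
def _window_buffer_example() -> torch.Tensor:
    buf = WindowBuffer(max_size=4, max_buffer_count=MAX_BUFFER_COUNT)
    window = torch.zeros(1)
    for batch_sum, batch_size in [
        (torch.tensor([2.0]), 2),
        (torch.tensor([3.0]), 2),
        (torch.tensor([5.0]), 2),
    ]:
        buf.aggregate_state(window, batch_sum, batch_size)
    # The oldest batch has been evicted to respect max_size, so window == tensor([8.])
    return window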
class RecMetricComputation(Metric, abc.ABC):
r"""The internal computation class template.
    A metric implementation should override update() and compute(). These two
    APIs focus on the actual mathematical meaning of the metric, without any
    detailed knowledge of model output and task information.
Args:
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
        n_tasks (int): the number of tasks this computation object
will have to compute.
window_size (int): the window size for the window metric.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if a non-leader rank wants to consume the metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
"""
_batch_window_buffers: Optional[Dict[str, WindowBuffer]]
def __init__(
self,
my_rank: int,
batch_size: int,
n_tasks: int,
window_size: int,
compute_on_all_ranks: bool = False,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
process_group: Optional[dist.ProcessGroup] = None,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(process_group=process_group, *args, **kwargs)
self._my_rank = my_rank
self._n_tasks = n_tasks
self._batch_size = batch_size
self._window_size = window_size
self._compute_on_all_ranks = compute_on_all_ranks
if self._window_size > 0:
self._batch_window_buffers = {}
else:
self._batch_window_buffers = None
self._add_state(
"has_valid_update",
torch.zeros(self._n_tasks, dtype=torch.uint8),
add_window_state=False,
dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
persistent=True,
)
@staticmethod
def get_window_state_name(state_name: str) -> str:
return f"window_{state_name}"
def get_window_state(self, state_name: str) -> torch.Tensor:
return getattr(self, self.get_window_state_name(state_name))
def _add_state(
self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
) -> None:
# pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
# param but got `DefaultValueT`.
super().add_state(name, default, **kwargs)
if add_window_state:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
kwargs["persistent"] = False
window_state_name = self.get_window_state_name(name)
# Avoid pyre error
assert isinstance(default, torch.Tensor)
super().add_state(window_state_name, default.detach().clone(), **kwargs)
self._batch_window_buffers[window_state_name] = WindowBuffer(
max_size=self._window_size,
max_buffer_count=MAX_BUFFER_COUNT,
)
def _aggregate_window_state(
self, state_name: str, state: torch.Tensor, num_samples: int
) -> None:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
window_state_name = self.get_window_state_name(state_name)
assert self._batch_window_buffers is not None
self._batch_window_buffers[window_state_name].aggregate_state(
getattr(self, window_state_name), curr_state=state, size=num_samples
)
@abc.abstractmethod
# pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
def update(
self,
*,
predictions: Optional[torch.Tensor],
labels: torch.Tensor,
weights: Optional[torch.Tensor],
) -> None: # pragma: no cover
pass
@abc.abstractmethod
def _compute(self) -> List[MetricComputationReport]: # pragma: no cover
pass
def pre_compute(self) -> None:
r"""If a metric need to do some work before `compute()`, the metric
has to override this `pre_compute()`. One possible usage is to do
some pre-processing of the local state before `compute()` as TorchMetric
wraps `RecMetricComputation.compute()` and will do the global aggregation
before `RecMetricComputation.compute()` is called.
"""
return
def compute(self) -> List[MetricComputationReport]:
if self._my_rank == 0 or self._compute_on_all_ranks:
return self._compute()
else:
return []
def local_compute(self) -> List[MetricComputationReport]:
return self._compute()
class RecMetric(nn.Module, abc.ABC):
r"""The main class template to implement a recommendation metric.
This class contains the recommendation tasks information (RecTaskInfo) and
the actual computation object (RecMetricComputation). RecMetric processes
    all the information related to RecTaskInfo and models, and passes the required
    signals to the computation object, allowing the implementation of
    RecMetricComputation to focus on the mathematical meaning.
    A new metric that inherits RecMetric must override the following attributes
in its own __init__(): `_namespace` and `_metrics_computations`. No other
methods should be overridden.
Args:
world_size (int): the number of trainers.
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
tasks (List[RecTaskInfo]): the information of the model tasks.
compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
window_size (int): the window size for the window metric.
fused_update_limit (int): the maximum number of updates to be fused.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if a non-leader rank wants to consume the global metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo,
)
"""
_computation_class: Type[RecMetricComputation]
_namespace: MetricNamespaceBase
_metrics_computations: nn.ModuleList
_tasks: List[RecTaskInfo]
_window_size: int
_tasks_iter: Callable[[str], ComputeIterType]
_update_buffers: Dict[str, List[RecModelOutput]]
_default_weights: Dict[Tuple[int, ...], torch.Tensor]
PREDICTIONS: str = "predictions"
LABELS: str = "labels"
WEIGHTS: str = "weights"
def __init__(
self,
world_size: int,
my_rank: int,
batch_size: int,
tasks: List[RecTaskInfo],
compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
window_size: int = 100,
fused_update_limit: int = 0,
compute_on_all_ranks: bool = False,
process_group: Optional[dist.ProcessGroup] = None,
**kwargs: Any,
) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.Metric or
# TorchMetrics.MetricCollection.
if (
compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
and fused_update_limit > 0
):
raise ValueError(
"The fused tasks computation and the fused update cannot be set at the same time"
)
super().__init__()
self._world_size = world_size
self._my_rank = my_rank
self._window_size = math.ceil(window_size / world_size)
self._batch_size = batch_size
self._tasks = tasks
self._compute_mode = compute_mode
self._fused_update_limit = fused_update_limit
self._default_weights = {}
self._update_buffers = {
self.PREDICTIONS: [],
self.LABELS: [],
self.WEIGHTS: [],
}
if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
n_metrics = 1
task_per_metric = len(self._tasks)
self._tasks_iter = self._fused_tasks_iter
else:
n_metrics = len(self._tasks)
task_per_metric = 1
self._tasks_iter = self._unfused_tasks_iter
self._metrics_computations: nn.ModuleList = nn.ModuleList(
[
# This Pyre error seems to be Pyre's bug as it can be inferred by mypy
# according to https://github.com/python/mypy/issues/3048.
                # pyre-fixme[45]: Cannot instantiate abstract class `RecMetricComputation`.
self._computation_class(
my_rank,
batch_size,
task_per_metric,
self._window_size,
compute_on_all_ranks,
process_group,
**kwargs,
)
for _ in range(n_metrics)
]
)
# TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
# compute_scope str input with an enum
def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
assert len(self._metrics_computations) == 1
self._metrics_computations[0].pre_compute()
for metric_report in getattr(
self._metrics_computations[0], compute_scope + "compute"
)():
for task, metric_value, has_valid_update in zip(
self._tasks,
metric_report.value,
self._metrics_computations[0].has_valid_update,
):
                # The attribute has_valid_update is a tensor whose length equals the
                # number of tasks. Each value in it corresponds to whether a task
# has valid updates or not.
# If for a task there's no valid updates, the calculated metric_value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_value
if has_valid_update > 0
else torch.zeros_like(metric_value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
for task, metric_computation in zip(self._tasks, self._metrics_computations):
metric_computation.pre_compute()
for metric_report in getattr(
metric_computation, compute_scope + "compute"
)():
# The attribute has_valid_update is a tensor with only 1 value
# corresponding to whether the task has valid updates or not.
# If there's no valid update, the calculated metric_report.value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_report.value
if metric_computation.has_valid_update[0] > 0
else torch.zeros_like(metric_report.value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
assert len(outputs) > 0
if isinstance(outputs[0], torch.Tensor):
return torch.cat(cast(List[torch.Tensor], outputs))
else:
task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
for output in outputs:
assert isinstance(output, dict)
for task_name, tensor in output.items():
task_outputs[task_name].append(tensor)
return {
name: torch.cat(tensors) for name, tensors in task_outputs.items()
}
ret: Dict[str, RecModelOutput] = {}
for key, output_list in self._update_buffers.items():
if len(output_list) > 0:
ret[key] = fuse(output_list)
else:
assert key == self.WEIGHTS
output_list.clear()
return ret
def _check_fused_update(self, force: bool) -> None:
if self._fused_update_limit <= 0:
return
if len(self._update_buffers[self.PREDICTIONS]) == 0:
return
if (
not force
and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
):
return
fused_arguments = self._fuse_update_buffers()
self._update(
predictions=fused_arguments[self.PREDICTIONS],
labels=fused_arguments[self.LABELS],
weights=fused_arguments.get(self.WEIGHTS, None),
)
def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
weights = self._default_weights.get(predictions.size(), None)
if weights is None:
weights = torch.ones_like(predictions)
self._default_weights[predictions.size()] = weights
return weights
def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
return torch.gt(torch.count_nonzero(weights, dim=-1), 0)
def _update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
with torch.no_grad():
if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
assert isinstance(predictions, torch.Tensor)
# Reshape the predictions to size([len(self._tasks), self._batch_size])
predictions = predictions.view(-1, self._batch_size)
assert isinstance(labels, torch.Tensor)
labels = labels.view(-1, self._batch_size)
if weights is None:
weights = self._create_default_weights(predictions)
else:
assert isinstance(weights, torch.Tensor)
weights = weights.view(-1, self._batch_size)
# has_valid_weights is a tensor of bool whose length equals to the number
# of tasks. Each value in it is corresponding to whether the weights
# are valid, i.e. are set to non-zero values for that task in this update.
# If has_valid_weights are Falses for all the tasks, we just ignore this
# update.
has_valid_weights = self._check_nonempty_weights(weights)
if torch.any(has_valid_weights):
self._metrics_computations[0].update(
predictions=predictions, labels=labels, weights=weights
)
self._metrics_computations[0].has_valid_update.logical_or_(
has_valid_weights
).byte()
else:
for task, metric_ in zip(self._tasks, self._metrics_computations):
if task.name not in predictions:
continue
if torch.numel(predictions[task.name]) == 0:
assert torch.numel(labels[task.name]) == 0
assert weights is None or torch.numel(weights[task.name]) == 0
continue
# Reshape the predictions to size([1, self._batch_size])
task_predictions = predictions[task.name].view(1, -1)
task_labels = labels[task.name].view(1, -1)
if weights is None:
task_weights = self._create_default_weights(task_predictions)
else:
task_weights = weights[task.name].view(1, -1)
# has_valid_weights is a tensor with only 1 value corresponding to
# whether the weights are valid, i.e. are set to non-zero values for
# the task in this update.
# If has_valid_update[0] is False, we just ignore this update.
has_valid_weights = self._check_nonempty_weights(task_weights)
if has_valid_weights[0]:
metric_.update(
predictions=task_predictions,
labels=task_labels,
weights=task_weights,
)
metric_.has_valid_update.logical_or_(has_valid_weights).byte()
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
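        """
        Feed one batch of model output into the metric. When ``fused_update_limit``
        is positive the batch is only buffered here and the actual state update is
        deferred until enough batches accumulate (or until ``compute()`` forces a
        flush); otherwise ``_update`` is applied immediately.
        """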
if self._fused_update_limit > 0:
self._update_buffers[self.PREDICTIONS].append(predictions)
self._update_buffers[self.LABELS].append(labels)
if weights is not None:
self._update_buffers[self.WEIGHTS].append(weights)
self._check_fused_update(force=False)
else:
self._update(predictions=predictions, labels=labels, weights=weights)
# The implementation of compute is very similar to local_compute, but compute overwrites
# the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
def compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter(""):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def sync(self) -> None:
for computation in self._metrics_computations:
computation.sync()
def unsync(self) -> None:
for computation in self._metrics_computations:
if computation._is_synced:
computation.unsync()
def reset(self) -> None:
for computation in self._metrics_computations:
computation.reset()
def get_memory_usage(self) -> Dict[torch.Tensor, int]:
r"""Estimates the memory of the rec metric instance's
underlying tensors; returns the map of tensor to size
"""
tensor_map = {}
attributes_q = deque(self.__dict__.values())
while attributes_q:
attribute = attributes_q.popleft()
if isinstance(attribute, torch.Tensor):
tensor_map[attribute] = (
attribute.size().numel() * attribute.element_size()
)
elif isinstance(attribute, WindowBuffer):
attributes_q.extend(attribute.buffers)
elif isinstance(attribute, Mapping):
attributes_q.extend(attribute.values())
elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
attributes_q.extend(attribute)
elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
attributes_q.extend(attribute.__dict__.values())
return tensor_map
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, torch.Tensor]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, torch.Tensor]:
# We need to flush the cached output to ensure checkpointing correctness.
self._check_fused_update(force=True)
destination = super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
return self._metrics_computations.state_dict(
destination=destination,
prefix=f"{prefix}_metrics_computations.",
keep_vars=keep_vars,
)
class RecMetricList(nn.Module):
"""
    A list module to encapsulate multiple RecMetric instances and provide the
same interfaces as RecMetric.
Args:
        rec_metrics (List[RecMetric]): the list of the input RecMetrics.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo
)
metrics = RecMetricList([ne])
"""
rec_metrics: nn.ModuleList
def __init__(self, rec_metrics: List[RecMetric]) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.MetricCollection.
        # The prerequisite to use MetricCollection is that RecMetric inherits from
# TorchMetrics.Metric or TorchMetrics.MetricCollection
super().__init__()
self.rec_metrics = nn.ModuleList(rec_metrics)
def __len__(self) -> int:
return len(self.rec_metrics)
def __getitem__(self, idx: int) -> nn.Module:
return self.rec_metrics[idx]
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: RecModelOutput,
) -> None:
for metric in self.rec_metrics:
metric.update(predictions=predictions, labels=labels, weights=weights)
def compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.compute())
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.local_compute())
return ret
def sync(self) -> None:
for metric in self.rec_metrics:
metric.sync()
def unsync(self) -> None:
for metric in self.rec_metrics:
metric.unsync()
def reset(self) -> None:
for metric in self.rec_metrics:
metric.reset()
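# Editor's sketch of driving RecMetricList end to end. The NEMetric constructor
# arguments mirror the class docstring above; the import paths and the shape of
# `model_out` are assumptions about a typical torchrec setup, not verified here.
#
#   from torchrec.metrics.ne import NEMetric
#   from torchrec.metrics.metrics_config import DefaultTaskInfo
#
#   ne = NEMetric(world_size=4, my_rank=0, batch_size=128, tasks=DefaultTaskInfo)
#   metrics = RecMetricList([ne])
#   metrics.update(predictions=model_out, labels=batch_labels, weights=batch_weights)
#   print(metrics.compute())   # Dict[str, torch.Tensor] keyed by composed metric names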
|
[
"torch.ones_like",
"torch.any",
"math.ceil",
"collections.deque",
"torch.nn.ModuleList",
"torchrec.metrics.metrics_namespace.compose_metric_key",
"dataclasses.dataclass",
"torch.zeros_like",
"typing.cast",
"torch.numel",
"collections.defaultdict",
"torch.no_grad",
"torch.count_nonzero",
"torch.zeros",
"torch.cat",
"typing.TypeVar"
] |
[((943, 965), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (952, 965), False, 'from dataclasses import dataclass\n'), ((1096, 1120), 'typing.TypeVar', 'TypeVar', (['"""DefaultValueT"""'], {}), "('DefaultValueT')\n", (1103, 1120), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((1523, 1553), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1528, 1553), False, 'from collections import defaultdict, deque\n'), ((1593, 1623), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1598, 1623), False, 'from collections import defaultdict, deque\n'), ((10435, 10470), 'math.ceil', 'math.ceil', (['(window_size / world_size)'], {}), '(window_size / world_size)\n', (10444, 10470), False, 'import math\n'), ((24195, 24221), 'torch.nn.ModuleList', 'nn.ModuleList', (['rec_metrics'], {}), '(rec_metrics)\n', (24208, 24221), True, 'import torch.nn as nn\n'), ((4195, 4240), 'torch.zeros', 'torch.zeros', (['self._n_tasks'], {'dtype': 'torch.uint8'}), '(self._n_tasks, dtype=torch.uint8)\n', (4206, 4240), False, 'import torch\n'), ((16013, 16041), 'torch.ones_like', 'torch.ones_like', (['predictions'], {}), '(predictions)\n', (16028, 16041), False, 'import torch\n'), ((16232, 16268), 'torch.count_nonzero', 'torch.count_nonzero', (['weights'], {'dim': '(-1)'}), '(weights, dim=-1)\n', (16251, 16268), False, 'import torch\n'), ((16456, 16471), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16469, 16471), False, 'import torch\n'), ((20564, 20631), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20582, 20631), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, MetricNamespaceBase, MetricPrefix\n'), ((20951, 21018), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20969, 21018), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, MetricNamespaceBase, MetricPrefix\n'), ((14541, 14558), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14552, 14558), False, 'from collections import defaultdict, deque\n'), ((17613, 17641), 'torch.any', 'torch.any', (['has_valid_weights'], {}), '(has_valid_weights)\n', (17622, 17641), False, 'import torch\n'), ((13010, 13040), 'torch.zeros_like', 'torch.zeros_like', (['metric_value'], {}), '(metric_value)\n', (13026, 13040), False, 'import torch\n'), ((13997, 14034), 'torch.zeros_like', 'torch.zeros_like', (['metric_report.value'], {}), '(metric_report.value)\n', (14013, 14034), False, 'import torch\n'), ((14426, 14459), 'typing.cast', 'cast', (['List[torch.Tensor]', 'outputs'], {}), '(List[torch.Tensor], outputs)\n', (14430, 14459), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((14825, 14843), 'torch.cat', 'torch.cat', (['tensors'], {}), '(tensors)\n', (14834, 14843), False, 'import torch\n'), ((18164, 18199), 'torch.numel', 'torch.numel', (['predictions[task.name]'], {}), '(predictions[task.name])\n', (18175, 18199), False, 'import torch\n'), ((4315, 4334), 'torch.any', 
'torch.any', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (4324, 4334), False, 'import torch\n'), ((18237, 18267), 'torch.numel', 'torch.numel', (['labels[task.name]'], {}), '(labels[task.name])\n', (18248, 18267), False, 'import torch\n'), ((18323, 18354), 'torch.numel', 'torch.numel', (['weights[task.name]'], {}), '(weights[task.name])\n', (18334, 18354), False, 'import torch\n')]
|
import cv2 as cv
from deskew import determine_skew
import numpy as np
from PIL import Image, ImageFilter, ImageOps, UnidentifiedImageError
from pytesseract import image_to_string
from skimage import io
from skimage.color import rgb2gray
from skimage.transform import rotate
from spellchecker import SpellChecker
import traceback
# On Windows, you need to tell it where Tesseract is installed, for example:
# pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# OCR Stuff
####################################################################################################
def to_text(pic):
"""
Read and return text from an image.
Args:
pic: filename string, pathlib.Path object, or file object to read.
Returns:
Text from the image.
"""
try:
img = Image.open(pic)
except FileNotFoundError as e:
print("File " + pic + " does not exist.")
quit()
    except UnidentifiedImageError as e:
print("That file is not an image.")
quit()
except:
print("Unanticipated error:")
traceback.print_exc()
quit()
    img = remove_alpha(img)
text = image_to_string(img)
return text
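# Hedged usage sketch: 'scan.png' below is a placeholder filename, not a file that
# ships with this module.
#
#   print(to_text("scan.png"))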
def valid_text(ocr, accuracy_pct, language="en", distance=2, case_sensitive=True): # this spellchecker sucks
"""
Checks that the output of to_text() makes sense. To build your own dictionary, see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#how-to-build-a-new-dictionary
Args:
ocr: string to analyze.
accuracy_pct: percentage of words in ocr that should be in the dictionary.
language: language of dictionary (default English); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
distance: Levenshtein distance (default 2 for shorter words); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
https://en.wikipedia.org/wiki/Levenshtein_distance
Returns:
Boolean indicating success of to_text():
True: to_text() makes sense.
False: to_text() returned nonsense.
"""
if ocr == "":
return False # if it returned nothing
word_list = ocr.split() # get list of all words in input string
spell = SpellChecker(language=language, distance=distance, case_sensitive=case_sensitive)
misspelled = spell.unknown(word_list) # list of unknown words from word_list
#print(misspelled)
#print(word_list)
if (len(word_list) - len(misspelled)) / len(word_list) < accuracy_pct / 100:
return False # if it returned gibberish
return True # otherwise, all good
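# Hedged example of the accuracy threshold: with accuracy_pct=80, at least 80% of the
# OCR'd words must be known to the spellchecker for the result to count as valid.
#
#   valid_text("Ths is nonsence txt", accuracy_pct=80)    # likely False
#   valid_text("This is sensible text", accuracy_pct=80)  # likely True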
def parse(pic, accuracy_pct, language="en", distance=2, case_sensitive=True):
"""
Attempts OCR with image and decides if processing is needed.
Args:
pic: filename string, pathlib.Path object, or file object to read.
accuracy_pct: percentage of words in string that should be in the dictionary.
language: language of dictionary (default English); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
distance: Levenshtein distance (default 2 for shorter words); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
https://en.wikipedia.org/wiki/Levenshtein_distance
Returns:
Text from the image if OCR was successful; otherwise a failure message.
"""
text = to_text(pic)
if valid_text(text, accuracy_pct, language=language, distance=distance,
case_sensitive=case_sensitive):
return text
else:
return "OCR failed." # time for processing
# Image Processing Stuff
####################################################################################################
def remove_alpha(pic):
"""
Removes the alpha channel from an image, if it exists. Necessary for OCR.
Args:
pic: PIL.Image object to convert.
Returns:
The PIL.Image object in RGB format.
"""
return pic.convert("RGB")
def invert(pic):
"""
Inverts the colors in an image. Useful if OCR doesn't work.
Args:
pic: PIL.Image object to invert.
Returns:
The inverted PIL.Image object.
"""
return ImageOps.invert(remove_alpha(pic)) # negative colors
'''def resize(pic): # needs work: possible key error "dpi"
"""
Resizes an image that is less than 300 dpi. Useful if OCR doesn't work.
Args:
pic: PIL.Image object to resize.
Returns:
The resized PIL.Image object.
"""
pic = remove_alpha(pic)
res = pic.info["dpi"] # fetch tuple of dpi
lower = min(res) # get the lower of the two entries in the tuple
factor = 300 / lower # how much should we scale?
resized = pic.resize((round(pic.size[0]*factor), round(pic.size[1]*factor))) # scale it!
return resized'''
def threshold(pic, gaussian=True): # needs work
"""
Applies thresholding to the image. Doesn't work.
(Tesseract already tries the Otsu algorithm.)
Args:
pic: filename string, pathlib.Path object, or file object to read.
gaussian: boolean:
True: apply adaptive Gaussian thresholding.
False: apply adaptive mean thresholding.
Returns:
The image with thresholding.
"""
    img = cv.imread(pic, 0)  # read the supplied image as grayscale
if gaussian: # adaptive Gaussian thresholding
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
else: # adaptive mean thresholding
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
return Image.fromarray(img)
def denoise(pic): # needs work
"""
Allegedly removes noise? Useful if OCR doesn't work.
Args:
pic: filename string, pathlib.Path object, or file object to read.
Returns:
The denoised image.
"""
img = cv.imread(pic)
img = cv.fastNlMeansDenoising(img)
return Image.fromarray(img)
def dilate(pic, size):
"""
Dilates the text (grows edges of characters) if it's against a common background.
Useful if OCR doesn't work.
Args:
pic: PIL.Image object to dilate.
size: kernel size, in pixels. Recommend starting at 1.
Returns:
The dilated PIL.Image object.
"""
pic = remove_alpha(pic)
return pic.filter(ImageFilter.MaxFilter(size))
def erode(pic, size):
"""
Erodes the text (shrinks edges of characters) if it's against a common background.
Useful if OCR doesn't work.
Args:
pic: PIL.Image object to erode.
size: kernel size, in pixels. Recommend starting at 1.
Returns:
The eroded PIL.Image object.
"""
pic = remove_alpha(pic)
return pic.filter(ImageFilter.MinFilter(size))
def deskew(pic, output): # needs work
"""
Deskews an image. Useful if OCR doesn't work.
Args:
pic: filename string, pathlib.Path object, or file object to read.
output: string to save output as
"""
# Thanks to <NAME> (https://github.com/sbrunner) for deskew and the code!
img = io.imread(pic)
grayscale = rgb2gray(img)
angle = determine_skew(grayscale)
rotated = rotate(img, angle, resize=True) * 255
io.imsave(output, rotated.astype(np.uint8))
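# Hedged end-to-end sketch of the intended workflow: try a plain OCR pass first and,
# if it fails, preprocess (for example deskew) and retry. 'scan.png' and
# 'scan_deskewed.png' are placeholder filenames.
#
#   text = parse("scan.png", accuracy_pct=70)
#   if text == "OCR failed.":
#       deskew("scan.png", "scan_deskewed.png")
#       text = parse("scan_deskewed.png", accuracy_pct=70)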
|
[
"PIL.Image.fromarray",
"skimage.color.rgb2gray",
"PIL.Image.open",
"cv2.fastNlMeansDenoising",
"skimage.transform.rotate",
"spellchecker.SpellChecker",
"deskew.determine_skew",
"PIL.ImageFilter.MinFilter",
"skimage.io.imread",
"cv2.adaptiveThreshold",
"pytesseract.image_to_string",
"PIL.ImageFilter.MaxFilter",
"traceback.print_exc",
"cv2.imread"
] |
[((1162, 1182), 'pytesseract.image_to_string', 'image_to_string', (['img'], {}), '(img)\n', (1177, 1182), False, 'from pytesseract import image_to_string\n'), ((2320, 2406), 'spellchecker.SpellChecker', 'SpellChecker', ([], {'language': 'language', 'distance': 'distance', 'case_sensitive': 'case_sensitive'}), '(language=language, distance=distance, case_sensitive=\n case_sensitive)\n', (2332, 2406), False, 'from spellchecker import SpellChecker\n'), ((5426, 5451), 'cv2.imread', 'cv.imread', (['"""test2.jpg"""', '(0)'], {}), "('test2.jpg', 0)\n", (5435, 5451), True, 'import cv2 as cv\n'), ((5750, 5770), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (5765, 5770), False, 'from PIL import Image, ImageFilter, ImageOps\n'), ((6014, 6028), 'cv2.imread', 'cv.imread', (['pic'], {}), '(pic)\n', (6023, 6028), True, 'import cv2 as cv\n'), ((6039, 6067), 'cv2.fastNlMeansDenoising', 'cv.fastNlMeansDenoising', (['img'], {}), '(img)\n', (6062, 6067), True, 'import cv2 as cv\n'), ((6079, 6099), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6094, 6099), False, 'from PIL import Image, ImageFilter, ImageOps\n'), ((7226, 7240), 'skimage.io.imread', 'io.imread', (['pic'], {}), '(pic)\n', (7235, 7240), False, 'from skimage import io\n'), ((7257, 7270), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (7265, 7270), False, 'from skimage.color import rgb2gray\n'), ((7283, 7308), 'deskew.determine_skew', 'determine_skew', (['grayscale'], {}), '(grayscale)\n', (7297, 7308), False, 'from deskew import determine_skew\n'), ((815, 830), 'PIL.Image.open', 'Image.open', (['pic'], {}), '(pic)\n', (825, 830), False, 'from PIL import Image, ImageFilter, ImageOps\n'), ((5516, 5607), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['img', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY', '(11)', '(2)'], {}), '(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY, 11, 2)\n', (5536, 5607), True, 'import cv2 as cv\n'), ((5656, 5742), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['img', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY', '(11)', '(2)'], {}), '(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY,\n 11, 2)\n', (5676, 5742), True, 'import cv2 as cv\n'), ((6475, 6502), 'PIL.ImageFilter.MaxFilter', 'ImageFilter.MaxFilter', (['size'], {}), '(size)\n', (6496, 6502), False, 'from PIL import Image, ImageFilter, ImageOps\n'), ((6877, 6904), 'PIL.ImageFilter.MinFilter', 'ImageFilter.MinFilter', (['size'], {}), '(size)\n', (6898, 6904), False, 'from PIL import Image, ImageFilter, ImageOps\n'), ((7323, 7354), 'skimage.transform.rotate', 'rotate', (['img', 'angle'], {'resize': '(True)'}), '(img, angle, resize=True)\n', (7329, 7354), False, 'from skimage.transform import rotate\n'), ((1092, 1113), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1111, 1113), False, 'import traceback\n')]
|
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector.backends.numpy_
import vector.backends.object_
def test_xy():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectXY(1, 0)
)
assert vec.rotateZ(0.1).x == pytest.approx(0.9950041652780258)
assert vec.rotateZ(0.1).y == pytest.approx(0.09983341664682815)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("x", numpy.float64), ("y", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("x", "y")
assert numpy.allclose(out.x, [0, 0.9950041652780258, -0.09983341664682815])
assert numpy.allclose(out.y, [0, 0.09983341664682815, 0.9950041652780258])
def test_rhophi():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectRhoPhi(1, 0)
)
assert vec.rotateZ(0.1).rho == pytest.approx(1)
assert vec.rotateZ(0.1).phi == pytest.approx(0.1)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("rho", numpy.float64), ("phi", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("rho", "phi")
assert numpy.allclose(out.rho, [0, 1, 0])
assert numpy.allclose(out.phi, [0.1, 0.1, 1.1])
|
[
"pytest.approx",
"numpy.allclose"
] |
[((853, 921), 'numpy.allclose', 'numpy.allclose', (['out.x', '[0, 0.9950041652780258, -0.09983341664682815]'], {}), '(out.x, [0, 0.9950041652780258, -0.09983341664682815])\n', (867, 921), False, 'import numpy\n'), ((933, 1000), 'numpy.allclose', 'numpy.allclose', (['out.y', '[0, 0.09983341664682815, 0.9950041652780258]'], {}), '(out.y, [0, 0.09983341664682815, 0.9950041652780258])\n', (947, 1000), False, 'import numpy\n'), ((1555, 1589), 'numpy.allclose', 'numpy.allclose', (['out.rho', '[0, 1, 0]'], {}), '(out.rho, [0, 1, 0])\n', (1569, 1589), False, 'import numpy\n'), ((1601, 1641), 'numpy.allclose', 'numpy.allclose', (['out.phi', '[0.1, 0.1, 1.1]'], {}), '(out.phi, [0.1, 0.1, 1.1])\n', (1615, 1641), False, 'import numpy\n'), ((448, 481), 'pytest.approx', 'pytest.approx', (['(0.9950041652780258)'], {}), '(0.9950041652780258)\n', (461, 481), False, 'import pytest\n'), ((515, 549), 'pytest.approx', 'pytest.approx', (['(0.09983341664682815)'], {}), '(0.09983341664682815)\n', (528, 549), False, 'import pytest\n'), ((1173, 1189), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (1186, 1189), False, 'import pytest\n'), ((1225, 1243), 'pytest.approx', 'pytest.approx', (['(0.1)'], {}), '(0.1)\n', (1238, 1243), False, 'import pytest\n')]
|
# This python script handles stock api request from yfinance
# Last Updated: 4/7/2020
# Credits:nóto
#Import yfinance api lib
import yfinance as yf
#Import pandas lib
import pandas as pd
#Import json to manipulate api data
import json
#Import math
import math
class StockApi():
def __init__(self):
self.panda = pd
def request_data(self, t, p='1d', i="5m"):
#set the stock we would like to search for
stock = yf.Ticker(t)
#Retrieve data and store as Panda Data Frame
self.unclean_data = stock.history(period=p,interval=i)
#unclean_data selectors stored in an array
self.data_selectors = list(self.unclean_data.columns)
        # create a list of the index values, which are the timestamps of our data
self.time_stamps = list(self.unclean_data.index)
#get the length
self.time_stamp_total_length = len(self.time_stamps)
#now let us clean the data
self.clean_data()
#lets convert the data and return it back to what ever called us
return self.convert_data()
#END
#function to organize 'clean' the stock data
def clean_data(self):
#function to clean panda data returned by Api
#
self.new_data = {
}
for count in range(self.time_stamp_total_length):
#get the next timestamp and store it as a string
self.new_time_stamp = str(self.time_stamps[count])
#insert new data here
if(not math.isnan((self.unclean_data.iloc[count].to_list())[0])):
self.new_data.update({self.new_time_stamp:self.unclean_data.iloc[count].to_list()})
for i in range(4):
self.new_data[self.new_time_stamp][i] = (round(self.new_data[self.new_time_stamp][i], 2))
#return the new data
return self.new_data
#END
#function to convert the data so the front end can read it
def convert_data(self):
self.new_data = json.dumps(self.new_data, indent=2)
return self.new_data
#END
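# Editor's demo, guarded so it only runs when this file is executed directly. The
# "AAPL" ticker is an illustrative choice; the call goes through yfinance, so it
# needs network access.
if __name__ == "__main__":
    api = StockApi()
    # fetch one day of 5-minute bars and print the cleaned JSON payload
    print(api.request_data("AAPL", p="1d", i="5m"))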
|
[
"json.dumps",
"yfinance.Ticker"
] |
[((443, 455), 'yfinance.Ticker', 'yf.Ticker', (['t'], {}), '(t)\n', (452, 455), True, 'import yfinance as yf\n'), ((2017, 2052), 'json.dumps', 'json.dumps', (['self.new_data'], {'indent': '(2)'}), '(self.new_data, indent=2)\n', (2027, 2052), False, 'import json\n')]
|
from datetime import date
from random import randrange
import factory
import factory.fuzzy
from hth.core.tests.utils import from_today
class VenueFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Venue'
name = factory.Sequence(lambda n: 'Venue %d' % n)
city = factory.Sequence(lambda n: 'City %d' % n)
website = factory.Sequence(lambda n: 'http://venue-%d.dev' % n)
class GigFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Gig'
date = factory.fuzzy.FuzzyDate(date(2000, 1, 1))
venue = factory.SubFactory(VenueFactory)
description = factory.fuzzy.FuzzyText(length=100)
details = factory.fuzzy.FuzzyText(length=100)
class PublishedGigFactory(GigFactory):
publish = True
class UpcomingGigFactory(PublishedGigFactory):
# Pick a random date from today through next year
date = factory.LazyAttribute(lambda obj: from_today(days=randrange(365)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date)
class PastGigFactory(PublishedGigFactory):
# Pick a random date from 10 years ago through yesterday
date = factory.LazyAttribute(lambda obj: from_today(randrange(-3650, 0)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date, reverse=True)
|
[
"factory.SubFactory",
"factory.fuzzy.FuzzyText",
"random.randrange",
"datetime.date",
"factory.Sequence"
] |
[((254, 296), 'factory.Sequence', 'factory.Sequence', (["(lambda n: 'Venue %d' % n)"], {}), "(lambda n: 'Venue %d' % n)\n", (270, 296), False, 'import factory\n'), ((308, 349), 'factory.Sequence', 'factory.Sequence', (["(lambda n: 'City %d' % n)"], {}), "(lambda n: 'City %d' % n)\n", (324, 349), False, 'import factory\n'), ((364, 417), 'factory.Sequence', 'factory.Sequence', (["(lambda n: 'http://venue-%d.dev' % n)"], {}), "(lambda n: 'http://venue-%d.dev' % n)\n", (380, 417), False, 'import factory\n'), ((584, 616), 'factory.SubFactory', 'factory.SubFactory', (['VenueFactory'], {}), '(VenueFactory)\n', (602, 616), False, 'import factory\n'), ((635, 670), 'factory.fuzzy.FuzzyText', 'factory.fuzzy.FuzzyText', ([], {'length': '(100)'}), '(length=100)\n', (658, 670), False, 'import factory\n'), ((685, 720), 'factory.fuzzy.FuzzyText', 'factory.fuzzy.FuzzyText', ([], {'length': '(100)'}), '(length=100)\n', (708, 720), False, 'import factory\n'), ((554, 570), 'datetime.date', 'date', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (558, 570), False, 'from datetime import date\n'), ((1292, 1311), 'random.randrange', 'randrange', (['(-3650)', '(0)'], {}), '(-3650, 0)\n', (1301, 1311), False, 'from random import randrange\n'), ((947, 961), 'random.randrange', 'randrange', (['(365)'], {}), '(365)\n', (956, 961), False, 'from random import randrange\n')]
|
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
# pylint: disable=missing-module-docstring
import os
from typing import Any, Dict, Iterator, Tuple
from tensorbay.dataset import Data, Dataset
from tensorbay.exception import ModuleImportError
from tensorbay.label import Classification, LabeledBox2D, LabeledKeypoints2D
DATASET_NAME = "FLIC"
_VALID_KEYPOINT_INDICES = [0, 1, 2, 3, 4, 5, 6, 9, 12, 13, 16]
def FLIC(path: str) -> Dataset:
"""`FLIC <https://bensapp.github.io/flic-dataset.html>`_ dataset.
The folder structure should be like::
<path>
            examples.mat
images/
2-fast-2-furious-00003571.jpg
...
Arguments:
path: The root directory of the dataset.
Raises:
ModuleImportError: When the module "scipy" can not be found.
Returns:
Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
"""
try:
from scipy.io import loadmat # pylint: disable=import-outside-toplevel
except ModuleNotFoundError as error:
raise ModuleImportError(module_name=error.name) from error
root_path = os.path.abspath(os.path.expanduser(path))
dataset = Dataset(DATASET_NAME)
annotations = loadmat(os.path.join(root_path, "examples.mat"))["examples"][0]
dataset.create_segment("train")
dataset.create_segment("test")
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
# try whether the dataset has bad segment
try:
_ = annotations["isbad"]
flag = True
dataset.create_segment("bad")
dataset.catalog.classification.add_attribute(name="isunchecked", type_="boolean")
except ValueError:
flag = False
for data, segment_name in _get_data(root_path, annotations, flag):
dataset[segment_name].append(data)
return dataset
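# Hedged usage sketch ('path/to/FLIC' is a placeholder for a dataset root laid out as
# described in the docstring above):
#
#   dataset = FLIC("path/to/FLIC")
#   print(len(dataset["train"]), len(dataset["test"]))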
def _get_data(path: str, annotations: Any, flag: bool) -> Iterator[Tuple[Data, str]]:
filepath_to_data: Dict[str, Data] = {}
for annotation in annotations:
filepath = annotation["filepath"][0]
keypoints = LabeledKeypoints2D(
annotation["coords"].T[_VALID_KEYPOINT_INDICES],
attributes={"poselet_hit_idx": annotation["poselet_hit_idx"].T.tolist()},
)
box2d = LabeledBox2D(*annotation["torsobox"][0].tolist())
if filepath not in filepath_to_data:
data = Data(os.path.join(path, "images", filepath))
data.label.keypoints2d = [keypoints]
data.label.box2d = [box2d]
attribute = {"currframe": int(annotation["currframe"][0][0])}
if flag:
attribute["isunchecked"] = bool(annotation["isunchecked"])
data.label.classification = Classification(
category=annotation["moviename"][0], attributes=attribute
)
filepath_to_data[filepath] = data
if annotation["istrain"]:
segment_name = "train"
elif annotation["istest"]:
segment_name = "test"
else:
segment_name = "bad"
yield data, segment_name
else:
image_data = filepath_to_data[filepath]
image_data.label.keypoints2d.append(keypoints)
image_data.label.box2d.append(box2d)
|
[
"tensorbay.dataset.Dataset",
"tensorbay.exception.ModuleImportError",
"os.path.join",
"os.path.dirname",
"tensorbay.label.Classification",
"os.path.expanduser"
] |
[((1253, 1274), 'tensorbay.dataset.Dataset', 'Dataset', (['DATASET_NAME'], {}), '(DATASET_NAME)\n', (1260, 1274), False, 'from tensorbay.dataset import Data, Dataset\n'), ((1212, 1236), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (1230, 1236), False, 'import os\n'), ((1126, 1167), 'tensorbay.exception.ModuleImportError', 'ModuleImportError', ([], {'module_name': 'error.name'}), '(module_name=error.name)\n', (1143, 1167), False, 'from tensorbay.exception import ModuleImportError\n'), ((1467, 1492), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1482, 1492), False, 'import os\n'), ((2812, 2885), 'tensorbay.label.Classification', 'Classification', ([], {'category': "annotation['moviename'][0]", 'attributes': 'attribute'}), "(category=annotation['moviename'][0], attributes=attribute)\n", (2826, 2885), False, 'from tensorbay.label import Classification, LabeledBox2D, LabeledKeypoints2D\n'), ((1302, 1341), 'os.path.join', 'os.path.join', (['root_path', '"""examples.mat"""'], {}), "(root_path, 'examples.mat')\n", (1314, 1341), False, 'import os\n'), ((2473, 2511), 'os.path.join', 'os.path.join', (['path', '"""images"""', 'filepath'], {}), "(path, 'images', filepath)\n", (2485, 2511), False, 'import os\n')]
|
# coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class PIDataServerLicense(object):
swagger_types = {
'amount_left': 'str',
'amount_used': 'str',
'name': 'str',
'total_amount': 'str',
'links': 'PIDataServerLicenseLinks',
'web_exception': 'PIWebException',
}
attribute_map = {
'amount_left': 'AmountLeft',
'amount_used': 'AmountUsed',
'name': 'Name',
'total_amount': 'TotalAmount',
'links': 'Links',
'web_exception': 'WebException',
}
def __init__(self, amount_left=None, amount_used=None, name=None, total_amount=None, links=None, web_exception=None):
self._amount_left = None
self._amount_used = None
self._name = None
self._total_amount = None
self._links = None
self._web_exception = None
if amount_left is not None:
self.amount_left = amount_left
if amount_used is not None:
self.amount_used = amount_used
if name is not None:
self.name = name
if total_amount is not None:
self.total_amount = total_amount
if links is not None:
self.links = links
if web_exception is not None:
self.web_exception = web_exception
@property
def amount_left(self):
return self._amount_left
@amount_left.setter
def amount_left(self, amount_left):
self._amount_left = amount_left
@property
def amount_used(self):
return self._amount_used
@amount_used.setter
def amount_used(self, amount_used):
self._amount_used = amount_used
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, total_amount):
self._total_amount = total_amount
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
@property
def web_exception(self):
return self._web_exception
@web_exception.setter
def web_exception(self, web_exception):
self._web_exception = web_exception
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __ne__(self, other):
return not self == other
def __eq__(self, other):
if not isinstance(other, PIDataServerLicense):
return False
return self.__dict__ == other.__dict__
|
[
"six.iteritems"
] |
[((2603, 2632), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (2612, 2632), False, 'from six import iteritems\n')]
|
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict, Tuple
from skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
Employee,
MS_RCPSPModel,
SkillDetail,
)
def parse_imopse(input_data, max_horizon=None):
# parse the input
# print('input_data\n',input_data)
lines = input_data.split("\n")
# "General characteristics:
# Tasks: 161
# Resources: 10
# Precedence relations: 321
# Number of skill types: 9
# ====================================================================================================================
# ResourceID Salary Skills
# 1 14.2 Q2: 0 Q3: 2 Q1: 0 Q4: 2 Q7: 1 Q8: 2
# 2 31.2 Q0: 0 Q4: 2 Q7: 1 Q3: 1 Q8: 2 Q2: 0
# 3 34.4 Q4: 0 Q2: 1 Q6: 2 Q3: 1 Q0: 1 Q5: 0
# 4 26.0 Q5: 2 Q1: 1 Q4: 1 Q8: 2 Q0: 2 Q2: 2
# 5 30.8 Q8: 0 Q7: 1 Q3: 1 Q1: 2 Q4: 1 Q5: 1
# 6 17.3 Q6: 1 Q3: 2 Q4: 2 Q2: 0 Q7: 2 Q1: 0
# 7 19.8 Q1: 2 Q4: 2 Q5: 0 Q7: 1 Q3: 1 Q6: 2
# 8 35.8 Q2: 1 Q0: 1 Q3: 2 Q6: 0 Q7: 0 Q8: 1
# 9 37.6 Q7: 0 Q5: 2 Q2: 0 Q1: 0 Q0: 1 Q3: 1
# 10 23.5 Q8: 1 Q5: 1 Q1: 2 Q6: 0 Q4: 0 Q3: 2 "
nb_task = None
nb_worker = None
nb_precedence_relation = None
nb_skills = None
resource_zone = False
task_zone = False
resource_dict = {}
task_dict = {}
real_skills_found = set()
for line in lines:
words = line.split()
if len(words) == 2 and words[0] == "Tasks:":
nb_task = int(words[1])
continue
if len(words) == 2 and words[0] == "Resources:":
nb_worker = int(words[1])
continue
if len(words) == 3 and words[0] == "Precedence" and words[1] == "relations:":
nb_precedence_relation = int(words[2])
continue
if len(words) == 5 and words[0] == "Number" and words[1] == "of":
nb_skills = int(words[4])
continue
if len(words) == 0:
continue
if words[0] == "ResourceID":
resource_zone = True
continue
if words[0] == "TaskID":
task_zone = True
continue
if resource_zone:
if words[0][0] == "=":
resource_zone = False
continue
else:
id_worker = words[0]
resource_dict[id_worker] = {"salary": float(words[1])}
for word in words[2:]:
if word[0] == "Q":
current_skill = word[:-1]
continue
resource_dict[id_worker][current_skill] = int(word) + 1
real_skills_found.add(current_skill)
if task_zone:
if words[0][0] == "=":
task_zone = False
continue
else:
task_id = int(words[0])
if task_id not in task_dict:
task_dict[task_id] = {"id": task_id, "successors": [], "skills": {}}
task_dict[task_id]["duration"] = int(words[1])
i = 2
while i < len(words):
if words[i][0] == "Q":
current_skill = words[i][:-1]
task_dict[task_id]["skills"][current_skill] = int(words[i + 1]) + 1
real_skills_found.add(current_skill)
i = i + 2
continue
else:
if "precedence" not in task_dict[task_id]:
task_dict[task_id]["precedence"] = []
task_dict[task_id]["precedence"] += [int(words[i])]
if int(words[i]) not in task_dict:
task_dict[int(words[i])] = {
"id": int(words[i]),
"successors": [],
"skills": {},
}
if "successors" not in task_dict[int(words[i])]:
task_dict[int(words[i])]["successors"] = []
task_dict[int(words[i])]["successors"] += [task_id]
i += 1
# print(resource_dict)
# print(task_dict)
sorted_task_names = sorted(task_dict.keys())
task_id_to_new_name = {
sorted_task_names[i]: i + 2 for i in range(len(sorted_task_names))
}
new_tame_to_original_task_id = {
task_id_to_new_name[ind]: ind for ind in task_id_to_new_name
}
mode_details = {
task_id_to_new_name[task_id]: {1: {"duration": task_dict[task_id]["duration"]}}
for task_id in task_dict
}
resource_dict = {int(i): resource_dict[i] for i in resource_dict}
# skills = set(["Q"+str(i) for i in range(nb_skills)])
skills = real_skills_found
for task_id in task_dict:
for skill in skills:
req_squill = task_dict[task_id]["skills"].get(skill, 0.0)
mode_details[task_id_to_new_name[task_id]][1][skill] = req_squill
mode_details[1] = {1: {"duration": 0}}
for skill in skills:
mode_details[1][1][skill] = int(0)
max_t = max(mode_details)
mode_details[max_t + 1] = {1: {"duration": 0}}
for skill in skills:
mode_details[max_t + 1][1][skill] = int(0)
successors = {
task_id_to_new_name[task_id]: [
task_id_to_new_name[t] for t in task_dict[task_id]["successors"]
]
+ [max_t + 1]
for task_id in task_dict
}
successors[max_t + 1] = []
successors[1] = [k for k in successors]
# max_horizon = 2*sum([task_dict[task_id]["duration"] for task_id in task_dict])
max_horizon = 300 if max_horizon is None else max_horizon
return (
MS_RCPSPModel(
skills_set=set(real_skills_found),
resources_set=set(),
non_renewable_resources=set(),
resources_availability={},
employees={
res: Employee(
dict_skill={
skill: SkillDetail(
skill_value=resource_dict[res][skill],
efficiency_ratio=1.0,
experience=1.0,
)
for skill in resource_dict[res]
if skill != "salary"
},
salary=resource_dict[res]["salary"],
calendar_employee=[True] * max_horizon,
)
for res in resource_dict
},
employees_availability=[len(resource_dict)] * max_horizon,
mode_details=mode_details,
successors=successors,
horizon=max_horizon,
source_task=1,
sink_task=max_t + 1,
one_unit_per_task_max=True,
),
new_tame_to_original_task_id,
)
def parse_file(file_path, max_horizon=None) -> Tuple[MS_RCPSPModel, Dict]:
with open(file_path, "r") as input_data_file:
input_data = input_data_file.read()
rcpsp_model, new_tame_to_original_task_id = parse_imopse(
input_data, max_horizon
)
return rcpsp_model, new_tame_to_original_task_id
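# Hedged usage sketch: the filename below is a placeholder for any iMOPSE instance
# file in the format sketched in the comments of parse_imopse above.
#
#   model, task_id_map = parse_file("imopse_instance.def", max_horizon=300)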
|
[
"skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill.SkillDetail"
] |
[((6434, 6526), 'skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill.SkillDetail', 'SkillDetail', ([], {'skill_value': 'resource_dict[res][skill]', 'efficiency_ratio': '(1.0)', 'experience': '(1.0)'}), '(skill_value=resource_dict[res][skill], efficiency_ratio=1.0,\n experience=1.0)\n', (6445, 6526), False, 'from skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import Employee, MS_RCPSPModel, SkillDetail\n')]
|
import logging
# monkey patch to suppress the annoying warning you get when you import apache_beam
#
# No handlers could be found for logger "oauth2client.contrib.multistore_file"
#
# This warning is harmless, but annooying when you are using beam from a command line app
# see: https://issues.apache.org/jira/browse/BEAM-1183
# This just creates a null handler for that logger so there is no output
logger = logging.getLogger('oauth2client.contrib.multistore_file')
handler = logging.NullHandler()
logger.addHandler(handler)
|
[
"logging.getLogger",
"logging.NullHandler"
] |
[((412, 469), 'logging.getLogger', 'logging.getLogger', (['"""oauth2client.contrib.multistore_file"""'], {}), "('oauth2client.contrib.multistore_file')\n", (429, 469), False, 'import logging\n'), ((480, 501), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (499, 501), False, 'import logging\n')]
|
# Copyright 2022 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import numpy as np
from floris.tools import FlorisInterface
from floris.tools.visualization import visualize_cut_plane
"""
04_sweep_wind_directions
This example demonstrates vectorization of wind direction.
A vector of wind directions is passed to the intialize function
and the powers of the two simulated turbines is computed for all
wind directions in one call
The power of both turbines for each wind direction is then plotted
"""
# Instantiate FLORIS using either the GCH or CC model
fi = FlorisInterface("inputs/gch.yaml") # GCH model matched to the default "legacy_gauss" of V2
# fi = FlorisInterface("inputs/cc.yaml") # New CumulativeCurl model
# Define a two turbine farm
D = 126.
layout_x = np.array([0, D*6])
layout_y = [0, 0]
fi.reinitialize(layout = [layout_x, layout_y])
# Sweep wind directions while keeping the wind speed fixed
wd_array = np.arange(250,291,1.)
fi.reinitialize(wind_directions=wd_array)
# Define a matrix of yaw angles to be all 0
# Note that yaw angles is now specified as a matrix whose dimesions are
# wd/ws/turbine
num_wd = len(wd_array) # Number of wind directions
num_ws = 1 # Number of wind speeds
num_turbine = len(layout_x) # Number of turbines
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))
# Calculate
fi.calculate_wake(yaw_angles=yaw_angles)
# Collect the turbine powers
turbine_powers = fi.get_turbine_powers() / 1E3 # In kW
# Pull out the power values per turbine
pow_t0 = turbine_powers[:,:,0].flatten()
pow_t1 = turbine_powers[:,:,1].flatten()
# Plot
fig, ax = plt.subplots()
ax.plot(wd_array,pow_t0,color='k',label='Upstream Turbine')
ax.plot(wd_array,pow_t1,color='r',label='Downstream Turbine')
ax.grid(True)
ax.legend()
ax.set_xlabel('Wind Direction (deg)')
ax.set_ylabel('Power (kW)')
plt.show()
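# Editor's addition: report the wind direction where the downstream turbine is most
# strongly waked; with the two turbines aligned along +x, this should fall near 270 deg.
deepest_wake_wd = wd_array[np.argmin(pow_t1)]
print(f"Downstream power is lowest at {deepest_wake_wd:.1f} deg ({np.min(pow_t1):.1f} kW)")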
|
[
"numpy.array",
"numpy.zeros",
"floris.tools.FlorisInterface",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1151, 1185), 'floris.tools.FlorisInterface', 'FlorisInterface', (['"""inputs/gch.yaml"""'], {}), "('inputs/gch.yaml')\n", (1166, 1185), False, 'from floris.tools import FlorisInterface\n'), ((1359, 1379), 'numpy.array', 'np.array', (['[0, D * 6]'], {}), '([0, D * 6])\n', (1367, 1379), True, 'import numpy as np\n'), ((1505, 1529), 'numpy.arange', 'np.arange', (['(250)', '(291)', '(1.0)'], {}), '(250, 291, 1.0)\n', (1514, 1529), True, 'import numpy as np\n'), ((1851, 1890), 'numpy.zeros', 'np.zeros', (['(num_wd, num_ws, num_turbine)'], {}), '((num_wd, num_ws, num_turbine))\n', (1859, 1890), True, 'import numpy as np\n'), ((2172, 2186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2184, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2409, 2411), True, 'import matplotlib.pyplot as plt\n')]
|
"A Console-Based Email Client"
#!/usr/local/bin/python
"""
##########################################################################
pymail - a simple console email interface client in Python; uses Python
poplib module to view POP email messages, smtplib to send new mails, and
the email package to extract mail headers and payload and compose mails;
##########################################################################
"""
import poplib, smtplib, email.utils, mailconfig
from email.parser import Parser
from email.message import Message
fetchEncoding = mailconfig.fetchEncoding
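# Hedged sketch of the mailconfig module this script expects; the attribute names are
# taken from how they are used below, but the values are illustrative placeholders.
#
#   popservername  = 'pop.example.com'
#   popusername    = 'someuser'
#   smtpservername = 'smtp.example.com'
#   savemailfile   = r'savemail.txt'
#   fetchEncoding  = 'utf8'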
def decodeToUnicode(messageBytes, fetchEncoding=fetchEncoding):
"""
4E, Py3.1: decode fetched bytes to str Unicode string for display or parsing;
use global setting (or by platform default, hdrs inspection, intelligent guess);
in Python 3.2/3.3, this step may not be required: if so, return message intact;
"""
return [line.decode(fetchEncoding) for line in messageBytes]
def splitaddrs(field):
"""
4E: split address list on commas, allowing for commas in name parts
"""
pairs = email.utils.getaddresses([field]) # [(name,addr)]
return [email.utils.formataddr(pair) for pair in pairs] # [name <addr>]
def inputmessage():
import sys
From = input('From? ').strip()
To = input('To? ').strip() # datetime hdr may be set auto
To = splitaddrs(To) # possible many, name+<addr> okay
Subj = input('Subj? ').strip() # don't split blindly on ',' or ';'
print('Type message text, end with line="."')
text = ''
while True:
line = sys.stdin.readline()
if line == '.\n': break
text += line
return From, To, Subj, text
def sendmessage():
From, To, Subj, text = inputmessage()
msg = Message()
msg['From'] = From
msg['To'] = ', '.join(To) # join for hdr, not send
msg['Subject'] = Subj
msg['Date'] = email.utils.formatdate() # curr datetime, rfc2822
msg.set_payload(text)
server = smtplib.SMTP(mailconfig.smtpservername)
try:
failed = server.sendmail(From, To, str(msg)) # may also raise exc
except:
print('Error - send failed')
else:
if failed: print('Failed:', failed)
def connect(servername, user, passwd):
print('Connecting...')
server = poplib.POP3(servername)
server.user(user) # connect, log in to mail server
server.pass_(passwd) # pass is a reserved word
print(server.getwelcome()) # print returned greeting message
return server
def loadmessages(servername, user, passwd, loadfrom=1):
server = connect(servername, user, passwd)
try:
print(server.list())
(msgCount, msgBytes) = server.stat()
print('There are', msgCount, 'mail messages in', msgBytes, 'bytes')
print('Retrieving...')
msgList = [] # fetch mail now
for i in range(loadfrom, msgCount+1): # empty if low >= high
(hdr, message, octets) = server.retr(i) # save text on list
message = decodeToUnicode(message) # 4E, Py3.1: bytes to str
msgList.append('\n'.join(message)) # leave mail on server
finally:
server.quit() # unlock the mail box
assert len(msgList) == (msgCount - loadfrom) + 1 # msg nums start at 1
return msgList
def deletemessages(servername, user, passwd, toDelete, verify=True):
print('To be deleted:', toDelete)
if verify and input('Delete?')[:1] not in ['y', 'Y']:
print('Delete cancelled.')
else:
server = connect(servername, user, passwd)
try:
print('Deleting messages from server...')
for msgnum in toDelete: # reconnect to delete mail
server.dele(msgnum) # mbox locked until quit()
finally:
server.quit()
def showindex(msgList):
count = 0 # show some mail headers
for msgtext in msgList:
msghdrs = Parser().parsestr(msgtext, headersonly=True) # expects str in 3.1
count += 1
print('%d:\t%d bytes' % (count, len(msgtext)))
for hdr in ('From', 'To', 'Date', 'Subject'):
try:
print('\t%-8s=>%s' % (hdr, msghdrs[hdr]))
except KeyError:
print('\t%-8s=>(unknown)' % hdr)
if count % 5 == 0:
input('[Press Enter key]') # pause after each 5
def showmessage(i, msgList):
if 1 <= i <= len(msgList):
#print(msgList[i-1]) # old: prints entire mail--hdrs+text
print('-' * 79)
msg = Parser().parsestr(msgList[i-1]) # expects str in 3.1
content = msg.get_payload() # prints payload: string, or [Messages]
if isinstance(content, str): # keep just one end-line at end
content = content.rstrip() + '\n'
print(content)
print('-' * 79) # to get text only, see email.parsers
else:
print('Bad message number')
def savemessage(i, mailfile, msgList):
if 1 <= i <= len(msgList):
savefile = open(mailfile, 'a', encoding=mailconfig.fetchEncoding) # 4E
savefile.write('\n' + msgList[i-1] + '-'*80 + '\n')
else:
print('Bad message number')
def msgnum(command):
try:
return int(command.split()[1])
except:
return -1 # assume this is bad
helptext = """
Available commands:
i - index display
l n? - list all messages (or just message n)
d n? - mark all messages for deletion (or just message n)
s n? - save all messages to a file (or just message n)
m - compose and send a new mail message
q - quit pymail
? - display this help text
"""
def interact(msgList, mailfile):
showindex(msgList)
toDelete = []
while True:
try:
command = input('[Pymail] Action? (i, l, d, s, m, q, ?) ')
except EOFError:
command = 'q'
if not command: command = '*'
# quit
if command == 'q':
break
# index
elif command[0] == 'i':
showindex(msgList)
# list
elif command[0] == 'l':
if len(command) == 1:
for i in range(1, len(msgList)+1):
showmessage(i, msgList)
else:
showmessage(msgnum(command), msgList)
# save
elif command[0] == 's':
if len(command) == 1:
for i in range(1, len(msgList)+1):
savemessage(i, mailfile, msgList)
else:
savemessage(msgnum(command), mailfile, msgList)
# delete
elif command[0] == 'd':
if len(command) == 1: # delete all later
toDelete = list(range(1, len(msgList)+1)) # 3.x requires list
else:
delnum = msgnum(command)
if (1 <= delnum <= len(msgList)) and (delnum not in toDelete):
toDelete.append(delnum)
else:
print('Bad message number')
# mail
elif command[0] == 'm': # send a new mail via SMTP
sendmessage()
#execfile('smtpmail.py', {}) # alt: run file in own namespace
elif command[0] == '?':
print(helptext)
else:
print('What? -- type "?" for commands help')
return toDelete
if __name__ == '__main__':
import getpass, mailconfig
mailserver = mailconfig.popservername # ex: 'pop.rmi.net'
mailuser = mailconfig.popusername # ex: 'lutz'
mailfile = mailconfig.savemailfile # ex: r'c:\stuff\savemail'
mailpswd = getpass.getpass('Password for %s?' % mailserver)
print('[Pymail email client]')
msgList = loadmessages(mailserver, mailuser, mailpswd) # load all
toDelete = interact(msgList, mailfile)
if toDelete: deletemessages(mailserver, mailuser, mailpswd, toDelete)
print('Bye.')
|
[
"smtplib.SMTP",
"email.parser.Parser",
"getpass.getpass",
"email.message.Message",
"sys.stdin.readline",
"poplib.POP3"
] |
[((1836, 1845), 'email.message.Message', 'Message', ([], {}), '()\n', (1843, 1845), False, 'from email.message import Message\n'), ((2097, 2136), 'smtplib.SMTP', 'smtplib.SMTP', (['mailconfig.smtpservername'], {}), '(mailconfig.smtpservername)\n', (2109, 2136), False, 'import poplib, smtplib, email.utils, mailconfig\n'), ((2405, 2428), 'poplib.POP3', 'poplib.POP3', (['servername'], {}), '(servername)\n', (2416, 2428), False, 'import poplib, smtplib, email.utils, mailconfig\n'), ((7949, 7997), 'getpass.getpass', 'getpass.getpass', (["('Password for %s?' % mailserver)"], {}), "('Password for %s?' % mailserver)\n", (7964, 7997), False, 'import getpass, mailconfig\n'), ((1658, 1678), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1676, 1678), False, 'import sys\n'), ((4222, 4230), 'email.parser.Parser', 'Parser', ([], {}), '()\n', (4228, 4230), False, 'from email.parser import Parser\n'), ((4834, 4842), 'email.parser.Parser', 'Parser', ([], {}), '()\n', (4840, 4842), False, 'from email.parser import Parser\n')]
|
import io
import os
from flask import Flask, request, jsonify
from PIL import Image
from resnet_model import MyResnetModel
app = Flask(__name__)
# max filesize 2mb
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
# setup resnet model
model = MyResnetModel(os.path.dirname(os.path.abspath(__file__)))
@app.route("/")
def hello():
return jsonify({"message": "Hello from the API"})
@app.route('/predict', methods=['POST'])
def predict():
if 'image' not in request.files:
return jsonify({"error": "Missing file in request"})
img = request.files['image']
return jsonify({"result": model.predict(img.read())})
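# Hedged client-side example (not part of the server): assumes the app is running on
# Flask's default port 5000, that 'cat.jpg' exists locally, and that the 'requests'
# package is available.
#
#   import requests
#   with open("cat.jpg", "rb") as f:
#       r = requests.post("http://localhost:5000/predict", files={"image": f})
#   print(r.json())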
|
[
"flask.jsonify",
"os.path.abspath",
"flask.Flask"
] |
[((130, 145), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'from flask import Flask, request, jsonify\n'), ((347, 389), 'flask.jsonify', 'jsonify', (["{'message': 'Hello from the API'}"], {}), "({'message': 'Hello from the API'})\n", (354, 389), False, 'from flask import Flask, request, jsonify\n'), ((278, 303), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import os\n'), ((500, 545), 'flask.jsonify', 'jsonify', (["{'error': 'Missing file in request'}"], {}), "({'error': 'Missing file in request'})\n", (507, 545), False, 'from flask import Flask, request, jsonify\n')]
|
from django.urls import reverse
from rest_framework import status
from .base import BaseTestCase
class FollowTestCase(BaseTestCase):
"""Testcases for following a user."""
def test_follow_user_post(self):
"""Test start following a user."""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_follow_already_followed_user(self):
"""Test start following a user you already follow."""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_follow_missing_user_post(self):
"""Test trying to start following a missing user."""
url = reverse('follow', kwargs={'username': 'joel'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_follow(self):
"""Test unfollowing a user"""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_follow_of_not_followed_user(self):
"""Test unfollowing a user you are not following"""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_list_followers_of_user(self):
"""Test list followers of a user"""
url_followers = reverse('getfollowers', kwargs={'username': 'test2'})
self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_user_is_following(self):
"""Test list users the user is following"""
url_following = reverse('getfollowing', kwargs={'username': 'test1'})
self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
[
"django.urls.reverse"
] |
[((272, 319), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (279, 319), False, 'from django.urls import reverse\n'), ((596, 643), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (603, 643), False, 'from django.urls import reverse\n'), ((977, 1023), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'joel'}"}), "('follow', kwargs={'username': 'joel'})\n", (984, 1023), False, 'from django.urls import reverse\n'), ((1263, 1310), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (1270, 1310), False, 'from django.urls import reverse\n'), ((1655, 1702), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (1662, 1702), False, 'from django.urls import reverse\n'), ((1971, 2024), 'django.urls.reverse', 'reverse', (['"""getfollowers"""'], {'kwargs': "{'username': 'test2'}"}), "('getfollowers', kwargs={'username': 'test2'})\n", (1978, 2024), False, 'from django.urls import reverse\n'), ((2122, 2169), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (2129, 2169), False, 'from django.urls import reverse\n'), ((2518, 2571), 'django.urls.reverse', 'reverse', (['"""getfollowing"""'], {'kwargs': "{'username': 'test1'}"}), "('getfollowing', kwargs={'username': 'test1'})\n", (2525, 2571), False, 'from django.urls import reverse\n'), ((2669, 2716), 'django.urls.reverse', 'reverse', (['"""follow"""'], {'kwargs': "{'username': 'test2'}"}), "('follow', kwargs={'username': 'test2'})\n", (2676, 2716), False, 'from django.urls import reverse\n')]
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Mail
class MailAdmin(admin.ModelAdmin):
list_display = ['subject', 'sent_time', 'recipients_total', 'successful_mails', 'failed_mails', 'done_sending']
ordering = ['-sent_time']
# Prevent creation
def has_add_permission(self, request, obj=None):
return False
# Prevent changes
def save_model(self, request, obj, form, change):
pass
# Prevent M2M changes
def save_related(self, request, form, formsets, change):
pass
admin.site.register(Mail, MailAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((559, 595), 'django.contrib.admin.site.register', 'admin.site.register', (['Mail', 'MailAdmin'], {}), '(Mail, MailAdmin)\n', (578, 595), False, 'from django.contrib import admin\n')]
|
from uuid import uuid4
from fastapi.testclient import TestClient
from ..main import app
client = TestClient(app)
class Test_Event:
record = {
"name": "<NAME>",
"description": "It is a coding event held in the month of Decemeber by Programming Club",
"created_on": "2022-01-28T21:33:50.795775",
"last_update": "2021-01-28T12:33:52.795775",
"start_time": "2022-02-19T19:33:10.895775",
"end_time": "2022-02-19T21:00:10.895775",
"image": "https://www.google.com/search?q=P",
"website": "",
"notify": True,
"is_online": False,
"meet_link": "",
"venue": "Carbon Building",
}
updated_record = {
"name": "<NAME>",
"description": "It is a coding event held in the month of Decemeber by Programming Club",
"created_on": "2022-01-28T21:33:50.795775",
"last_update": "2021-01-28T12:33:52.795775",
"start_time": "2022-02-19T19:33:10.895775",
"end_time": "2022-02-19T21:00:10.895775",
"image": "https://www.google.com/search?",
"website": "",
"notify": False,
"is_online": True,
"meet_link": "https://meet.google.com/abc-defg-hij",
"venue": "",
}
def test_create(self):
response = client.post("/event/", json=self.record)
assert response.status_code == 201, f"Received {response.status_code}"
response_record = response.json()
self.record["id"] = response_record["id"]
print(self.record)
for key in response_record.keys():
assert self.record[key] == response_record[key]
def test_get_one(self):
response = client.get(f"/event/{self.record['id']}")
assert response.status_code == 200, f"Received {response.status_code}"
assert response.json() == self.record
def test_get_non_existing(self):
response = client.get(f"/event/{uuid4()}")
assert response.status_code == 404, f"Received {response.status_code}"
assert response.json() == {"detail": "Event not found"}
def test_patch(self):
response = client.patch(
f"/event/{self.record['id']}", json=self.updated_record
)
assert response.status_code == 202, f"Received {response.status_code}"
assert response.json() == self.updated_record
def test_get_all(self):
response = client.get("/event/")
assert response.status_code == 200, f"Received {response.status_code}"
def test_delete(self):
response = client.delete(f"/event/{self.record['id']}")
assert response.status_code == 204, f"Received {response.status_code}"
def test_delete_non_existing(self):
response = client.get(f"/event/{uuid4()}")
assert response.status_code == 404, f"Received {response.status_code}"
assert response.json() == {"detail": "Event not found"}
|
[
"fastapi.testclient.TestClient",
"uuid.uuid4"
] |
[((100, 115), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (110, 115), False, 'from fastapi.testclient import TestClient\n'), ((1929, 1936), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1934, 1936), False, 'from uuid import uuid4\n'), ((2755, 2762), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2760, 2762), False, 'from uuid import uuid4\n')]
|
from setuptools import setup
setup(
name='uam_simulator',
version='1.0',
description='A tool to simulate different architectures for UAM traffic management',
author='<NAME>',
author_email='<EMAIL>',
packages=['uam_simulator'],
install_requires=['numpy', 'scikit-learn', 'gurobipy']
)
# If installing from source the package name is gurobipy, if installing with conda it's gurobi, but when importing it's still gurobipy
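# A small sketch expanding on the note above: whichever way the solver was installed
# (pip package `gurobipy` or conda package `gurobi`), the import name stays `gurobipy`,
# so a guarded import gives a clearer error when it is missing.
try:
    import gurobipy  # noqa: F401
except ImportError as exc:
    raise ImportError(
        "gurobipy is required: install `gurobipy` via pip or `gurobi` via conda."
    ) from exc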
|
[
"setuptools.setup"
] |
[((30, 295), 'setuptools.setup', 'setup', ([], {'name': '"""uam_simulator"""', 'version': '"""1.0"""', 'description': '"""A tool to simulate different architectures for UAM traffic management"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['uam_simulator']", 'install_requires': "['numpy', 'scikit-learn', 'gurobipy']"}), "(name='uam_simulator', version='1.0', description=\n 'A tool to simulate different architectures for UAM traffic management',\n author='<NAME>', author_email='<EMAIL>', packages=['uam_simulator'],\n install_requires=['numpy', 'scikit-learn', 'gurobipy'])\n", (35, 295), False, 'from setuptools import setup\n')]
|
import time
from os import system, walk
from config import CONFIG
from encry import ENCRY
from decry import DECRY
# Configuration setup function
def conf_setting():
system('CLS')
print("Enter key elements: ")
    # Choose the alphabet
alphabet = input("Select the used alphabet [EN]GLISH | [RU]SSIAN: ")
    # Enter the numeric key
numberKey = input("Enter a numeric key: ")
    # Enter the keyword
stringKey = input("Enter your keyword: ")
return CONFIG(alphabet, numberKey, stringKey)
def en_message():
print("Encryption")
def de_message():
print("Decryption")
def select_file():
    # Build a list of all .txt files
filelist = []
for root, dirs, files in walk("."):
for file in files:
if file.endswith(".txt"):
                # Add it to the list
filelist.append(file)
s = ''
while True:
system('CLS')
print("List of txt files: ")
for i in filelist:
print(i)
file = input("Select a file: ")
try:
f = open(file, 'r', encoding='utf-8')
s = f.read()
f.close()
break
except Exception:
print("Error: file not found")
return s
# Display the menu
def print_menu(cryptMode, CONF, text):
file_text = text
while cryptMode != 'EXIT':
system('CLS')
        # Choose an action
cryptMode = input("[E]ncryption|[D]ecryption| [Select] file |[S]etting configure |[Show] configuration |[Show text] |[Exit]: ").upper()
        # If the command does not exist
if cryptMode not in ['E', 'D', 'S', 'EXIT', 'SHOW', 'SELECT', 'SHOW TEXT']:
print("Error: command not find!")
time.sleep(2)
        # If configuration setup was chosen
if cryptMode == 'S':
CONF = conf_setting()
        # If encryption or decryption was chosen
if cryptMode in ['E', 'D']:
            # Check that a file is selected and the configuration has been set up
if CONF is not object:
try:
if cryptMode == 'E':
print("Encryption in progress please wait...")
en_text = ENCRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
print(file_text)
print(en_text)
try:
f = open("en_text.txt", 'w', encoding='utf-8')
f.write(en_text)
f.close()
print("Successfully. Encrypted file written! (en_text.txt)")
input("Please enter something to continue ...")
except Exception:
print("Error: file don't creat!")
input("Please enter something to continue ...")
if cryptMode == 'D':
print("Decryption in progress please wait...")
de_text = DECRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
print(file_text)
print(de_text)
try:
f = open("de_text.txt", 'w', encoding='utf-8')
f.write(de_text)
f.close()
print("Successfully. Encrypted file written! (de_text.txt)")
input("Please enter something to continue ...")
except Exception:
print("Error: file don't creat!")
input("Please enter something to continue ...")
except Exception:
print(Exception)
time.sleep(2)
else:
if CONF is object:
print("Customize the configuration!")
time.sleep(2)
if file_text == '':
print("Chose file!")
time.sleep(2)
print("Wait...")
time.sleep(2)
        # If file selection was chosen
if cryptMode == 'SELECT':
file_text = select_file()
        # If showing the configuration was chosen
if cryptMode == 'SHOW':
if CONF is not object:
CONF.print_conf()
input("Please enter something to continue ...")
else:
print("Customize the configuration!")
time.sleep(2)
        # If showing the text was chosen
if cryptMode == 'SHOW TEXT':
if file_text != '':
print(file_text)
input("Please enter something to continue ...")
else:
print("Please choose file!")
time.sleep(2)
if __name__ == '__main__':
CONF = object
text = ''
cryptMode = ''
print_menu(cryptMode, CONF, text)
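# A non-interactive usage sketch of the same building blocks the menu above wires
# together; CONFIG, ENCRY and DECRY are the modules imported at the top of this
# snippet, while the concrete key values below are made-up examples.
def roundtrip_sketch(plain_text):
    conf = CONFIG("EN", "3", "KEYWORD")
    encrypted = ENCRY(conf.alphaList, conf.new_alphaList, plain_text.upper()).new_text()
    decrypted = DECRY(conf.alphaList, conf.new_alphaList, encrypted.upper()).new_text()
    return encrypted, decrypted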
|
[
"os.system",
"time.sleep",
"os.walk",
"config.CONFIG"
] |
[((173, 186), 'os.system', 'system', (['"""CLS"""'], {}), "('CLS')\n", (179, 186), False, 'from os import system, walk\n'), ((473, 511), 'config.CONFIG', 'CONFIG', (['alphabet', 'numberKey', 'stringKey'], {}), '(alphabet, numberKey, stringKey)\n', (479, 511), False, 'from config import CONFIG\n'), ((706, 715), 'os.walk', 'walk', (['"""."""'], {}), "('.')\n", (710, 715), False, 'from os import system, walk\n'), ((893, 906), 'os.system', 'system', (['"""CLS"""'], {}), "('CLS')\n", (899, 906), False, 'from os import system, walk\n'), ((1359, 1372), 'os.system', 'system', (['"""CLS"""'], {}), "('CLS')\n", (1365, 1372), False, 'from os import system, walk\n'), ((1721, 1734), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1731, 1734), False, 'import time\n'), ((4139, 4152), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4149, 4152), False, 'import time\n'), ((4564, 4577), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4574, 4577), False, 'import time\n'), ((4861, 4874), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4871, 4874), False, 'import time\n'), ((3965, 3978), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3975, 3978), False, 'import time\n'), ((4076, 4089), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4086, 4089), False, 'import time\n'), ((3820, 3833), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3830, 3833), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe.model.document import Document
class EmailQueueRecipient(Document):
DOCTYPE = "Email Queue Recipient"
def is_mail_to_be_sent(self):
return self.status == "Not Sent"
def is_main_sent(self):
return self.status == "Sent"
def update_db(self, commit=False, **kwargs):
frappe.db.set_value(self.DOCTYPE, self.name, kwargs)
if commit:
frappe.db.commit()
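# A hedged usage sketch: `frappe.get_doc` is standard Frappe API and the recipient
# name below is a placeholder; it shows how `update_db` above forwards its keyword
# arguments straight to `frappe.db.set_value`.
def mark_recipient_sent(recipient_name):
    recipient = frappe.get_doc("Email Queue Recipient", recipient_name)
    if recipient.is_mail_to_be_sent():
        recipient.update_db(status="Sent", commit=True)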
|
[
"frappe.db.commit",
"frappe.db.set_value"
] |
[((416, 468), 'frappe.db.set_value', 'frappe.db.set_value', (['self.DOCTYPE', 'self.name', 'kwargs'], {}), '(self.DOCTYPE, self.name, kwargs)\n', (435, 468), False, 'import frappe\n'), ((485, 503), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (501, 503), False, 'import frappe\n')]
|
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument,pointless-statement
"""Tests for the `PseudoDojoFamily` class."""
import pytest
from aiida_pseudo.data.pseudo import UpfData, Psp8Data, PsmlData, JthXmlData
from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily
def test_type_string(clear_db):
"""Verify the `_type_string` class attribute is correctly set to the corresponding entry point name."""
assert PseudoDojoFamily._type_string == 'pseudo.family.pseudo_dojo' # pylint: disable=protected-access
def test_pseudo_types():
"""Test the `PseudoDojoFamily.pseudo_types` method."""
assert PseudoDojoFamily.pseudo_types == (UpfData, PsmlData, Psp8Data, JthXmlData)
def test_default_configuration():
"""Test the `PseudoDojoFamily.default_configuration` class attribute."""
assert isinstance(PseudoDojoFamily.default_configuration, PseudoDojoConfiguration)
def test_valid_configurations():
"""Test the `PseudoDojoFamily.valid_configurations` class attribute."""
valid_configurations = PseudoDojoFamily.valid_configurations
assert isinstance(valid_configurations, tuple)
for entry in valid_configurations:
assert isinstance(entry, PseudoDojoConfiguration)
def test_get_valid_labels():
"""Test the `PseudoDojoFamily.get_valid_labels` class method."""
valid_labels = PseudoDojoFamily.get_valid_labels()
assert isinstance(valid_labels, tuple)
for entry in valid_labels:
assert isinstance(entry, str)
def test_format_configuration_label():
"""Test the `PseudoDojoFamily.format_configuration_label` class method."""
configuration = PseudoDojoConfiguration('0.4', 'PBE', 'SR', 'standard', 'psp8')
assert PseudoDojoFamily.format_configuration_label(configuration) == 'PseudoDojo/0.4/PBE/SR/standard/psp8'
def test_constructor():
"""Test that the `PseudoDojoFamily` constructor validates the label."""
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily()
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily(label='nc-sr-04_pbe_standard_psp8')
label = PseudoDojoFamily.format_configuration_label(PseudoDojoFamily.default_configuration)
family = PseudoDojoFamily(label=label)
assert isinstance(family, PseudoDojoFamily)
@pytest.mark.usefixtures('clear_db')
def test_create_from_folder(filepath_pseudos):
"""Test the `PseudoDojoFamily.create_from_folder` class method."""
family = PseudoDojoFamily.create_from_folder(
filepath_pseudos('upf'), 'PseudoDojo/0.4/PBE/SR/standard/psp8', pseudo_type=UpfData
)
assert isinstance(family, PseudoDojoFamily)
@pytest.mark.usefixtures('clear_db')
def test_create_from_folder_duplicate(filepath_pseudos):
"""Test that `PseudoDojoFamily.create_from_folder` raises for duplicate label."""
label = 'PseudoDojo/0.4/PBE/SR/standard/psp8'
PseudoDojoFamily(label=label).store()
with pytest.raises(ValueError, match=r'the PseudoDojoFamily `.*` already exists'):
PseudoDojoFamily.create_from_folder(filepath_pseudos('upf'), label)
|
[
"aiida_pseudo.groups.family.PseudoDojoFamily",
"aiida_pseudo.groups.family.PseudoDojoFamily.get_valid_labels",
"pytest.raises",
"pytest.mark.usefixtures",
"aiida_pseudo.groups.family.PseudoDojoFamily.format_configuration_label",
"aiida_pseudo.groups.family.PseudoDojoConfiguration"
] |
[((2423, 2458), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clear_db"""'], {}), "('clear_db')\n", (2446, 2458), False, 'import pytest\n'), ((2776, 2811), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clear_db"""'], {}), "('clear_db')\n", (2799, 2811), False, 'import pytest\n'), ((1363, 1398), 'aiida_pseudo.groups.family.PseudoDojoFamily.get_valid_labels', 'PseudoDojoFamily.get_valid_labels', ([], {}), '()\n', (1396, 1398), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((1652, 1715), 'aiida_pseudo.groups.family.PseudoDojoConfiguration', 'PseudoDojoConfiguration', (['"""0.4"""', '"""PBE"""', '"""SR"""', '"""standard"""', '"""psp8"""'], {}), "('0.4', 'PBE', 'SR', 'standard', 'psp8')\n", (1675, 1715), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((2245, 2333), 'aiida_pseudo.groups.family.PseudoDojoFamily.format_configuration_label', 'PseudoDojoFamily.format_configuration_label', (['PseudoDojoFamily.default_configuration'], {}), '(PseudoDojoFamily.\n default_configuration)\n', (2288, 2333), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((2342, 2371), 'aiida_pseudo.groups.family.PseudoDojoFamily', 'PseudoDojoFamily', ([], {'label': 'label'}), '(label=label)\n', (2358, 2371), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((1727, 1785), 'aiida_pseudo.groups.family.PseudoDojoFamily.format_configuration_label', 'PseudoDojoFamily.format_configuration_label', (['configuration'], {}), '(configuration)\n', (1770, 1785), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((1938, 2038), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""the label `.*` is not a valid PseudoDojo configuration label"""'}), "(ValueError, match=\n 'the label `.*` is not a valid PseudoDojo configuration label')\n", (1951, 2038), False, 'import pytest\n'), ((2044, 2062), 'aiida_pseudo.groups.family.PseudoDojoFamily', 'PseudoDojoFamily', ([], {}), '()\n', (2060, 2062), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((2073, 2173), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""the label `.*` is not a valid PseudoDojo configuration label"""'}), "(ValueError, match=\n 'the label `.*` is not a valid PseudoDojo configuration label')\n", (2086, 2173), False, 'import pytest\n'), ((2179, 2231), 'aiida_pseudo.groups.family.PseudoDojoFamily', 'PseudoDojoFamily', ([], {'label': '"""nc-sr-04_pbe_standard_psp8"""'}), "(label='nc-sr-04_pbe_standard_psp8')\n", (2195, 2231), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n'), ((3057, 3132), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""the PseudoDojoFamily `.*` already exists"""'}), "(ValueError, match='the PseudoDojoFamily `.*` already exists')\n", (3070, 3132), False, 'import pytest\n'), ((3009, 3038), 'aiida_pseudo.groups.family.PseudoDojoFamily', 'PseudoDojoFamily', ([], {'label': 'label'}), '(label=label)\n', (3025, 3038), False, 'from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily\n')]
|
import sys
import json
import plotly
from flask import Flask
from flask import render_template, request
from plotly.graph_objects import Heatmap, Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
sys.path.append("common")
from common.nlp_common_utils import *
if len(sys.argv) == 1:
sys.argv.append('./data/DisasterResponse.db')
sys.argv.append('./models/classifier.pkl')
# this requires for joblib and pickle
def tokenize(text):
"""
    Uses a common utility function to tokenize text into a cleaned token list.
INPUT:
text - raw message
OUTPUT:
clean_tokens -- cleaned tokenized list
"""
return tokenize_text(text)
# create a flask app
app = Flask(__name__, template_folder='app/templates')
#
database_file_location, model_location = sys.argv[1:]
# load data
engine = create_engine('sqlite:///{}'.format(database_file_location))
df = pd.read_sql_table('DisasterResponse', engine)
# category df
df_categories = df.iloc[:, 4:]
# load model
model = joblib.load(model_location)
def generate_graph_with_template(data, title, yaxis_title, xaxi_title):
"""
    Build a common Plotly graph layout around the given trace.
    INPUT:
    data - the graph trace data (e.g. a Bar or Heatmap object)
    title - the title of the chart
    yaxis_title - Y axis title
    xaxi_title - X axis title
    OUTPUT:
    layout dictionary for the particular graph.
"""
return {
'data': [data],
'layout': {
'title': title,
'yaxis': {
'title': yaxis_title
},
'xaxis': {
'title': xaxi_title
}
}
}
def generate_message_genres_bar_chart():
"""
create a graph using extracted data for `genre`
"""
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
data = Bar(x=genre_names, y=genre_counts)
title = 'Distribution of Message Genres'
y_title = 'Count'
x_title = 'Genre'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_message_categories_distribution_bar_chart():
"""
create a graph for distribution of the messages.
"""
data = Bar(x=df_categories.columns,
y=list(df_categories.sum().sort_values(ascending=False)))
title = 'Distribution of Message Categories'
y_title = 'Count'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_two_cat_relation_heat_map():
"""
A correlation matrix for categories
"""
data = Heatmap(
z=df_categories.corr(),
y=df_categories.columns,
x=df_categories.columns)
title = 'Correlation Distribution of Categories'
y_title = 'Category'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_graphs():
# create visuals
graphs = [generate_message_genres_bar_chart(),
generate_message_categories_distribution_bar_chart(),
generate_two_cat_relation_heat_map()]
return graphs
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
graphs = generate_graphs()
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graph_json = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graph_json)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
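# A hedged sketch of exercising the routes above without a browser, using Flask's
# built-in test client (standard Flask API); the query text is an arbitrary example.
def smoke_test_sketch():
    with app.test_client() as test_client:
        index_response = test_client.get('/')
        go_response = test_client.get('/go', query_string={'query': 'we need water and food'})
        assert index_response.status_code == 200
        assert go_response.status_code == 200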
|
[
"plotly.graph_objects.Bar",
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"sys.argv.append",
"sklearn.externals.joblib.load",
"json.dumps",
"sys.path.append"
] |
[((226, 251), 'sys.path.append', 'sys.path.append', (['"""common"""'], {}), "('common')\n", (241, 251), False, 'import sys\n'), ((727, 775), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""app/templates"""'}), "(__name__, template_folder='app/templates')\n", (732, 775), False, 'from flask import Flask\n'), ((1034, 1061), 'sklearn.externals.joblib.load', 'joblib.load', (['model_location'], {}), '(model_location)\n', (1045, 1061), False, 'from sklearn.externals import joblib\n'), ((319, 364), 'sys.argv.append', 'sys.argv.append', (['"""./data/DisasterResponse.db"""'], {}), "('./data/DisasterResponse.db')\n", (334, 364), False, 'import sys\n'), ((369, 411), 'sys.argv.append', 'sys.argv.append', (['"""./models/classifier.pkl"""'], {}), "('./models/classifier.pkl')\n", (384, 411), False, 'import sys\n'), ((1936, 1970), 'plotly.graph_objects.Bar', 'Bar', ([], {'x': 'genre_names', 'y': 'genre_counts'}), '(x=genre_names, y=genre_counts)\n', (1939, 1970), False, 'from plotly.graph_objects import Heatmap, Bar\n'), ((3445, 3499), 'json.dumps', 'json.dumps', (['graphs'], {'cls': 'plotly.utils.PlotlyJSONEncoder'}), '(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n', (3455, 3499), False, 'import json\n'), ((3553, 3614), 'flask.render_template', 'render_template', (['"""master.html"""'], {'ids': 'ids', 'graphJSON': 'graph_json'}), "('master.html', ids=ids, graphJSON=graph_json)\n", (3568, 3614), False, 'from flask import render_template, request\n'), ((3750, 3779), 'flask.request.args.get', 'request.args.get', (['"""query"""', '""""""'], {}), "('query', '')\n", (3766, 3779), False, 'from flask import render_template, request\n'), ((4035, 4125), 'flask.render_template', 'render_template', (['"""go.html"""'], {'query': 'query', 'classification_result': 'classification_results'}), "('go.html', query=query, classification_result=\n classification_results)\n", (4050, 4125), False, 'from flask import render_template, request\n')]
|
# coding: utf-8
import re
import utility
from commands import Command
def google_pages(string):
url = 'http://www.google.se/search?q=' + utility.escape(string) + '&ie=UTF-8&oe=UTF-8'
response = utility.read_url(url)
data = response["data"]
search = re.search('swrnum=(\d+)">', data)
if search:
result = search.group(1)
if result:
return int(result, 10)
else:
return None
else:
return None
def google_divisor(int1, int2):
    # the smaller of the two counts picks the display unit so both stay readable
    if int1 < int2:
        smallest = int1
    else:
        smallest = int2
    if smallest > 1000000:
        divisor = 1000000.0
        unit = 'm'
    elif smallest > 1000:
divisor = 1000.0
unit = 'k'
else:
divisor = 1
unit = ''
return (divisor, unit)
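# Illustrative behaviour of the helper above: with counts 1500 and 2500000 the smaller
# count (1500) picks the unit, so google_divisor(1500, 2500000) returns (1000.0, 'k').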
class Googlefight(Command):
def __init__(self):
pass
def trig_googlefight(self, bot, source, target, trigger, argument):
args = argument.split('|', 2)
if len(args) == 2 and len(args[0]) > 0 and len(args[1]) > 0:
result1 = google_pages(args[0])
result2 = google_pages(args[1])
if result1 and result2:
grej = google_divisor(result1, result2)
result1 = result1 / grej[0]
result2 = result2 / grej[0]
unit = grej[1]
if result1 == result2:
return "It's a tie! " + str(result1/1000.0) + "k hits!"
elif result1 > result2:
return args[0] + ' is the winner! (' + str(result1) + unit + ' to ' + str(result2) + unit + ')'
else:
return args[1] + ' is the winner! (' + str(result2) + unit + ' to ' + str(result1) + unit + ')'
else:
return "Couldn't search."
else:
return "Usage: .googlefight arg1|arg2"
|
[
"utility.escape",
"utility.read_url",
"re.search"
] |
[((199, 220), 'utility.read_url', 'utility.read_url', (['url'], {}), '(url)\n', (215, 220), False, 'import utility\n'), ((257, 291), 're.search', 're.search', (['"""swrnum=(\\\\d+)">"""', 'data'], {}), '(\'swrnum=(\\\\d+)">\', data)\n', (266, 291), False, 'import re\n'), ((140, 162), 'utility.escape', 'utility.escape', (['string'], {}), '(string)\n', (154, 162), False, 'import utility\n')]
|
import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
class StellarProfile:
pass
class EllGaussian(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
sigma: float = 0.01,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Gaussian light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
sigma
The sigma value of the Gaussian.
"""
super(EllGaussian, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.sigma = sigma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflections = (
self.mass_to_light_ratio
* self.intensity
* self.sigma
* np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0))
* self.zeta_from(grid=grid)
)
return self.rotate_grid_from_reference_frame(
np.multiply(
1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T
)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Note: sigma is divided by sqrt(q) here.
"""
def calculate_deflection_component(npow, index):
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sigma / np.sqrt(self.axis_ratio),
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(u, y, x, npow, axis_ratio, sigma):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.9999 else 0.9999
def zeta_from(self, grid: aa.type.Grid2DLike):
q2 = self.axis_ratio ** 2.0
ind_pos_y = grid[:, 0] >= 0
shape_grid = np.shape(grid)
output_grid = np.zeros((shape_grid[0]), dtype=np.complex128)
scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2)))
xs_0 = grid[:, 1][ind_pos_y] * scale_factor
ys_0 = grid[:, 0][ind_pos_y] * scale_factor
xs_1 = grid[:, 1][~ind_pos_y] * scale_factor
ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor
output_grid[ind_pos_y] = -1j * (
wofz(xs_0 + 1j * ys_0)
- np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio)
)
output_grid[~ind_pos_y] = np.conj(
-1j
* (
wofz(xs_1 + 1j * ys_1)
- np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio)
)
)
return output_grid
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
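# A hedged usage sketch (not part of the original module): `aa.Grid2D.uniform` is
# assumed here as the usual autoarray grid constructor; the profile parameters are
# arbitrary examples and the call simply evaluates the quantities defined above.
def ell_gaussian_example():
    grid = aa.Grid2D.uniform(shape_native=(20, 20), pixel_scales=0.05)
    gaussian = EllGaussian(
        centre=(0.0, 0.0),
        elliptical_comps=(0.1, 0.0),
        intensity=1.0,
        sigma=0.1,
        mass_to_light_ratio=2.0,
    )
    return gaussian.convergence_2d_from(grid=grid), gaussian.deflections_yx_2d_from(grid=grid)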
# noinspection PyAbstractClass
class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super(AbstractEllSersic, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfileMGE, self).__init__()
super(MassProfileCSE, self).__init__()
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.effective_radius = effective_radius
self.sersic_index = sersic_index
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_mge_from(
grid=grid, sigmas_factor=np.sqrt(self.axis_ratio)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
eccentric_radii = self.grid_to_eccentric_radii(grid=grid)
return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing
the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
elliptical_radii = self.grid_to_elliptical_radii(grid=grid)
return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii)
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_2d, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self,) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_cses
The number of CSEs used to approximate the input func.
sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=0.0,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_2d,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
@property
def sersic_constant(self):
"""A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's
total integrated light.
"""
return (
(2 * self.sersic_index)
- (1.0 / 3.0)
+ (4.0 / (405.0 * self.sersic_index))
+ (46.0 / (25515.0 * self.sersic_index ** 2))
+ (131.0 / (1148175.0 * self.sersic_index ** 3))
- (2194697.0 / (30690717750.0 * self.sersic_index ** 4))
)
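    # Sanity check on the series above (a known reference value, stated here as a
    # comment only): for sersic_index = 4 it evaluates to roughly 7.669, the standard
    # de Vaucouleurs value of the Sersic b_n constant.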
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
@property
def elliptical_effective_radius(self):
"""
The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \
        radius within which a circular aperture contains half the profile's total integrated light. For elliptical \
systems, this won't robustly capture the light profile's elliptical shape.
The elliptical effective radius instead describes the major-axis radius of the ellipse containing \
half the light, and may be more appropriate for highly flattened systems like disk galaxies.
"""
return self.effective_radius / np.sqrt(self.axis_ratio)
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphSersic(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre
intensity
Overall flux intensity normalisation in the light profiles (electrons per second)
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllExponential(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllExponential mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=1.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphExponential(EllExponential):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllDevVaucouleurs(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllDevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=4.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphDevVaucouleurs(EllDevVaucouleurs):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the
lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllSersicRadialGradient(AbstractEllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.mass_to_light_gradient = mass_to_light_gradient
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
self.mass_to_light_gradient,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into singular isothermal elliptical (sie) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_sies
The number of SIEs used to approximate the input func.
sample_points: int (should be larger than 'total_sies')
The number of data points to fit
Returns
-------
Tuple[List, List]
            A list of amplitudes and core radii of every singular isothermal ellipsoid (sie) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=self.mass_to_light_gradient,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / scaled_effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_radial_gradient_2D,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
class SphSersicRadialGradient(EllSersicRadialGradient):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
mass_to_light_gradient=mass_to_light_gradient,
)
class EllSersicCore(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity_break,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_mge_from(grid=grid)
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""
Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
return np.multiply(
np.multiply(
self.intensity_prime,
np.power(
np.add(
1,
np.power(np.divide(self.radius_break, grid_radii), self.alpha),
),
(self.gamma / self.alpha),
),
),
np.exp(
np.multiply(
-self.sersic_constant,
(
np.power(
np.divide(
np.add(
np.power(grid_radii, self.alpha),
(self.radius_break ** self.alpha),
),
(self.effective_radius ** self.alpha),
),
(1.0 / (self.alpha * self.sersic_index)),
)
),
)
),
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 50.0
radii_max = self.effective_radius * 20.0
def core_sersic_2D(r):
return (
self.mass_to_light_ratio
* self.intensity_prime
* (1.0 + (self.radius_break / r) ** self.alpha)
** (self.gamma / self.alpha)
* np.exp(
-self.sersic_constant
* (
(r ** self.alpha + self.radius_break ** self.alpha)
/ self.effective_radius ** self.alpha
)
** (1.0 / (self.sersic_index * self.alpha))
)
)
return self._decompose_convergence_via_mge(
func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
)
@property
def intensity_prime(self):
"""Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
return (
self.intensity_break
* (2.0 ** (-self.gamma / self.alpha))
* np.exp(
self.sersic_constant
* (
((2.0 ** (1.0 / self.alpha)) * self.radius_break)
/ self.effective_radius
)
** (1.0 / self.sersic_index)
)
)
class SphSersicCore(EllSersicCore):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
effective_radius=effective_radius,
sersic_index=sersic_index,
radius_break=radius_break,
intensity_break=intensity_break,
gamma=gamma,
alpha=alpha,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
class EllChameleon(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The elliptical Chameleon mass profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
        We use core_radius_1 here to avoid negative values.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
"""
super(EllChameleon, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.core_radius_0 = core_radius_0
self.core_radius_1 = core_radius_1
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eq. (15) and (16), but the parameters are slightly different.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
factor = (
2.0
* self.mass_to_light_ratio
* self.intensity
/ (1 + self.axis_ratio)
* self.axis_ratio
/ np.sqrt(1.0 - self.axis_ratio ** 2.0)
)
core_radius_0 = np.sqrt(
(4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
core_radius_1 = np.sqrt(
(4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
psi0 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0
)
psi1 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1
)
deflection_y0 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi0, self.axis_ratio ** 2.0 * core_radius_0),
)
)
deflection_x0 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi0, core_radius_0),
)
)
deflection_y1 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi1, self.axis_ratio ** 2.0 * core_radius_1),
)
)
deflection_x1 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi1, core_radius_1),
)
)
deflection_y = np.subtract(deflection_y0, deflection_y1)
deflection_x = np.subtract(deflection_x0, deflection_x1)
return self.rotate_grid_from_reference_frame(
np.multiply(factor, np.vstack((deflection_y, deflection_x)).T)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_elliptical_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Chamelon light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
            The radial distance of each coordinate on the grid from the centre of the profile.
"""
axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0
return np.multiply(
self.intensity / (1 + self.axis_ratio),
np.add(
np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor,
)
),
),
-np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor,
)
),
),
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.99999 else 0.99999
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
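# A minimal standalone numpy sketch (not part of the autogalaxy API) of the radial
# form implemented by EllChameleon.image_2d_via_radii_from above: the Chameleon
# profile is the difference of two cored-isothermal terms. The helper name is
# hypothetical; the default intensity / core radii mirror the EllChameleon
# constructor defaults, while axis_ratio=0.9 is an illustrative value.
import numpy as np  # numpy is already used as np in this module; repeated so the sketch stands alone


def _chameleon_radial_profile_sketch(
    grid_radii, intensity=0.1, axis_ratio=0.9, core_radius_0=0.01, core_radius_1=0.02
):
    axis_ratio_factor = (1.0 + axis_ratio) ** 2.0
    term_0 = 1.0 / np.sqrt(grid_radii ** 2.0 + (4.0 * core_radius_0 ** 2.0) / axis_ratio_factor)
    term_1 = 1.0 / np.sqrt(grid_radii ** 2.0 + (4.0 * core_radius_1 ** 2.0) / axis_ratio_factor)
    return (intensity / (1.0 + axis_ratio)) * (term_0 - term_1)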
class SphChameleon(EllChameleon):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The spherical Chameleon mass profile.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here to avoid negative values.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
core_radius_0=core_radius_0,
core_radius_1=core_radius_1,
mass_to_light_ratio=mass_to_light_ratio,
)
def cse_settings_from(
effective_radius, sersic_index, sersic_constant, mass_to_light_gradient
):
if mass_to_light_gradient > 0.5:
if effective_radius > 0.2:
lower_dex = 6.0
upper_dex = np.min(
[np.log10((18.0 / sersic_constant) ** sersic_index), 1.1]
)
if sersic_index <= 1.2:
total_cses = 50
sample_points = 80
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.5
else:
total_cses = 30
sample_points = 50
else:
if sersic_index <= 1.2:
upper_dex = 1.0
total_cses = 50
sample_points = 80
lower_dex = 4.5
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.0
upper_dex = 1.5
else:
upper_dex = 1.1
lower_dex = 6.0
total_cses = 30
sample_points = 50
else:
upper_dex = np.min(
[
np.log10((23.0 / sersic_constant) ** sersic_index),
0.85 - np.log10(effective_radius),
]
)
if (sersic_index <= 0.9) and (sersic_index > 0.8):
total_cses = 50
sample_points = 80
upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index)
lower_dex = 4.3 + np.log10(effective_radius)
elif sersic_index <= 0.8:
total_cses = 50
sample_points = 80
upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index)
lower_dex = 4.0 + np.log10(effective_radius)
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 4.5 + np.log10(effective_radius)
else:
lower_dex = 3.5 + np.log10(effective_radius)
total_cses = 30
sample_points = 50
return upper_dex, lower_dex, total_cses, sample_points
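# Hedged usage sketch of cse_settings_from above, assuming it is the module-level
# helper it appears to be here: it maps Sersic parameters onto the (upper_dex,
# lower_dex, total_cses, sample_points) settings used for the CSE decomposition.
# The argument values are illustrative only; the __main__ guard keeps the module
# import-safe.
if __name__ == "__main__":
    settings = cse_settings_from(
        effective_radius=1.0,
        sersic_index=2.5,
        sersic_constant=4.67,
        mass_to_light_gradient=0.0,
    )
    print("(upper_dex, lower_dex, total_cses, sample_points) =", settings)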
|
[
"numpy.log10",
"numpy.sqrt",
"autogalaxy.profiles.mass_profiles.mass_profiles.psi_from",
"numpy.add",
"numpy.power",
"scipy.integrate.quad",
"numpy.subtract",
"copy.copy",
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.square",
"numpy.vstack",
"scipy.special.wofz",
"numpy.shape",
"numpy.imag",
"numpy.divide"
] |
[((5818, 5847), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (5826, 5847), True, 'import numpy as np\n'), ((6793, 6807), 'numpy.shape', 'np.shape', (['grid'], {}), '(grid)\n', (6801, 6807), True, 'import numpy as np\n'), ((6831, 6875), 'numpy.zeros', 'np.zeros', (['shape_grid[0]'], {'dtype': 'np.complex128'}), '(shape_grid[0], dtype=np.complex128)\n', (6839, 6875), True, 'import numpy as np\n'), ((7851, 7866), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7860, 7866), False, 'import copy\n'), ((14018, 14047), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (14026, 14047), True, 'import numpy as np\n'), ((18625, 18640), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (18634, 18640), False, 'import copy\n'), ((46683, 46754), 'numpy.sqrt', 'np.sqrt', (['(4.0 * self.core_radius_0 ** 2.0 / (1.0 + self.axis_ratio) ** 2)'], {}), '(4.0 * self.core_radius_0 ** 2.0 / (1.0 + self.axis_ratio) ** 2)\n', (46690, 46754), True, 'import numpy as np\n'), ((46806, 46877), 'numpy.sqrt', 'np.sqrt', (['(4.0 * self.core_radius_1 ** 2.0 / (1.0 + self.axis_ratio) ** 2)'], {}), '(4.0 * self.core_radius_1 ** 2.0 / (1.0 + self.axis_ratio) ** 2)\n', (46813, 46877), True, 'import numpy as np\n'), ((46922, 46996), 'autogalaxy.profiles.mass_profiles.mass_profiles.psi_from', 'psi_from', ([], {'grid': 'grid', 'axis_ratio': 'self.axis_ratio', 'core_radius': 'core_radius_0'}), '(grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0)\n', (46930, 46996), False, 'from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from\n'), ((47037, 47111), 'autogalaxy.profiles.mass_profiles.mass_profiles.psi_from', 'psi_from', ([], {'grid': 'grid', 'axis_ratio': 'self.axis_ratio', 'core_radius': 'core_radius_1'}), '(grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1)\n', (47045, 47111), False, 'from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from\n'), ((48074, 48115), 'numpy.subtract', 'np.subtract', (['deflection_y0', 'deflection_y1'], {}), '(deflection_y0, deflection_y1)\n', (48085, 48115), True, 'import numpy as np\n'), ((48140, 48181), 'numpy.subtract', 'np.subtract', (['deflection_x0', 'deflection_x1'], {}), '(deflection_x0, deflection_x1)\n', (48151, 48181), True, 'import numpy as np\n'), ((49094, 49123), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (49102, 49123), True, 'import numpy as np\n'), ((50536, 50551), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (50545, 50551), False, 'import copy\n'), ((4786, 4805), 'numpy.sqrt', 'np.sqrt', (['axis_ratio'], {}), '(axis_ratio)\n', (4793, 4805), True, 'import numpy as np\n'), ((4808, 4872), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (4815, 4872), True, 'import numpy as np\n'), ((14366, 14469), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((radius / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1))'], {}), '(-self.sersic_constant * ((radius / self.effective_radius) ** (1.0 /\n self.sersic_index) - 1))\n', (14372, 14469), True, 'import numpy as np\n'), ((16410, 16434), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (16417, 16434), True, 'import numpy as np\n'), ((18518, 18542), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (18525, 18542), True, 'import numpy as np\n'), ((20624, 20643), 'numpy.sqrt', 'np.sqrt', 
(['axis_ratio'], {}), '(axis_ratio)\n', (20631, 20643), True, 'import numpy as np\n'), ((20646, 20710), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (20653, 20710), True, 'import numpy as np\n'), ((20759, 20846), 'numpy.exp', 'np.exp', (['(-sersic_constant * ((eta_u / effective_radius) ** (1.0 / sersic_index) - 1))'], {}), '(-sersic_constant * ((eta_u / effective_radius) ** (1.0 /\n sersic_index) - 1))\n', (20765, 20846), True, 'import numpy as np\n'), ((30915, 30934), 'numpy.sqrt', 'np.sqrt', (['axis_ratio'], {}), '(axis_ratio)\n', (30922, 30934), True, 'import numpy as np\n'), ((30937, 31001), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (30944, 31001), True, 'import numpy as np\n'), ((34328, 34352), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (34335, 34352), True, 'import numpy as np\n'), ((41647, 41783), 'numpy.exp', 'np.exp', (['(self.sersic_constant * (2.0 ** (1.0 / self.alpha) * self.radius_break /\n self.effective_radius) ** (1.0 / self.sersic_index))'], {}), '(self.sersic_constant * (2.0 ** (1.0 / self.alpha) * self.\n radius_break / self.effective_radius) ** (1.0 / self.sersic_index))\n', (41653, 41783), True, 'import numpy as np\n'), ((46607, 46644), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (46614, 46644), True, 'import numpy as np\n'), ((53870, 53920), 'numpy.log10', 'np.log10', (['((18.0 / sersic_constant) ** sersic_index)'], {}), '((18.0 / sersic_constant) ** sersic_index)\n', (53878, 53920), True, 'import numpy as np\n'), ((2796, 2847), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi / (1.0 - self.axis_ratio ** 2.0))'], {}), '(2 * np.pi / (1.0 - self.axis_ratio ** 2.0))\n', (2803, 2847), True, 'import numpy as np\n'), ((6934, 6959), 'numpy.sqrt', 'np.sqrt', (['(2.0 * (1.0 - q2))'], {}), '(2.0 * (1.0 - q2))\n', (6941, 6959), True, 'import numpy as np\n'), ((7235, 7259), 'scipy.special.wofz', 'wofz', (['(xs_0 + 1.0j * ys_0)'], {}), '(xs_0 + 1.0j * ys_0)\n', (7239, 7259), False, 'from scipy.special import wofz\n'), ((10836, 10860), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (10843, 10860), True, 'import numpy as np\n'), ((14821, 14922), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))\n', (14827, 14922), True, 'import numpy as np\n'), ((16711, 16813), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 /\n self.sersic_index) - 1.0))\n', (16717, 16813), True, 'import numpy as np\n'), ((31151, 31238), 'numpy.exp', 'np.exp', (['(-sersic_constant * ((eta_u / effective_radius) ** (1.0 / sersic_index) - 1))'], {}), '(-sersic_constant * ((eta_u / effective_radius) ** (1.0 /\n sersic_index) - 1))\n', (31157, 31238), True, 'import numpy as np\n'), ((32684, 32785), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))\n', (32690, 32785), True, 'import numpy as np\n'), ((34809, 
34911), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 /\n self.sersic_index) - 1.0))\n', (34815, 34911), True, 'import numpy as np\n'), ((40888, 41064), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r ** self.alpha + self.radius_break ** self.\n alpha) / self.effective_radius ** self.alpha) ** (1.0 / (self.\n sersic_index * self.alpha)))'], {}), '(-self.sersic_constant * ((r ** self.alpha + self.radius_break **\n self.alpha) / self.effective_radius ** self.alpha) ** (1.0 / (self.\n sersic_index * self.alpha)))\n', (40894, 41064), True, 'import numpy as np\n'), ((47297, 47349), 'numpy.add', 'np.add', (['psi0', '(self.axis_ratio ** 2.0 * core_radius_0)'], {}), '(psi0, self.axis_ratio ** 2.0 * core_radius_0)\n', (47303, 47349), True, 'import numpy as np\n'), ((47537, 47564), 'numpy.add', 'np.add', (['psi0', 'core_radius_0'], {}), '(psi0, core_radius_0)\n', (47543, 47564), True, 'import numpy as np\n'), ((47753, 47805), 'numpy.add', 'np.add', (['psi1', '(self.axis_ratio ** 2.0 * core_radius_1)'], {}), '(psi1, self.axis_ratio ** 2.0 * core_radius_1)\n', (47759, 47805), True, 'import numpy as np\n'), ((47993, 48020), 'numpy.add', 'np.add', (['psi1', 'core_radius_1'], {}), '(psi1, core_radius_1)\n', (47999, 48020), True, 'import numpy as np\n'), ((53592, 53642), 'numpy.log10', 'np.log10', (['((23.0 / sersic_constant) ** sersic_index)'], {}), '((23.0 / sersic_constant) ** sersic_index)\n', (53600, 53642), True, 'import numpy as np\n'), ((53952, 53978), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (53960, 53978), True, 'import numpy as np\n'), ((54100, 54150), 'numpy.log10', 'np.log10', (['((16.0 / sersic_constant) ** sersic_index)'], {}), '((16.0 / sersic_constant) ** sersic_index)\n', (54108, 54150), True, 'import numpy as np\n'), ((4634, 4673), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (4643, 4673), True, 'import numpy as np\n'), ((7273, 7339), 'numpy.exp', 'np.exp', (['(-xs_0 ** 2.0 * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))'], {}), '(-xs_0 ** 2.0 * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))\n', (7279, 7339), True, 'import numpy as np\n'), ((7357, 7417), 'scipy.special.wofz', 'wofz', (['(self.axis_ratio * xs_0 + 1.0j * ys_0 / self.axis_ratio)'], {}), '(self.axis_ratio * xs_0 + 1.0j * ys_0 / self.axis_ratio)\n', (7361, 7417), False, 'from scipy.special import wofz\n'), ((7524, 7548), 'scipy.special.wofz', 'wofz', (['(xs_1 + 1.0j * ys_1)'], {}), '(xs_1 + 1.0j * ys_1)\n', (7528, 7548), False, 'from scipy.special import wofz\n'), ((20414, 20453), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (20423, 20453), True, 'import numpy as np\n'), ((30608, 30647), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (30617, 30647), True, 'import numpy as np\n'), ((47228, 47265), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47235, 47265), True, 'import numpy as np\n'), ((47468, 47505), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47475, 47505), True, 'import numpy as np\n'), ((47684, 47721), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47691, 47721), True, 'import numpy as 
np\n'), ((47924, 47961), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47931, 47961), True, 'import numpy as np\n'), ((48272, 48311), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (48281, 48311), True, 'import numpy as np\n'), ((52592, 52642), 'numpy.log10', 'np.log10', (['((18.0 / sersic_constant) ** sersic_index)'], {}), '((18.0 / sersic_constant) ** sersic_index)\n', (52600, 52642), True, 'import numpy as np\n'), ((53668, 53694), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (53676, 53694), True, 'import numpy as np\n'), ((54182, 54208), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (54190, 54208), True, 'import numpy as np\n'), ((4945, 4968), 'numpy.divide', 'np.divide', (['eta_u', 'sigma'], {}), '(eta_u, sigma)\n', (4954, 4968), True, 'import numpy as np\n'), ((7566, 7632), 'numpy.exp', 'np.exp', (['(-xs_1 ** 2.0 * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))'], {}), '(-xs_1 ** 2.0 * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))\n', (7572, 7632), True, 'import numpy as np\n'), ((7654, 7714), 'scipy.special.wofz', 'wofz', (['(self.axis_ratio * xs_1 + 1.0j * ys_1 / self.axis_ratio)'], {}), '(self.axis_ratio * xs_1 + 1.0j * ys_1 / self.axis_ratio)\n', (7658, 7714), False, 'from scipy.special import wofz\n'), ((19630, 19792), 'scipy.integrate.quad', 'quad', (['self.deflection_func'], {'a': '(0.0)', 'b': '(1.0)', 'args': '(grid[i, 0], grid[i, 1], npow, self.axis_ratio, self.sersic_index, self.\n effective_radius, sersic_constant)'}), '(self.deflection_func, a=0.0, b=1.0, args=(grid[i, 0], grid[i, 1], npow,\n self.axis_ratio, self.sersic_index, self.effective_radius, sersic_constant)\n )\n', (19634, 19792), False, 'from scipy.integrate import quad\n'), ((29768, 29959), 'scipy.integrate.quad', 'quad', (['self.deflection_func'], {'a': '(0.0)', 'b': '(1.0)', 'args': '(grid[i, 0], grid[i, 1], npow, self.axis_ratio, self.sersic_index, self.\n effective_radius, self.mass_to_light_gradient, sersic_constant)'}), '(self.deflection_func, a=0.0, b=1.0, args=(grid[i, 0], grid[i, 1], npow,\n self.axis_ratio, self.sersic_index, self.effective_radius, self.\n mass_to_light_gradient, sersic_constant))\n', (29772, 29959), False, 'from scipy.integrate import quad\n'), ((54335, 54361), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (54343, 54361), True, 'import numpy as np\n'), ((54408, 54434), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (54416, 54434), True, 'import numpy as np\n'), ((3047, 3067), 'numpy.real', 'np.real', (['deflections'], {}), '(deflections)\n', (3054, 3067), True, 'import numpy as np\n'), ((39622, 39662), 'numpy.divide', 'np.divide', (['self.radius_break', 'grid_radii'], {}), '(self.radius_break, grid_radii)\n', (39631, 39662), True, 'import numpy as np\n'), ((49775, 49796), 'numpy.square', 'np.square', (['grid_radii'], {}), '(grid_radii)\n', (49784, 49796), True, 'import numpy as np\n'), ((3025, 3045), 'numpy.imag', 'np.imag', (['deflections'], {}), '(deflections)\n', (3032, 3045), True, 'import numpy as np\n'), ((6424, 6448), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (6431, 6448), True, 'import numpy as np\n'), ((40056, 40088), 'numpy.power', 'np.power', (['grid_radii', 'self.alpha'], {}), '(grid_radii, self.alpha)\n', (40064, 40088), True, 'import numpy as np\n'), ((50100, 50121), 'numpy.square', 'np.square', 
(['grid_radii'], {}), '(grid_radii)\n', (50109, 50121), True, 'import numpy as np\n'), ((4282, 4306), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (4289, 4306), True, 'import numpy as np\n')]
|
"""Connection object for Network Manager."""
from ipaddress import ip_address, ip_interface
from typing import Optional
from ...const import ATTR_ADDRESS, ATTR_PREFIX
from ...utils.gdbus import DBus
from ..const import (
DBUS_ATTR_ADDRESS_DATA,
DBUS_ATTR_CONNECTION,
DBUS_ATTR_GATEWAY,
DBUS_ATTR_ID,
DBUS_ATTR_IP4CONFIG,
DBUS_ATTR_IP6CONFIG,
DBUS_ATTR_NAMESERVER_DATA,
DBUS_ATTR_NAMESERVERS,
DBUS_ATTR_STATE,
DBUS_ATTR_TYPE,
DBUS_ATTR_UUID,
DBUS_NAME_CONNECTION_ACTIVE,
DBUS_NAME_IP4CONFIG,
DBUS_NAME_IP6CONFIG,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
)
from ..interface import DBusInterfaceProxy
from .configuration import IpConfiguration
class NetworkConnection(DBusInterfaceProxy):
"""NetworkConnection object for Network Manager."""
def __init__(self, object_path: str) -> None:
"""Initialize NetworkConnection object."""
self.object_path = object_path
self.properties = {}
self._ipv4: Optional[IpConfiguration] = None
self._ipv6: Optional[IpConfiguration] = None
@property
def id(self) -> str:
"""Return the id of the connection."""
return self.properties[DBUS_ATTR_ID]
@property
def type(self) -> str:
"""Return the type of the connection."""
return self.properties[DBUS_ATTR_TYPE]
@property
def uuid(self) -> str:
"""Return the uuid of the connection."""
return self.properties[DBUS_ATTR_UUID]
@property
def state(self) -> int:
"""Return the state of the connection."""
return self.properties[DBUS_ATTR_STATE]
@property
    def setting_object(self) -> str:
"""Return the connection object path."""
return self.properties[DBUS_ATTR_CONNECTION]
@property
def ipv4(self) -> Optional[IpConfiguration]:
"""Return a ip4 configuration object for the connection."""
return self._ipv4
@property
def ipv6(self) -> Optional[IpConfiguration]:
"""Return a ip6 configuration object for the connection."""
return self._ipv6
async def connect(self) -> None:
"""Get connection information."""
self.dbus = await DBus.connect(DBUS_NAME_NM, self.object_path)
self.properties = await self.dbus.get_properties(DBUS_NAME_CONNECTION_ACTIVE)
# IPv4
if self.properties[DBUS_ATTR_IP4CONFIG] != DBUS_OBJECT_BASE:
ip4 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP4CONFIG])
ip4_data = await ip4.get_properties(DBUS_NAME_IP4CONFIG)
self._ipv4 = IpConfiguration(
ip_address(ip4_data[DBUS_ATTR_GATEWAY])
if ip4_data.get(DBUS_ATTR_GATEWAY)
else None,
[
ip_address(nameserver[ATTR_ADDRESS])
for nameserver in ip4_data.get(DBUS_ATTR_NAMESERVER_DATA, [])
],
[
ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}")
for address in ip4_data.get(DBUS_ATTR_ADDRESS_DATA, [])
],
)
# IPv6
if self.properties[DBUS_ATTR_IP6CONFIG] != DBUS_OBJECT_BASE:
ip6 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP6CONFIG])
ip6_data = await ip6.get_properties(DBUS_NAME_IP6CONFIG)
self._ipv6 = IpConfiguration(
ip_address(ip6_data[DBUS_ATTR_GATEWAY])
if ip6_data.get(DBUS_ATTR_GATEWAY)
else None,
[
ip_address(bytes(nameserver))
for nameserver in ip6_data.get(DBUS_ATTR_NAMESERVERS)
],
[
ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}")
for address in ip6_data.get(DBUS_ATTR_ADDRESS_DATA, [])
],
)
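# Hedged standalone sketch (not part of the Supervisor API) of how ip_address and
# ip_interface, imported at the top of this module, turn NetworkManager-style
# AddressData / NameserverData entries into ipaddress objects, mirroring the
# IpConfiguration construction above. The plain "address"/"prefix" keys and the
# sample values in the docstring are illustrative assumptions.
def _parse_address_data_sketch(address_data, nameserver_data):
    """Return (interfaces, nameservers) parsed from NM-style lists of dicts.

    Example (illustrative):
        _parse_address_data_sketch(
            [{"address": "192.168.1.10", "prefix": 24}], [{"address": "192.168.1.1"}]
        )
        -> ([IPv4Interface('192.168.1.10/24')], [IPv4Address('192.168.1.1')])
    """
    interfaces = [
        ip_interface(f"{entry['address']}/{entry['prefix']}") for entry in address_data
    ]
    nameservers = [ip_address(entry["address"]) for entry in nameserver_data]
    return interfaces, nameservers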
|
[
"ipaddress.ip_address",
"ipaddress.ip_interface"
] |
[((2635, 2674), 'ipaddress.ip_address', 'ip_address', (['ip4_data[DBUS_ATTR_GATEWAY]'], {}), '(ip4_data[DBUS_ATTR_GATEWAY])\n', (2645, 2674), False, 'from ipaddress import ip_address, ip_interface\n'), ((2791, 2827), 'ipaddress.ip_address', 'ip_address', (['nameserver[ATTR_ADDRESS]'], {}), '(nameserver[ATTR_ADDRESS])\n', (2801, 2827), False, 'from ipaddress import ip_address, ip_interface\n'), ((2967, 3030), 'ipaddress.ip_interface', 'ip_interface', (['f"""{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}"""'], {}), "(f'{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}')\n", (2979, 3030), False, 'from ipaddress import ip_address, ip_interface\n'), ((3442, 3481), 'ipaddress.ip_address', 'ip_address', (['ip6_data[DBUS_ATTR_GATEWAY]'], {}), '(ip6_data[DBUS_ATTR_GATEWAY])\n', (3452, 3481), False, 'from ipaddress import ip_address, ip_interface\n'), ((3759, 3822), 'ipaddress.ip_interface', 'ip_interface', (['f"""{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}"""'], {}), "(f'{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}')\n", (3771, 3822), False, 'from ipaddress import ip_address, ip_interface\n')]
|
from __future__ import print_function, absolute_import
from .beats import Beat
from StringIO import StringIO
import sys
import os
import json
import urllib
import webbrowser
try:
import pycurl
except ImportError:
    print("Need pycurl dependency to use databricks as the deployment platform. Run pip install pycurl in your virtualenv and try this again.")
sys.exit(1)
class Databricks:
def __init__(self, config, options):
self.config = config
self.options = options
projectsDir = self.config.get(self.options.env, "projects_dir")
schemasDir = os.path.join(projectsDir, "schemas")
schemasFile = os.path.join(schemasDir, "beats.schema.json")
if os.path.exists(schemasFile):
            self.beats = Beat(open(schemasFile).read())
def _q_config(self,item):
return self.config.get(self.options.env, "databricks-{}".format(item))
def _do_request(self, method, path, base_url=None, **data):
# Uh, only using pycurl because that was the example that was around, will port to requests someday
# it's supposed to be faster, so oh well
c = pycurl.Curl()
#auth_token = self._q_config("auth_token")
username = self._q_config("username")
password = self._q_config("password")
if base_url == None:
base_url = self.config.get(self.options.env, "master")
url = base_url+ "/" + path
buffer = StringIO()
c.setopt(c.WRITEDATA, buffer)
print("Using", url, file=sys.stderr)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER, ['Accept:application/json'])
#c.setopt(pycurl.HTTPHEADER, ["X-AUTH-TOKEN: "+ auth_token, "Content-Type:application/json", "Accept: application/json, text/plain"])
## Note: Only POST and GET have been tested...
## It's not very obvious with pycurl to do this properly with PUT and DELETE
## Review this if ever needed to add these methods
## http://www.programcreek.com/python/example/2132/pycurl.HTTPHEADER
if method.lower() == "post":
c.setopt(pycurl.POST,1)
post_data = urllib.urlencode(data)
print(post_data)
c.setopt(pycurl.POSTFIELDS, post_data)
elif method.lower() == "get":
c.setopt(pycurl.HTTPGET, 1)
elif method.lower() == "delete":
c.setopt(pycurl.DELETE, 1)
elif method.lower() == "put":
#c.setopt(pycurl.UPLOAD, 1)
post_data = urllib.urlencode(data)
c.setopt(pycurl.CUSTOMREQUEST, "PUT")
c.setopt(pycurl.POSTFIELDS, post_data)
elif method.lower() == "head":
c.setopt(pycurl.NOBODY,1)
else:
print("Unknown method ", method)
sys.exit(1)
if username != None and password != None:
c.setopt(pycurl.USERPWD, '%s:%s' % (username, password))
c.perform()
c.close()
body = buffer.getvalue()
return body
def _get_cluster_id(self):
cluster_id = self._q_config("cluster_id")
assert cluster_id is not None
return cluster_id
def invoke_task(self,name, *args):
if args == (None,):
getattr(self,name)()
else:
getattr(self,name)(*args)
def deploy(self, asset_path, *args):
# Use multipart upload to libraries/upload
print("TBD")
def logs(self, job_id):
print("TBD")
def status(self, job_id):
print("TBD")
def notebook(self):
print("TBD")
def _get_clusters(self):
resp_body = self._do_request("GET", "clusters/list")
j = json.loads(resp_body)
return j
def describecluster(self, name):
clusters = self._get_clusters()
for cluster in clusters:
if cluster['name'] == name:
print(cluster)
def lsclusters(self):
clusters = self._get_clusters()
if len(clusters) == 0:
print("No clusters created")
for cluster in clusters:
print(cluster)
def mkcluster(self, name, memory_gb=6, use_spot=True):
resp_body = self._do_request("POST", "clusters/create", name=name,
memoryGB=memory_gb,
useSpot=use_spot
)
print(resp_body)
def lslibraries(self):
resp_body = self._do_request("GET", "libraries/list")
j = json.loads(resp_body)
print(j)
def describelibraries(self):
resp_body = self._do_request("GET", "libraries/status")
j = json.loads(resp_body)
print(j)
def rmlibrary(self, library_id):
resp_body = self._do_request("DELETE", "clusters/create", libraryId=library_id)
print(resp_body)
def attachlibrary(self, library_id, cluster_id):
print("TBD")
def schedule(self, asset_path, schedule_id, schedule_iso8601):
print("TBD")
|
[
"StringIO.StringIO",
"os.path.exists",
"json.loads",
"pycurl.Curl",
"os.path.join",
"urllib.urlencode",
"sys.exit"
] |
[((349, 360), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (357, 360), False, 'import sys\n'), ((574, 610), 'os.path.join', 'os.path.join', (['projectsDir', '"""schemas"""'], {}), "(projectsDir, 'schemas')\n", (586, 610), False, 'import os\n'), ((633, 678), 'os.path.join', 'os.path.join', (['schemasDir', '"""beats.schema.json"""'], {}), "(schemasDir, 'beats.schema.json')\n", (645, 678), False, 'import os\n'), ((690, 717), 'os.path.exists', 'os.path.exists', (['schemasFile'], {}), '(schemasFile)\n', (704, 717), False, 'import os\n'), ((1120, 1133), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (1131, 1133), False, 'import pycurl\n'), ((1426, 1436), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (1434, 1436), False, 'from StringIO import StringIO\n'), ((3660, 3681), 'json.loads', 'json.loads', (['resp_body'], {}), '(resp_body)\n', (3670, 3681), False, 'import json\n'), ((4464, 4485), 'json.loads', 'json.loads', (['resp_body'], {}), '(resp_body)\n', (4474, 4485), False, 'import json\n'), ((4614, 4635), 'json.loads', 'json.loads', (['resp_body'], {}), '(resp_body)\n', (4624, 4635), False, 'import json\n'), ((2134, 2156), 'urllib.urlencode', 'urllib.urlencode', (['data'], {}), '(data)\n', (2150, 2156), False, 'import urllib\n'), ((2498, 2520), 'urllib.urlencode', 'urllib.urlencode', (['data'], {}), '(data)\n', (2514, 2520), False, 'import urllib\n'), ((2770, 2781), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2778, 2781), False, 'import sys\n')]
|
# -*- coding: utf-8 *-*
import logging
from unittest import TestCase
from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat
log = logging.getLogger(__name__)
class Foo(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self[k] = v
def __setitem__(self, name, value):
# helper to add attributes per self[attr] = value -> self.attr == value
setattr(self, name, value)
def __repr__(self):
return pretty_repr(self, ignore_own_repr=True)
class TestAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_obj=None, expected_kwargs={},
working_obj=None, working_kwargs={},
failing_obj=None, failing_kwargs={},
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls, expected_cls in permuteflat(actual_classes, expected_classes):
expected_obj = expected_obj or expected_cls(**expected_kwargs)
working_obj = working_obj or actual_cls(**working_kwargs)
self.run_assert((working_obj, expected_obj, namepaths, expected_namepaths))
failing_obj = failing_obj or actual_cls(**failing_kwargs)
self.run_assert((failing_obj, expected_obj, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*assert_equal_struct* can compare similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1),
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths(self):
"""
With namepaths *assert_equal_struct* can compare similar structures and structures with
lists of values in full depth.
This ignores all additional paths at the expected object.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1, y=4),
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[1, 4],
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1, 4],
namepaths=['0'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths_and_expected_namepaths(self):
"""
Like just with namepaths, the values are sometimes at other paths at the expected object and
        will be compared using expected_namepaths in the same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(a=1, b=4),
namepaths=['x'],
expected_namepaths=['a'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != a: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[4, 1],
namepaths=['x'],
expected_namepaths=['1'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != 1: 3 != 1')
self.check(expected_obj=[4, 1],
namepaths=['0'],
expected_namepaths=['1'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0 != 1: 3 != 1')
class TestMultiAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
multi_assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_objs=None, expected_kwargs_list=[],
working_objs=None, working_kwargs_list=[],
failing_objs=None, failing_kwargs_list=[],
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls1, actual_cls2, expected_cls1, expected_cls2 in \
permuteflat(*([actual_classes] * 2 + [expected_classes] * 2)):
if not expected_objs:
expected_objs = (expected_cls1(**expected_kwargs_list[0]),
expected_cls2(**expected_kwargs_list[1]))
if not working_objs:
working_objs = (actual_cls1(**working_kwargs_list[0]),
actual_cls2(**working_kwargs_list[1]))
self.run_assert((working_objs, expected_objs, namepaths, expected_namepaths))
if not failing_objs:
failing_objs = (actual_cls1(**failing_kwargs_list[0]),
actual_cls2(**failing_kwargs_list[1]))
self.run_assert((failing_objs, expected_objs, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*multi_assert_equal_struct* can compare multiple similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2, y=3)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=2, y=5)],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\ty: 5 != 3')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 3]],
failing_objs=[[4, 0], [2, 5]],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t1: 5 != 3')
def test_with_namepaths(self):
"""
With namepaths *multi_assert_equal_struct* can compare multiple similar structures and
structures with lists of values in full depth.
This ignores all additional paths at the expected objects.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[1], [2, 0]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 4 != 2')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 0]],
failing_objs=[[4, 0], [5, 0]],
namepaths=['0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t0: 5 != 2')
def test_with_namepaths_and_expected_namepaths(self):
"""
Like just with namepaths, the values are sometimes at other paths at the expected object and
        will be compared using expected_namepaths in the same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(y=1), dict(y=2, x=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
expected_namepaths=['y'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != y: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx != y: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[0, 1], [0, 2]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
expected_namepaths=['1'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != 1: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx != 1: 4 != 2')
self.check(expected_objs=[[1, 2], [3, 4]],
working_objs=[[2, 1], [4, 3]],
failing_objs=[[2, 5], [6, 3]],
namepaths=['0', '1'],
expected_namepaths=['1', '0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t1 != 0: 5 != 1\n'\
'Index 1: actual values != expected values:\n\t0 != 1: 6 != 4')
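# Hedged usage sketch based on the working cases exercised above: assert_equal_struct
# only checks the paths present in the expected structure (or the explicit namepaths),
# so extra or differing values elsewhere on the actual object are ignored. Guarded so
# the module still imports cleanly under a test runner.
if __name__ == '__main__':
    assert_equal_struct({'x': 1, 'y': 2}, {'x': 1})                 # direct comparison, y ignored
    assert_equal_struct({'x': 1, 'y': 2}, {'x': 1, 'y': 4}, ['x'])  # namepaths restrict the check to x
    print('both example assertions passed')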
|
[
"logging.getLogger",
"nicepy.permuteflat",
"nicepy.pretty_repr",
"nicepy.assert_equal_struct",
"nicepy.multi_assert_equal_struct"
] |
[((169, 196), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'import logging\n'), ((512, 551), 'nicepy.pretty_repr', 'pretty_repr', (['self'], {'ignore_own_repr': '(True)'}), '(self, ignore_own_repr=True)\n', (523, 551), False, 'from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat\n'), ((1328, 1373), 'nicepy.permuteflat', 'permuteflat', (['actual_classes', 'expected_classes'], {}), '(actual_classes, expected_classes)\n', (1339, 1373), False, 'from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat\n'), ((5800, 5861), 'nicepy.permuteflat', 'permuteflat', (['*([actual_classes] * 2 + [expected_classes] * 2)'], {}), '(*([actual_classes] * 2 + [expected_classes] * 2))\n', (5811, 5861), False, 'from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat\n'), ((732, 758), 'nicepy.assert_equal_struct', 'assert_equal_struct', (['*args'], {}), '(*args)\n', (751, 758), False, 'from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat\n'), ((5132, 5164), 'nicepy.multi_assert_equal_struct', 'multi_assert_equal_struct', (['*args'], {}), '(*args)\n', (5157, 5164), False, 'from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-20 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0073_auto_20170320_1455'),
]
operations = [
migrations.AlterField(
model_name='award',
name='fain',
field=models.CharField(blank=True, db_index=True, help_text='An identification code assigned to each financial assistance award tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.', max_length=30, null=True),
),
migrations.AlterField(
model_name='award',
name='period_of_performance_current_end_date',
field=models.DateField(db_index=True, help_text='The current, not original, period of performance end date', null=True, verbose_name='End Date'),
),
migrations.AlterField(
model_name='award',
name='period_of_performance_start_date',
field=models.DateField(db_index=True, help_text='The start date for the period of performance', null=True, verbose_name='Start Date'),
),
migrations.AlterField(
model_name='award',
name='piid',
field=models.CharField(blank=True, db_index=True, help_text='Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers.', max_length=50, null=True),
),
migrations.AlterField(
model_name='award',
name='potential_total_value_of_award',
field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The sum of the potential_value_of_award from associated transactions', max_digits=20, null=True, verbose_name='Potential Total Value of Award'),
),
migrations.AlterField(
model_name='award',
name='total_obligation',
field=models.DecimalField(db_index=True, decimal_places=2, help_text='The amount of money the government is obligated to pay for the award', max_digits=15, null=True, verbose_name='Total Obligated'),
),
migrations.AlterField(
model_name='award',
name='total_outlay',
field=models.DecimalField(db_index=True, decimal_places=2, help_text='The total amount of money paid out for this award', max_digits=15, null=True),
),
migrations.AlterField(
model_name='award',
name='type',
field=models.CharField(choices=[('U', 'Unknown Type'), ('02', 'Block Grant'), ('03', 'Formula Grant'), ('04', 'Project Grant'), ('05', 'Cooperative Agreement'), ('06', 'Direct Payment for Specified Use'), ('07', 'Direct Loan'), ('08', 'Guaranteed/Insured Loan'), ('09', 'Insurance'), ('10', 'Direct Payment unrestricted'), ('11', 'Other'), ('A', 'BPA Call'), ('B', 'Purchase Order'), ('C', 'Delivery Order'), ('D', 'Definitive Contract')], db_index=True, default='U', help_text='\tThe mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments.', max_length=5, null=True, verbose_name='Award Type'),
),
migrations.AlterField(
model_name='award',
name='uri',
field=models.CharField(blank=True, db_index=True, help_text='The uri of the award', max_length=70, null=True),
),
migrations.AlterField(
model_name='transaction',
name='federal_action_obligation',
field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The obligation of the federal government for this transaction', max_digits=20, null=True),
),
]
|
[
"django.db.models.DecimalField",
"django.db.models.DateField",
"django.db.models.CharField"
] |
[((397, 890), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'help_text': '"""An identification code assigned to each financial assistance award tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers."""', 'max_length': '(30)', 'null': '(True)'}), "(blank=True, db_index=True, help_text=\n 'An identification code assigned to each financial assistance award tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.'\n , max_length=30, null=True)\n", (413, 890), False, 'from django.db import migrations, models\n'), ((1033, 1180), 'django.db.models.DateField', 'models.DateField', ([], {'db_index': '(True)', 'help_text': '"""The current, not original, period of performance end date"""', 'null': '(True)', 'verbose_name': '"""End Date"""'}), "(db_index=True, help_text=\n 'The current, not original, period of performance end date', null=True,\n verbose_name='End Date')\n", (1049, 1180), False, 'from django.db import migrations, models\n'), ((1318, 1455), 'django.db.models.DateField', 'models.DateField', ([], {'db_index': '(True)', 'help_text': '"""The start date for the period of performance"""', 'null': '(True)', 'verbose_name': '"""Start Date"""'}), "(db_index=True, help_text=\n 'The start date for the period of performance', null=True, verbose_name\n ='Start Date')\n", (1334, 1455), False, 'from django.db import migrations, models\n'), ((1564, 2000), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'help_text': '"""Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers."""', 'max_length': '(50)', 'null': '(True)'}), "(blank=True, db_index=True, help_text=\n 'Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. 
After October 2017, it is between 13 and 17 digits, both letters and numbers.'\n , max_length=50, null=True)\n", (1580, 2000), False, 'from django.db import migrations, models\n'), ((2135, 2363), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'db_index': '(True)', 'decimal_places': '(2)', 'help_text': '"""The sum of the potential_value_of_award from associated transactions"""', 'max_digits': '(20)', 'null': '(True)', 'verbose_name': '"""Potential Total Value of Award"""'}), "(blank=True, db_index=True, decimal_places=2, help_text=\n 'The sum of the potential_value_of_award from associated transactions',\n max_digits=20, null=True, verbose_name='Potential Total Value of Award')\n", (2154, 2363), False, 'from django.db import migrations, models\n'), ((2485, 2686), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'db_index': '(True)', 'decimal_places': '(2)', 'help_text': '"""The amount of money the government is obligated to pay for the award"""', 'max_digits': '(15)', 'null': '(True)', 'verbose_name': '"""Total Obligated"""'}), "(db_index=True, decimal_places=2, help_text=\n 'The amount of money the government is obligated to pay for the award',\n max_digits=15, null=True, verbose_name='Total Obligated')\n", (2504, 2686), False, 'from django.db import migrations, models\n'), ((2804, 2954), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'db_index': '(True)', 'decimal_places': '(2)', 'help_text': '"""The total amount of money paid out for this award"""', 'max_digits': '(15)', 'null': '(True)'}), "(db_index=True, decimal_places=2, help_text=\n 'The total amount of money paid out for this award', max_digits=15,\n null=True)\n", (2823, 2954), False, 'from django.db import migrations, models\n'), ((3064, 3809), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('U', 'Unknown Type'), ('02', 'Block Grant'), ('03', 'Formula Grant'), (\n '04', 'Project Grant'), ('05', 'Cooperative Agreement'), ('06',\n 'Direct Payment for Specified Use'), ('07', 'Direct Loan'), ('08',\n 'Guaranteed/Insured Loan'), ('09', 'Insurance'), ('10',\n 'Direct Payment unrestricted'), ('11', 'Other'), ('A', 'BPA Call'), (\n 'B', 'Purchase Order'), ('C', 'Delivery Order'), ('D',\n 'Definitive Contract')]", 'db_index': '(True)', 'default': '"""U"""', 'help_text': '"""\tThe mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments."""', 'max_length': '(5)', 'null': '(True)', 'verbose_name': '"""Award Type"""'}), "(choices=[('U', 'Unknown Type'), ('02', 'Block Grant'), (\n '03', 'Formula Grant'), ('04', 'Project Grant'), ('05',\n 'Cooperative Agreement'), ('06', 'Direct Payment for Specified Use'), (\n '07', 'Direct Loan'), ('08', 'Guaranteed/Insured Loan'), ('09',\n 'Insurance'), ('10', 'Direct Payment unrestricted'), ('11', 'Other'), (\n 'A', 'BPA Call'), ('B', 'Purchase Order'), ('C', 'Delivery Order'), (\n 'D', 'Definitive Contract')], db_index=True, default='U', help_text=\n '\\tThe mechanism used to distribute funding. The federal government can distribute funding in several forms. 
These award types include contracts, grants, loans, and direct payments.'\n , max_length=5, null=True, verbose_name='Award Type')\n", (3080, 3809), False, 'from django.db import migrations, models\n'), ((3889, 3997), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'help_text': '"""The uri of the award"""', 'max_length': '(70)', 'null': '(True)'}), "(blank=True, db_index=True, help_text=\n 'The uri of the award', max_length=70, null=True)\n", (3905, 3997), False, 'from django.db import migrations, models\n'), ((4138, 4312), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'db_index': '(True)', 'decimal_places': '(2)', 'help_text': '"""The obligation of the federal government for this transaction"""', 'max_digits': '(20)', 'null': '(True)'}), "(blank=True, db_index=True, decimal_places=2, help_text=\n 'The obligation of the federal government for this transaction',\n max_digits=20, null=True)\n", (4157, 4312), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
# coding: utf-8
# <a id='top'></a>
#
#
# # $\texttt{GiRaFFEfood}$: Initial data for $\texttt{GiRaFFE}$
#
# ## Aligned Rotator
#
# $$\label{top}$$
#
# This module provides another initial data option for $\texttt{GiRaFFE}$. This is a flat-spacetime test with initial data $$A_{\phi} = \frac{\mu \varpi}{r^3},$$ where $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$ is the cylindrical radius. We let $A_r = A_\theta = 0$.
#
# Additionally, the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$.
# <a id='preliminaries'></a>
#
# ### Steps 0-1: Preliminaries
# $$\label{preliminaries}$$
#
# \[Back to [top](#top)\]
#
# Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet.
# Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian
import NRPy_param_funcs as par
import indexedexp as ixp
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import reference_metric as rfm
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
B_p_aligned_rotator,R_NS_aligned_rotator = par.Cparameters("REAL",thismodule,
# B_p_aligned_rotator = the intensity of the magnetic field and
# R_NS_aligned_rotator= "Neutron star" radius
["B_p_aligned_rotator","R_NS_aligned_rotator"],
[1e-5, 1.0])
# The angular velocity of the "neutron star"
Omega_aligned_rotator = par.Cparameters("REAL",thismodule,"Omega_aligned_rotator",1e3)
# <a id='step2'></a>
#
# ### Step 2: Set the vectors A in Spherical coordinates
# $$\label{step2}$$
#
# \[Back to [top](#top)\]
#
# We will first build the fundamental vector $A_i$ in spherical coordinates (see [Table 3](https://arxiv.org/pdf/1704.00599.pdf)). Note that we use reference_metric.py to set $r$ and $\theta$ in terms of Cartesian coordinates; this will save us a step later when we convert to Cartesian coordinates. So, we set
# \begin{align}
# A_{\phi} &= \frac{\mu \varpi}{r^3}, \\
# \end{align}
# with $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$
def GiRaFFEfood_NRPy_Aligned_Rotator():
r = rfm.xxSph[0]
varpi = sp.sqrt(rfm.xx_to_Cart[0]**2 + rfm.xx_to_Cart[1]**2)
mu = B_p_aligned_rotator * R_NS_aligned_rotator**3 / 2
ASphD = ixp.zerorank1()
ASphD[2] = mu * varpi**2 / (r**3) # The other components were already declared to be 0.
# <a id='step3'></a>
#
# ### Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
# $$\label{step3}$$
#
# \[Back to [top](#top)\]
#
# Now, we will use the coordinate transformation definitions provided by reference_metric.py to build the Jacobian
# $$
# \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i},
# $$
# where $x_{\rm Sph}^j \in \{r,\theta,\phi\}$ and $x_{\rm Cart}^i \in \{x,y,z\}$. We would normally compute its inverse, but since none of the quantities we need to transform have upper indices, it is not necessary. Then, since $A_i$ has one lower index, it will need to be multiplied by the Jacobian:
#
# $$
# A_i^{\rm Cart} = A_j^{\rm Sph} \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i},
# $$
# Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])],
[sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])],
[sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]])
#dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() # We don't actually need this in this case.
global AD
AD = ixp.zerorank1(DIM=3)
for i in range(3):
for j in range(3):
            AD[i] += drrefmetric__dx_0UDmatrix[(j,i)]*ASphD[j]
# <a id='step4'></a>
#
# ### Step 4: Calculate $v^i$
# $$\label{step4}$$
#
# \[Back to [top](#top)\]
#
# Here, we will calculate the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$. Conveniently, in flat space, the drift velocity reduces to the Valencia velocity because $\alpha = 1$ and $\beta^i = 0$.
# Step 4: Calculate v^i
LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3()
import Min_Max_and_Piecewise_Expressions as noif
unit_zU = ixp.zerorank1()
unit_zU[2] = sp.sympify(1)
global ValenciavU
ValenciavU = ixp.zerorank1()
for i in range(3):
for j in range(3):
for k in range(3):
ValenciavU[i] += noif.coord_leq_bound(r,R_NS_aligned_rotator)*LeviCivitaSymbolDDD[i][j][k] * Omega_aligned_rotator * unit_zU[j] * rfm.xx[k]
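# Hedged usage sketch: calling the function above populates the module-level globals
# AD and ValenciavU with SymPy expressions for the Cartesian vector potential and the
# Valencia 3-velocity; printing them is a quick way to inspect the generated initial
# data. Guarded so importing this module (as the code validation below does) has no
# side effects.
if __name__ == "__main__":
    GiRaFFEfood_NRPy_Aligned_Rotator()
    for i in range(3):
        print("AD[" + str(i) + "] = " + str(AD[i]))
        print("ValenciavU[" + str(i) + "] = " + str(ValenciavU[i]))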
# ### NRPy+ Module Code Validation
#
# \[Back to [top](#top)\]
#
# Here, as a code validation check, we verify agreement in the SymPy expressions for the $\texttt{GiRaFFE}$ Aligned Rotator initial data equations we intend to use between
# 1. this tutorial and
# 2. the NRPy+ [GiRaFFEfood_NRPy_Aligned_Rotator.py](../edit/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) module.
#
|
[
"NRPy_param_funcs.Cparameters",
"Min_Max_and_Piecewise_Expressions.coord_leq_bound",
"sympy.sqrt",
"sympy.sympify",
"indexedexp.LeviCivitaSymbol_dim3_rank3",
"reference_metric.reference_metric",
"sympy.diff",
"NRPy_param_funcs.set_parval_from_str",
"indexedexp.zerorank1"
] |
[((1427, 1496), 'NRPy_param_funcs.set_parval_from_str', 'par.set_parval_from_str', (['"""reference_metric::CoordSystem"""', '"""Cartesian"""'], {}), "('reference_metric::CoordSystem', 'Cartesian')\n", (1450, 1496), True, 'import NRPy_param_funcs as par\n'), ((1496, 1518), 'reference_metric.reference_metric', 'rfm.reference_metric', ([], {}), '()\n', (1516, 1518), True, 'import reference_metric as rfm\n'), ((1627, 1729), 'NRPy_param_funcs.Cparameters', 'par.Cparameters', (['"""REAL"""', 'thismodule', "['B_p_aligned_rotator', 'R_NS_aligned_rotator']", '[1e-05, 1.0]'], {}), "('REAL', thismodule, ['B_p_aligned_rotator',\n 'R_NS_aligned_rotator'], [1e-05, 1.0])\n", (1642, 1729), True, 'import NRPy_param_funcs as par\n'), ((2139, 2207), 'NRPy_param_funcs.Cparameters', 'par.Cparameters', (['"""REAL"""', 'thismodule', '"""Omega_aligned_rotator"""', '(1000.0)'], {}), "('REAL', thismodule, 'Omega_aligned_rotator', 1000.0)\n", (2154, 2207), True, 'import NRPy_param_funcs as par\n'), ((2899, 2955), 'sympy.sqrt', 'sp.sqrt', (['(rfm.xx_to_Cart[0] ** 2 + rfm.xx_to_Cart[1] ** 2)'], {}), '(rfm.xx_to_Cart[0] ** 2 + rfm.xx_to_Cart[1] ** 2)\n', (2906, 2955), True, 'import sympy as sp\n'), ((3025, 3040), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {}), '()\n', (3038, 3040), True, 'import indexedexp as ixp\n'), ((4609, 4629), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {'DIM': '(3)'}), '(DIM=3)\n', (4622, 4629), True, 'import indexedexp as ixp\n'), ((5266, 5299), 'indexedexp.LeviCivitaSymbol_dim3_rank3', 'ixp.LeviCivitaSymbol_dim3_rank3', ([], {}), '()\n', (5297, 5299), True, 'import indexedexp as ixp\n'), ((5369, 5384), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {}), '()\n', (5382, 5384), True, 'import indexedexp as ixp\n'), ((5402, 5415), 'sympy.sympify', 'sp.sympify', (['(1)'], {}), '(1)\n', (5412, 5415), True, 'import sympy as sp\n'), ((5456, 5471), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {}), '()\n', (5469, 5471), True, 'import indexedexp as ixp\n'), ((4087, 4119), 'sympy.diff', 'sp.diff', (['rfm.xxSph[0]', 'rfm.xx[0]'], {}), '(rfm.xxSph[0], rfm.xx[0])\n', (4094, 4119), True, 'import sympy as sp\n'), ((4120, 4152), 'sympy.diff', 'sp.diff', (['rfm.xxSph[0]', 'rfm.xx[1]'], {}), '(rfm.xxSph[0], rfm.xx[1])\n', (4127, 4152), True, 'import sympy as sp\n'), ((4153, 4185), 'sympy.diff', 'sp.diff', (['rfm.xxSph[0]', 'rfm.xx[2]'], {}), '(rfm.xxSph[0], rfm.xx[2])\n', (4160, 4185), True, 'import sympy as sp\n'), ((4231, 4263), 'sympy.diff', 'sp.diff', (['rfm.xxSph[1]', 'rfm.xx[0]'], {}), '(rfm.xxSph[1], rfm.xx[0])\n', (4238, 4263), True, 'import sympy as sp\n'), ((4264, 4296), 'sympy.diff', 'sp.diff', (['rfm.xxSph[1]', 'rfm.xx[1]'], {}), '(rfm.xxSph[1], rfm.xx[1])\n', (4271, 4296), True, 'import sympy as sp\n'), ((4297, 4329), 'sympy.diff', 'sp.diff', (['rfm.xxSph[1]', 'rfm.xx[2]'], {}), '(rfm.xxSph[1], rfm.xx[2])\n', (4304, 4329), True, 'import sympy as sp\n'), ((4375, 4407), 'sympy.diff', 'sp.diff', (['rfm.xxSph[2]', 'rfm.xx[0]'], {}), '(rfm.xxSph[2], rfm.xx[0])\n', (4382, 4407), True, 'import sympy as sp\n'), ((4408, 4440), 'sympy.diff', 'sp.diff', (['rfm.xxSph[2]', 'rfm.xx[1]'], {}), '(rfm.xxSph[2], rfm.xx[1])\n', (4415, 4440), True, 'import sympy as sp\n'), ((4441, 4473), 'sympy.diff', 'sp.diff', (['rfm.xxSph[2]', 'rfm.xx[2]'], {}), '(rfm.xxSph[2], rfm.xx[2])\n', (4448, 4473), True, 'import sympy as sp\n'), ((5586, 5631), 'Min_Max_and_Piecewise_Expressions.coord_leq_bound', 'noif.coord_leq_bound', (['r', 'R_NS_aligned_rotator'], {}), '(r, R_NS_aligned_rotator)\n', (5606, 5631), True, 'import 
Min_Max_and_Piecewise_Expressions as noif\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.